CombinedText stringlengths 4 3.42M |
|---|
package main
import (
"KudosBackend/models"
"fmt"
"log"
"net/http"
"os"
_ "github.com/mattn/go-sqlite3"
)
// main opens the SQLite-backed kudos store and dispatches on the
// command-line arguments.
func main() {
	models.InitDB("./kudos.db")
	SelectMode(os.Args)
}
// SelectMode dispatches to a sub-command based on the first CLI argument.
// Supported commands: serve, get, post, view, reset.
func SelectMode(args []string) {
	if len(args) <= 1 {
		fmt.Println("Available commands: serve, get, post, view, reset")
		return
	}
	switch args[1] {
	case "serve":
		// Serve the HTTP API on port 3000; ListenAndServe only returns on error.
		router := models.NewRouter()
		log.Println("Listening...")
		log.Fatal(http.ListenAndServe(":3000", router))
	case "post":
		// Guard the ID argument (previously panicked with index-out-of-range
		// when invoked as just "post").
		if len(args) > 2 {
			models.PostKudoCount(args[2])
		} else {
			fmt.Println("Usage: post <id>")
		}
	case "get":
		if len(args) > 2 {
			kudoCount := models.GetKudoCount(args[2])
			fmt.Println(kudoCount)
		} else {
			//models.GetAllKudos()
		}
	case "view":
		//models.GetAllKudos()
	case "reset":
		models.ResetDB()
	default:
		fmt.Println("Invalid argument.")
	}
}
Adjust the models import to the fully-qualified repository path so Travis CI builds resolve the package correctly.
package main
import (
"fmt"
"log"
"net/http"
"os"
"github.com/voigt/KudosBackend/models"
_ "github.com/mattn/go-sqlite3"
)
// main opens the SQLite-backed kudos store and dispatches on the
// command-line arguments.
func main() {
	models.InitDB("./kudos.db")
	SelectMode(os.Args)
}
// SelectMode dispatches to a sub-command based on the first CLI argument.
// Supported commands: serve, get, post, view, reset.
func SelectMode(args []string) {
	if len(args) <= 1 {
		fmt.Println("Available commands: serve, get, post, view, reset")
		return
	}
	switch args[1] {
	case "serve":
		// Serve the HTTP API on port 3000; ListenAndServe only returns on error.
		router := models.NewRouter()
		log.Println("Listening...")
		log.Fatal(http.ListenAndServe(":3000", router))
	case "post":
		// Guard the ID argument (previously panicked with index-out-of-range
		// when invoked as just "post").
		if len(args) > 2 {
			models.PostKudoCount(args[2])
		} else {
			fmt.Println("Usage: post <id>")
		}
	case "get":
		if len(args) > 2 {
			kudoCount := models.GetKudoCount(args[2])
			fmt.Println(kudoCount)
		} else {
			//models.GetAllKudos()
		}
	case "view":
		//models.GetAllKudos()
	case "reset":
		models.ResetDB()
	default:
		fmt.Println("Invalid argument.")
	}
}
|
package kv
import (
"fmt"
"os"
"path"
"github.com/s3git/s3git-go/config"
mdb "github.com/szferi/gomdb"
"encoding/hex"
)
var env *mdb.Env
// KV databases containing root level digests for different object types
// When a particular key is present, the value is as follows:
// - when empty, underlying chunk(s) are not cached locally
// - when set, it is the concatenation of the leaf level digests for all nodes
// (and thus necessarily needs to correspond when BLAKE2'd to its key)
//
// If you know the type of the key, you can fetch it directly for the corresponding database
// If you do not know the type, you need to search all stores
var dbiLevel1Blobs mdb.DBI
var dbiLevel1Commits mdb.DBI
var dbiLevel1Prefixes mdb.DBI
var dbiLevel1Trees mdb.DBI
// KV database containing overview of added/removed blobs in stage
var dbiStage mdb.DBI
// OpenDatabase creates (if needed) and opens the LMDB environment and the
// named databases used by this package. It must be called before any other
// function here touches env. Previously every error from the environment
// and transaction calls was silently discarded; they are now returned.
func OpenDatabase() error {
	mdbDir := path.Join(config.Config.S3gitCasPath, ".mdb")
	if err := os.MkdirAll(mdbDir, 0777); err != nil {
		return err
	}
	var err error
	env, err = mdb.NewEnv()
	if err != nil {
		return err
	}
	// TODO: Figure out proper size for lmdb
	env.SetMapSize(1 << 36) // max file size
	env.SetMaxDBs(10)       // up to 10 named databases
	if err := env.Open(mdbDir, 0, 0664); err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	// Open the stage database plus the four level-1 object databases.
	for _, db := range []struct {
		name string
		dbi  *mdb.DBI
	}{
		{"stage", &dbiStage},
		{"l1blobs", &dbiLevel1Blobs},
		{"l1commits", &dbiLevel1Commits},
		{"l1prefixes", &dbiLevel1Prefixes},
		{"l1trees", &dbiLevel1Trees},
	} {
		name := db.name // DBIOpen takes *string; do not alias the loop struct field
		d, err := txn.DBIOpen(&name, mdb.CREATE)
		if err != nil {
			txn.Abort()
			return err
		}
		*db.dbi = d
	}
	// TODO: Make sure all databases are flushed before exiting program
	// defer env.DBIClose(dbi)
	// defer env.Close()
	return txn.Commit()
}
// AddToStage records the given hex-encoded key in the stage database.
// The original shadowed the hex package with a local variable named hex
// and dropped every error; both are fixed here.
func AddToStage(key string) error {
	k, err := hex.DecodeString(key)
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Put(dbiStage, k, nil, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// ClearStage deletes every key currently recorded in the stage database.
func ClearStage() error {
	list, err := listMdb(&dbiStage, "")
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	// Keep draining the channel even after a failure: returning early would
	// leave listMdb's producer goroutine blocked on an unbuffered send.
	var delErr error
	for k := range list {
		if delErr == nil {
			delErr = txn.Del(dbiStage, k, nil)
		}
	}
	if delErr != nil {
		txn.Abort()
		return delErr
	}
	return txn.Commit()
}
// ListStage streams every key recorded in the stage database.
func ListStage() (<-chan []byte, error) {
	return listMdb(&dbiStage, "")
}

// ListLevel1Commits streams the keys of all level-1 commit objects.
func ListLevel1Commits() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Commits, "")
}

// ListLevel1Prefixes streams the keys of all level-1 prefix objects.
func ListLevel1Prefixes() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Prefixes, "")
}

// ListLevel1Trees streams the keys of all level-1 tree objects.
func ListLevel1Trees() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Trees, "")
}

// ListLevel1Blobs streams level-1 blob keys whose hex encoding starts
// with query (an empty query lists all blobs).
func ListLevel1Blobs(query string) (<-chan []byte, error) {
	return listMdb(&dbiLevel1Blobs, query)
}
// TODO: Remove duplicate definitions
const (
	BLOB   = "blob"
	COMMIT = "commit"
	PREFIX = "prefix"
	TREE   = "tree"
)

// getDbForObjectType maps an object-type name onto the level-1 database
// holding objects of that type. An unknown type is a programming error
// and panics.
func getDbForObjectType(objType string) *mdb.DBI {
	switch objType {
	case BLOB:
		return &dbiLevel1Blobs
	case COMMIT:
		return &dbiLevel1Commits
	case PREFIX:
		return &dbiLevel1Prefixes
	case TREE:
		return &dbiLevel1Trees
	}
	panic(fmt.Sprintf("Bad type: %s", objType))
}
// AddToLevel1 stores a single key/value pair in the level-1 database for
// the given object type. Transaction errors are now propagated instead of
// being dropped.
func AddToLevel1(key, value []byte, objType string) error {
	dbi := getDbForObjectType(objType)
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Put(*dbi, key, value, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// AddMultiToLevel1 stores multiple key/value pairs in a single transaction.
// keys and values must be parallel slices of equal length (previously a
// mismatch would panic on indexing).
func AddMultiToLevel1(keys, values [][]byte, objType string) error {
	if len(keys) != len(values) {
		return fmt.Errorf("keys/values length mismatch: %d != %d", len(keys), len(values))
	}
	dbi := getDbForObjectType(objType)
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	for i, key := range keys {
		if err := txn.Put(*dbi, key, values[i], 0); err != nil {
			txn.Abort()
			return err
		}
	}
	return txn.Commit()
}
// GetLevel1 looks up a key across the four level-1 databases and returns
// its value together with the object type it was found under. It returns
// mdb.NotFound when the key exists in none of them.
func GetLevel1(key []byte) ([]byte, string, error) {
	txn, err := env.BeginTxn(nil, mdb.RDONLY)
	if err != nil {
		return nil, "", err
	}
	defer txn.Abort()
	// Probe each database in a fixed order; the first hit wins. This
	// replaces four copy-pasted lookup blocks.
	for _, probe := range []struct {
		dbi     mdb.DBI
		objType string
	}{
		{dbiLevel1Blobs, BLOB},
		{dbiLevel1Commits, COMMIT},
		{dbiLevel1Prefixes, PREFIX},
		{dbiLevel1Trees, TREE},
	} {
		val, err := txn.Get(probe.dbi, key)
		if err == nil {
			return val, probe.objType, nil
		}
		if err != mdb.NotFound {
			return nil, "", err
		}
	}
	return nil, "", mdb.NotFound
}
// listMdb streams all keys of dbi whose hex encoding starts with query
// (an empty query matches everything). The returned channel is closed
// when iteration finishes or an error occurs.
func listMdb(dbi *mdb.DBI, query string) (<-chan []byte, error) {
	// Decode the query up front so an invalid hex query is reported to the
	// caller instead of being silently ignored inside the goroutine.
	setRangeUponStart := len(query) > 0
	var queryKey []byte
	if setRangeUponStart {
		// Hex decoding needs an even number of digits; pad odd-length
		// queries with a trailing zero nibble.
		q := query
		if len(q)%2 == 1 {
			q += "0"
		}
		var err error
		queryKey, err = hex.DecodeString(q)
		if err != nil {
			return nil, err
		}
	}
	result := make(chan []byte)
	go func() {
		// make sure we always close the channel
		defer close(result)
		// scan the database
		txn, err := env.BeginTxn(nil, mdb.RDONLY)
		if err != nil {
			return
		}
		defer txn.Abort()
		cursor, err := txn.CursorOpen(*dbi)
		if err != nil {
			return
		}
		defer cursor.Close()
		for {
			var bkey []byte
			if setRangeUponStart {
				// Position the cursor at the first key >= queryKey.
				bval, _, err := cursor.GetVal(queryKey, nil, mdb.SET_RANGE)
				if err == mdb.NotFound {
					break
				}
				if err != nil {
					// TODO: Log error
					return
				}
				bkey = bval.Bytes()
				setRangeUponStart = false
			} else {
				var err error
				bkey, _, err = cursor.Get(nil, nil, mdb.NEXT)
				if err == mdb.NotFound {
					break
				}
				if err != nil {
					// TODO: Log error
					return
				}
			}
			// Break early once keys no longer carry the query prefix. The
			// length guard prevents a slice-out-of-range panic on keys whose
			// hex encoding is shorter than the query.
			hexKey := hex.EncodeToString(bkey)
			if len(hexKey) < len(query) || hexKey[:len(query)] != query {
				break
			}
			result <- bkey
		}
	}()
	return result, nil
}
Add a topmost-commits database to the KV store, with functions to add, remove, and list topmost commits.
package kv
import (
"fmt"
"os"
"path"
"github.com/s3git/s3git-go/config"
mdb "github.com/szferi/gomdb"
"encoding/hex"
)
var env *mdb.Env
// KV databases containing root level digests for different object types
// When a particular key is present, the value is as follows:
// - when empty, underlying chunk(s) are not cached locally
// - when set, it is the concatenation of the leaf level digests for all nodes
// (and thus necessarily needs to correspond when BLAKE2'd to its key)
//
// If you know the type of the key, you can fetch it directly for the corresponding database
// If you do not know the type, you need to search all stores
var dbiLevel1Blobs mdb.DBI
var dbiLevel1Commits mdb.DBI
var dbiLevel1Prefixes mdb.DBI
var dbiLevel1Trees mdb.DBI
// KV database containing overview of added/removed blobs in stage
var dbiStage mdb.DBI
// KV database top most commits (may be more than one or zero initially)
var dbiTopMostCommits mdb.DBI
// OpenDatabase creates (if needed) and opens the LMDB environment and the
// named databases used by this package. It must be called before any other
// function here touches env. Previously every error from the environment
// and transaction calls was silently discarded; they are now returned.
func OpenDatabase() error {
	mdbDir := path.Join(config.Config.S3gitCasPath, ".mdb")
	if err := os.MkdirAll(mdbDir, 0777); err != nil {
		return err
	}
	var err error
	env, err = mdb.NewEnv()
	if err != nil {
		return err
	}
	// TODO: Figure out proper size for lmdb
	env.SetMapSize(1 << 36) // max file size
	env.SetMaxDBs(10)       // up to 10 named databases
	if err := env.Open(mdbDir, 0, 0664); err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	// Open the stage and topmost-commit databases plus the four level-1
	// object databases.
	for _, db := range []struct {
		name string
		dbi  *mdb.DBI
	}{
		{"stage", &dbiStage},
		{"topmostcommits", &dbiTopMostCommits},
		{"l1blobs", &dbiLevel1Blobs},
		{"l1commits", &dbiLevel1Commits},
		{"l1prefixes", &dbiLevel1Prefixes},
		{"l1trees", &dbiLevel1Trees},
	} {
		name := db.name // DBIOpen takes *string; do not alias the loop struct field
		d, err := txn.DBIOpen(&name, mdb.CREATE)
		if err != nil {
			txn.Abort()
			return err
		}
		*db.dbi = d
	}
	// TODO: Make sure all databases are flushed before exiting program
	// defer env.DBIClose(dbi)
	// defer env.Close()
	return txn.Commit()
}
// AddToStage records the given hex-encoded key in the stage database.
// Decode and transaction errors are now propagated instead of dropped.
func AddToStage(key string) error {
	k, err := hex.DecodeString(key)
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Put(dbiStage, k, nil, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// ClearStage deletes every key currently recorded in the stage database.
func ClearStage() error {
	list, err := listMdb(&dbiStage, "")
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	// Keep draining the channel even after a failure: returning early would
	// leave listMdb's producer goroutine blocked on an unbuffered send.
	var delErr error
	for k := range list {
		if delErr == nil {
			delErr = txn.Del(dbiStage, k, nil)
		}
	}
	if delErr != nil {
		txn.Abort()
		return delErr
	}
	return txn.Commit()
}
// ListStage streams every key recorded in the stage database.
func ListStage() (<-chan []byte, error) {
	return listMdb(&dbiStage, "")
}
// AddTopMostCommit records a hex-encoded commit key as a current topmost
// commit. Decode and transaction errors are now propagated.
func AddTopMostCommit(key string) error {
	k, err := hex.DecodeString(key)
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Put(dbiTopMostCommits, k, nil, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// RemoveTopMostCommit deletes a hex-encoded commit key from the set of
// topmost commits. Decode and transaction errors are now propagated.
func RemoveTopMostCommit(key string) error {
	k, err := hex.DecodeString(key)
	if err != nil {
		return err
	}
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Del(dbiTopMostCommits, k, nil); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// ListTopMostCommits streams the keys of all recorded topmost commits.
func ListTopMostCommits() (<-chan []byte, error) {
	return listMdb(&dbiTopMostCommits, "")
}

// ListLevel1Commits streams the keys of all level-1 commit objects.
func ListLevel1Commits() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Commits, "")
}

// ListLevel1Prefixes streams the keys of all level-1 prefix objects.
func ListLevel1Prefixes() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Prefixes, "")
}

// ListLevel1Trees streams the keys of all level-1 tree objects.
func ListLevel1Trees() (<-chan []byte, error) {
	return listMdb(&dbiLevel1Trees, "")
}

// ListLevel1Blobs streams level-1 blob keys whose hex encoding starts
// with query (an empty query lists all blobs).
func ListLevel1Blobs(query string) (<-chan []byte, error) {
	return listMdb(&dbiLevel1Blobs, query)
}
// TODO: Remove duplicate definitions
const (
	BLOB   = "blob"
	COMMIT = "commit"
	PREFIX = "prefix"
	TREE   = "tree"
)

// getDbForObjectType maps an object-type name onto the level-1 database
// holding objects of that type. An unknown type is a programming error
// and panics.
func getDbForObjectType(objType string) *mdb.DBI {
	switch objType {
	case BLOB:
		return &dbiLevel1Blobs
	case COMMIT:
		return &dbiLevel1Commits
	case PREFIX:
		return &dbiLevel1Prefixes
	case TREE:
		return &dbiLevel1Trees
	}
	panic(fmt.Sprintf("Bad type: %s", objType))
}
// AddToLevel1 stores a single key/value pair in the level-1 database for
// the given object type. Transaction errors are now propagated instead of
// being dropped.
func AddToLevel1(key, value []byte, objType string) error {
	dbi := getDbForObjectType(objType)
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	if err := txn.Put(*dbi, key, value, 0); err != nil {
		txn.Abort()
		return err
	}
	return txn.Commit()
}
// AddMultiToLevel1 stores multiple key/value pairs in a single transaction.
// keys and values must be parallel slices of equal length (previously a
// mismatch would panic on indexing).
func AddMultiToLevel1(keys, values [][]byte, objType string) error {
	if len(keys) != len(values) {
		return fmt.Errorf("keys/values length mismatch: %d != %d", len(keys), len(values))
	}
	dbi := getDbForObjectType(objType)
	txn, err := env.BeginTxn(nil, 0)
	if err != nil {
		return err
	}
	for i, key := range keys {
		if err := txn.Put(*dbi, key, values[i], 0); err != nil {
			txn.Abort()
			return err
		}
	}
	return txn.Commit()
}
// GetLevel1 looks up a key across the four level-1 databases and returns
// its value together with the object type it was found under. It returns
// mdb.NotFound when the key exists in none of them.
func GetLevel1(key []byte) ([]byte, string, error) {
	txn, err := env.BeginTxn(nil, mdb.RDONLY)
	if err != nil {
		return nil, "", err
	}
	defer txn.Abort()
	// Probe each database in a fixed order; the first hit wins. This
	// replaces four copy-pasted lookup blocks.
	for _, probe := range []struct {
		dbi     mdb.DBI
		objType string
	}{
		{dbiLevel1Blobs, BLOB},
		{dbiLevel1Commits, COMMIT},
		{dbiLevel1Prefixes, PREFIX},
		{dbiLevel1Trees, TREE},
	} {
		val, err := txn.Get(probe.dbi, key)
		if err == nil {
			return val, probe.objType, nil
		}
		if err != mdb.NotFound {
			return nil, "", err
		}
	}
	return nil, "", mdb.NotFound
}
// listMdb streams all keys of dbi whose hex encoding starts with query
// (an empty query matches everything). The returned channel is closed
// when iteration finishes or an error occurs.
func listMdb(dbi *mdb.DBI, query string) (<-chan []byte, error) {
	// Decode the query up front so an invalid hex query is reported to the
	// caller instead of being silently ignored inside the goroutine.
	setRangeUponStart := len(query) > 0
	var queryKey []byte
	if setRangeUponStart {
		// Hex decoding needs an even number of digits; pad odd-length
		// queries with a trailing zero nibble.
		q := query
		if len(q)%2 == 1 {
			q += "0"
		}
		var err error
		queryKey, err = hex.DecodeString(q)
		if err != nil {
			return nil, err
		}
	}
	result := make(chan []byte)
	go func() {
		// make sure we always close the channel
		defer close(result)
		// scan the database
		txn, err := env.BeginTxn(nil, mdb.RDONLY)
		if err != nil {
			return
		}
		defer txn.Abort()
		cursor, err := txn.CursorOpen(*dbi)
		if err != nil {
			return
		}
		defer cursor.Close()
		for {
			var bkey []byte
			if setRangeUponStart {
				// Position the cursor at the first key >= queryKey.
				bval, _, err := cursor.GetVal(queryKey, nil, mdb.SET_RANGE)
				if err == mdb.NotFound {
					break
				}
				if err != nil {
					// TODO: Log error
					return
				}
				bkey = bval.Bytes()
				setRangeUponStart = false
			} else {
				var err error
				bkey, _, err = cursor.Get(nil, nil, mdb.NEXT)
				if err == mdb.NotFound {
					break
				}
				if err != nil {
					// TODO: Log error
					return
				}
			}
			// Break early once keys no longer carry the query prefix. The
			// length guard prevents a slice-out-of-range panic on keys whose
			// hex encoding is shorter than the query.
			hexKey := hex.EncodeToString(bkey)
			if len(hexKey) < len(query) || hexKey[:len(query)] != query {
				break
			}
			result <- bkey
		}
	}()
	return result, nil
}
|
package influxdb
import (
"context"
)
// ErrLabelNotFound is the error for a missing Label.
const ErrLabelNotFound = ChronografError("label not found")
const (
OpFindLabels = "FindLabels"
OpFindLabelByID = "FindLabelByID"
OpFindLabelMapping = "FindLabelMapping"
OpCreateLabel = "CreateLabel"
OpCreateLabelMapping = "CreateLabelMapping"
OpUpdateLabel = "UpdateLabel"
OpDeleteLabel = "DeleteLabel"
OpDeleteLabelMapping = "DeleteLabelMapping"
)
// LabelService represents a service for managing resource labels
type LabelService interface {
	// FindLabelByID returns a single label by ID.
	FindLabelByID(ctx context.Context, id ID) (*Label, error)
	// FindLabels returns a list of labels that match a filter
	FindLabels(ctx context.Context, filter LabelFilter, opt ...FindOptions) ([]*Label, error)
	// FindResourceLabels returns a list of labels that belong to a resource
	FindResourceLabels(ctx context.Context, filter LabelMappingFilter) ([]*Label, error)
	// CreateLabel creates a new label
	CreateLabel(ctx context.Context, l *Label) error
	// CreateLabelMapping maps a resource to an existing label
	CreateLabelMapping(ctx context.Context, m *LabelMapping) error
	// UpdateLabel updates a label with a changeset.
	UpdateLabel(ctx context.Context, id ID, upd LabelUpdate) (*Label, error)
	// DeleteLabel deletes a label
	DeleteLabel(ctx context.Context, id ID) error
	// DeleteLabelMapping deletes a label mapping
	DeleteLabelMapping(ctx context.Context, m *LabelMapping) error
}
// Label is a tag set on a resource, typically used for filtering on a UI.
type Label struct {
	ID ID `json:"id,omitempty"` // unique identifier; omitted when zero
	Name string `json:"name"` // required display name (see Validate)
	Properties map[string]string `json:"properties,omitempty"` // free-form key/value metadata
}
// Validate returns an error if the label is invalid.
// A label is valid exactly when it has a non-empty name.
func (l *Label) Validate() error {
	if l.Name != "" {
		return nil
	}
	return &Error{
		Code: EInvalid,
		Msg:  "label name is required",
	}
}
// LabelMapping is used to map resource to its labels.
// It should not be shared directly over the HTTP API.
type LabelMapping struct {
	LabelID    ID `json:"labelID"`
	ResourceID ID `json:"resourceID"` // json tags added so the mapping round-trips through JSON
	ResourceType `json:"resourceType"`
}
// Validate returns an error if the mapping is invalid.
// The label ID and resource ID must both be valid, and the resource type
// must be one of the known types. The LabelID check answers the todo that
// previously sat here.
func (l *LabelMapping) Validate() error {
	if !l.LabelID.Valid() {
		return &Error{
			Code: EInvalid,
			Msg:  "label id is required",
		}
	}
	if !l.ResourceID.Valid() {
		return &Error{
			Code: EInvalid,
			Msg:  "resourceID is required",
		}
	}
	if err := l.ResourceType.Valid(); err != nil {
		return &Error{
			Code: EInvalid,
			Err:  err,
		}
	}
	return nil
}
// LabelUpdate represents a changeset for a label.
// Only fields which are set are updated.
type LabelUpdate struct {
	Properties map[string]string `json:"properties,omitempty"`
}

// LabelFilter represents a set of filters that restrict the returned results.
type LabelFilter struct {
	ID ID // match by label ID
	Name string // match by label name
}

// LabelMappingFilter represents a set of filters that restrict the returned results.
type LabelMappingFilter struct {
	ResourceID ID // the resource whose labels are listed
	ResourceType // the type of that resource
}
fix: json tags for label mappings
Signed-off-by: Leonardo Di Donato <2012e0ca99d70aa6c893822ff73002216e5dba72@gmail.com>
package influxdb
import (
"context"
)
// ErrLabelNotFound is the error for a missing Label.
const ErrLabelNotFound = ChronografError("label not found")
const (
OpFindLabels = "FindLabels"
OpFindLabelByID = "FindLabelByID"
OpFindLabelMapping = "FindLabelMapping"
OpCreateLabel = "CreateLabel"
OpCreateLabelMapping = "CreateLabelMapping"
OpUpdateLabel = "UpdateLabel"
OpDeleteLabel = "DeleteLabel"
OpDeleteLabelMapping = "DeleteLabelMapping"
)
// LabelService represents a service for managing resource labels
type LabelService interface {
	// FindLabelByID returns a single label by ID.
	FindLabelByID(ctx context.Context, id ID) (*Label, error)
	// FindLabels returns a list of labels that match a filter
	FindLabels(ctx context.Context, filter LabelFilter, opt ...FindOptions) ([]*Label, error)
	// FindResourceLabels returns a list of labels that belong to a resource
	FindResourceLabels(ctx context.Context, filter LabelMappingFilter) ([]*Label, error)
	// CreateLabel creates a new label
	CreateLabel(ctx context.Context, l *Label) error
	// CreateLabelMapping maps a resource to an existing label
	CreateLabelMapping(ctx context.Context, m *LabelMapping) error
	// UpdateLabel updates a label with a changeset.
	UpdateLabel(ctx context.Context, id ID, upd LabelUpdate) (*Label, error)
	// DeleteLabel deletes a label
	DeleteLabel(ctx context.Context, id ID) error
	// DeleteLabelMapping deletes a label mapping
	DeleteLabelMapping(ctx context.Context, m *LabelMapping) error
}
// Label is a tag set on a resource, typically used for filtering on a UI.
type Label struct {
	ID ID `json:"id,omitempty"` // unique identifier; omitted when zero
	Name string `json:"name"` // required display name (see Validate)
	Properties map[string]string `json:"properties,omitempty"` // free-form key/value metadata
}
// Validate returns an error if the label is invalid.
// A label is valid exactly when it has a non-empty name.
func (l *Label) Validate() error {
	if l.Name != "" {
		return nil
	}
	return &Error{
		Code: EInvalid,
		Msg:  "label name is required",
	}
}
// LabelMapping is used to map resource to its labels.
// It should not be shared directly over the HTTP API.
type LabelMapping struct {
	LabelID ID `json:"labelID"`
	ResourceID ID `json:"resourceID"`
	// ResourceType is embedded so the mapping carries the resource's type.
	ResourceType `json:"resourceType"`
}
// Validate returns an error if the mapping is invalid: both the label ID
// and the resource ID must be valid, and the resource type must be known.
func (l *LabelMapping) Validate() error {
	switch {
	case !l.LabelID.Valid():
		return &Error{
			Code: EInvalid,
			Msg:  "label id is required",
		}
	case !l.ResourceID.Valid():
		return &Error{
			Code: EInvalid,
			Msg:  "resource id is required",
		}
	}
	if err := l.ResourceType.Valid(); err != nil {
		return &Error{
			Code: EInvalid,
			Err:  err,
		}
	}
	return nil
}
// LabelUpdate represents a changeset for a label.
// Only fields which are set are updated.
type LabelUpdate struct {
	Properties map[string]string `json:"properties,omitempty"`
}

// LabelFilter represents a set of filters that restrict the returned results.
type LabelFilter struct {
	ID ID // match by label ID
	Name string // match by label name
}

// LabelMappingFilter represents a set of filters that restrict the returned results.
type LabelMappingFilter struct {
	ResourceID ID // the resource whose labels are listed
	ResourceType // the type of that resource
}
|
package sexp
import (
"fmt"
"regexp"
"strconv"
)
// Item is a single lexical token produced by the s-expression lexer.
type Item struct {
	Type     ItemType // kind of token
	Position int      // byte offset of the token in the input
	Value    []byte   // raw bytes of the token
}

// ItemType identifies the kind of a lexed Item.
type ItemType int

// String renders an Item for debugging.
func (item Item) String() string {
	switch item.Type {
	case ItemError:
		return fmt.Sprintf("Error(%v)", item.Value)
	case ItemBracketLeft:
		return "("
	case ItemBracketRight:
		return ")"
	case ItemToken:
		return fmt.Sprintf("Token(%v)", item.Value)
	case ItemQuote:
		return fmt.Sprintf("Quote(%v)", item.Value)
	case ItemVerbatim:
		return fmt.Sprintf("Verbatim(%v)", item.Value)
	case ItemEOF:
		return "EOF"
	default:
		// Previously returned the literal string "Unknown(%v)"; actually
		// format the unexpected type value.
		return fmt.Sprintf("Unknown(%v)", item.Type)
	}
}

const (
	ItemError ItemType = iota
	ItemBracketLeft  // (
	ItemBracketRight // )
	ItemToken        // abc Token.
	ItemQuote        // "abc" Quoted string. May also include length 3"abc"
	ItemVerbatim     // 3:abc Length prefixed "verbatim" encoding.
	// ItemHex         // #616263# Hexidecimal string.
	// ItemBase64      // {MzphYmM=} Base64 of the verbatim encoding "3:abc"
	// ItemBase64Octet // |YWJj| Base64 encoding of the octet-string "abc"
	ItemEOF
)
var (
reBracketLeft = regexp.MustCompile(`^\(`)
reBracketRight = regexp.MustCompile(`^\)`)
reWhitespace = regexp.MustCompile(`^\s+`)
reVerbatim = regexp.MustCompile(`^(\d+):`)
reQuote = regexp.MustCompile(`^(\d+)?"((?:[^\\"]|\\.)*)"`)
// Strict(er) R.Rivset 1997 draft token + unicode letter support (hello 1997).
// reToken = regexp.MustCompile(`^[\p{L}][\p{L}\p{N}\-./_:*+=]+`)
// Instead a token can be anything including '(', ')' and ' ' so long as you escape them:
reToken = regexp.MustCompile(`^(?:[^\\ ()]|\\.)+`)
)
// stateFn represents one lexer state; it returns the state to run next.
type stateFn func(*lexer) stateFn

// lexer holds the scanning state of the s-expression lexer.
type lexer struct {
	input []byte // the bytes being scanned
	items chan Item // lexed items are delivered on this channel
	start int // start offset of the current item
	pos int // current offset in input
	state stateFn // next state function to execute
	matches [][]byte // submatches from the most recent regexp match
}

// emit sends the span input[start:pos] as an Item of type t.
func (l *lexer) emit(t ItemType) {
	l.items <- Item{t, l.start, l.input[l.start:l.pos]}
}

// Next returns the next lexed Item, blocking until one is available.
func (l *lexer) Next() Item {
	item := <-l.items
	return item
}

// scan advances past a match of re anchored at the current position and
// reports whether it matched.
func (l *lexer) scan(re *regexp.Regexp) bool {
	if l.match(re) {
		l.start = l.pos
		l.pos += len(l.matches[0])
		return true
	}
	return false
}

// match tests re against the remaining input, storing its submatches.
func (l *lexer) match(re *regexp.Regexp) bool {
	if l.matches = re.FindSubmatch(l.input[l.pos:]); l.matches != nil {
		return true
	}
	return false
}

// run drives the state machine until a terminal (nil) state, then closes
// the item channel to signal the consumer that lexing is done.
func (l *lexer) run() {
	for l.state = lex; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}

// errorf emits an ItemError carrying the formatted message and stops lexing.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- Item{ItemError, l.start, []byte(fmt.Sprintf(format, args...))}
	return nil
}
// lex is the single state of the scanner: it dispatches on the bytes at the
// current position and emits at most one item before returning the next state.
func lex(l *lexer) stateFn {
	// The order is important here, reToken must come last because it'll match
	// reVerbatim and reQuote atoms as well.
	switch {
	case l.pos >= len(l.input):
		l.emit(ItemEOF)
		return nil
	case l.scan(reWhitespace):
		return lex
	case l.scan(reBracketLeft):
		l.emit(ItemBracketLeft)
		return lex
	case l.scan(reBracketRight):
		l.emit(ItemBracketRight)
		return lex
	case l.scan(reQuote):
		// TODO: errorf if length exists and doesn't line up with quote length.
		// Don't include quotes in Value. (l.matches[2] is already a []byte;
		// the previous conversion was a no-op.)
		l.items <- Item{ItemQuote, l.start, l.matches[2]}
		return lex
	case l.scan(reVerbatim):
		// Length-prefixed atom: consume exactly n bytes after the colon.
		n, _ := strconv.ParseInt(string(l.matches[1]), 10, 64)
		l.start = l.pos
		// Guard against a declared length running past the end of input,
		// which previously caused an out-of-range slice in emit.
		if l.pos+int(n) > len(l.input) {
			return l.errorf("Verbatim length %d exceeds input at %d.", n, l.start)
		}
		l.pos += int(n)
		l.emit(ItemVerbatim)
		return lex
	case l.scan(reToken):
		l.emit(ItemToken)
		return lex
	}
	// TODO: Read number of runes. Reading 10 bytes may leave the last unprintable.
	near := l.input[l.pos:]
	if len(near) > 10 {
		near = near[:10]
	}
	return l.errorf("Unexpected byte at %d near '%s'.", l.pos, near)
}
/*
Lex S-Expressions.
See http://people.csail.mit.edu/rivest/Sexp.txt
* Unlike the R.Rivest 1997 draft tokens will match any unicode letters.
* Canonical S-Expressions may have spaces between atoms which isn't strictly correct.
*/
func NewLexer(input []byte) *lexer {
	// Lexing runs in its own goroutine; callers pull items via Next().
	l := &lexer{input: input, items: make(chan Item)}
	go l.run()
	return l
}
Reorder the values in ItemType so that the zero value is now ItemEOF.
As a result, reading an ItemType from an empty (closed) channel
now yields ItemEOF instead of ItemError.
package sexp
import (
"fmt"
"regexp"
"strconv"
)
// Item is a single lexical token produced by the s-expression lexer.
type Item struct {
	Type     ItemType // kind of token
	Position int      // byte offset of the token in the input
	Value    []byte   // raw bytes of the token
}

// ItemType identifies the kind of a lexed Item.
type ItemType int

// String renders an Item for debugging.
func (item Item) String() string {
	switch item.Type {
	case ItemError:
		return fmt.Sprintf("Error(%v)", item.Value)
	case ItemBracketLeft:
		return "("
	case ItemBracketRight:
		return ")"
	case ItemToken:
		return fmt.Sprintf("Token(%v)", item.Value)
	case ItemQuote:
		return fmt.Sprintf("Quote(%v)", item.Value)
	case ItemVerbatim:
		return fmt.Sprintf("Verbatim(%v)", item.Value)
	case ItemEOF:
		return "EOF"
	default:
		// Previously returned the literal string "Unknown(%v)"; actually
		// format the unexpected type value.
		return fmt.Sprintf("Unknown(%v)", item.Type)
	}
}

const (
	// ItemEOF is deliberately the zero value: a receive from a closed or
	// drained item channel yields ItemEOF rather than ItemError.
	ItemEOF ItemType = iota
	ItemError
	ItemBracketLeft  // (
	ItemBracketRight // )
	ItemToken        // abc Token.
	ItemQuote        // "abc" Quoted string. May also include length 3"abc"
	ItemVerbatim     // 3:abc Length prefixed "verbatim" encoding.
	// ItemHex         // #616263# Hexidecimal string.
	// ItemBase64      // {MzphYmM=} Base64 of the verbatim encoding "3:abc"
	// ItemBase64Octet // |YWJj| Base64 encoding of the octet-string "abc"
)
var (
reBracketLeft = regexp.MustCompile(`^\(`)
reBracketRight = regexp.MustCompile(`^\)`)
reWhitespace = regexp.MustCompile(`^\s+`)
reVerbatim = regexp.MustCompile(`^(\d+):`)
reQuote = regexp.MustCompile(`^(\d+)?"((?:[^\\"]|\\.)*)"`)
// Strict(er) R.Rivset 1997 draft token + unicode letter support (hello 1997).
// reToken = regexp.MustCompile(`^[\p{L}][\p{L}\p{N}\-./_:*+=]+`)
// Instead a token can be anything including '(', ')' and ' ' so long as you escape them:
reToken = regexp.MustCompile(`^(?:[^\\ ()]|\\.)+`)
)
// stateFn represents one lexer state; it returns the state to run next.
type stateFn func(*lexer) stateFn

// lexer holds the scanning state of the s-expression lexer.
type lexer struct {
	input []byte // the bytes being scanned
	items chan Item // lexed items are delivered on this channel
	start int // start offset of the current item
	pos int // current offset in input
	state stateFn // next state function to execute
	matches [][]byte // submatches from the most recent regexp match
}

// emit sends the span input[start:pos] as an Item of type t.
func (l *lexer) emit(t ItemType) {
	l.items <- Item{t, l.start, l.input[l.start:l.pos]}
}

// Next returns the next lexed Item, blocking until one is available.
// Once the channel is closed it yields the zero Item, whose type is ItemEOF.
func (l *lexer) Next() Item {
	item := <-l.items
	return item
}

// scan advances past a match of re anchored at the current position and
// reports whether it matched.
func (l *lexer) scan(re *regexp.Regexp) bool {
	if l.match(re) {
		l.start = l.pos
		l.pos += len(l.matches[0])
		return true
	}
	return false
}

// match tests re against the remaining input, storing its submatches.
func (l *lexer) match(re *regexp.Regexp) bool {
	if l.matches = re.FindSubmatch(l.input[l.pos:]); l.matches != nil {
		return true
	}
	return false
}

// run drives the state machine until a terminal (nil) state, then closes
// the item channel to signal the consumer that lexing is done.
func (l *lexer) run() {
	for l.state = lex; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}

// errorf emits an ItemError carrying the formatted message and stops lexing.
func (l *lexer) errorf(format string, args ...interface{}) stateFn {
	l.items <- Item{ItemError, l.start, []byte(fmt.Sprintf(format, args...))}
	return nil
}
// lex is the single state of the scanner: it dispatches on the bytes at the
// current position and emits at most one item before returning the next state.
func lex(l *lexer) stateFn {
	// The order is important here, reToken must come last because it'll match
	// reVerbatim and reQuote atoms as well.
	switch {
	case l.pos >= len(l.input):
		l.emit(ItemEOF)
		return nil
	case l.scan(reWhitespace):
		return lex
	case l.scan(reBracketLeft):
		l.emit(ItemBracketLeft)
		return lex
	case l.scan(reBracketRight):
		l.emit(ItemBracketRight)
		return lex
	case l.scan(reQuote):
		// TODO: errorf if length exists and doesn't line up with quote length.
		// Don't include quotes in Value. (l.matches[2] is already a []byte;
		// the previous conversion was a no-op.)
		l.items <- Item{ItemQuote, l.start, l.matches[2]}
		return lex
	case l.scan(reVerbatim):
		// Length-prefixed atom: consume exactly n bytes after the colon.
		n, _ := strconv.ParseInt(string(l.matches[1]), 10, 64)
		l.start = l.pos
		// Guard against a declared length running past the end of input,
		// which previously caused an out-of-range slice in emit.
		if l.pos+int(n) > len(l.input) {
			return l.errorf("Verbatim length %d exceeds input at %d.", n, l.start)
		}
		l.pos += int(n)
		l.emit(ItemVerbatim)
		return lex
	case l.scan(reToken):
		l.emit(ItemToken)
		return lex
	}
	// TODO: Read number of runes. Reading 10 bytes may leave the last unprintable.
	near := l.input[l.pos:]
	if len(near) > 10 {
		near = near[:10]
	}
	return l.errorf("Unexpected byte at %d near '%s'.", l.pos, near)
}
/*
Lex S-Expressions.
See http://people.csail.mit.edu/rivest/Sexp.txt
* Unlike the R.Rivest 1997 draft tokens will match any unicode letters.
* Canonical S-Expressions may have spaces between atoms which isn't strictly correct.
*/
func NewLexer(input []byte) *lexer {
	// Lexing runs in its own goroutine; callers pull items via Next().
	l := &lexer{input: input, items: make(chan Item)}
	go l.run()
	return l
}
|
package mark
import (
"fmt"
"regexp"
"strings"
"unicode/utf8"
)
// type position
type Pos int
// itemType identifies the type of lex items.
type itemType int
// Item represent a token or text string returned from the scanner
type item struct {
typ itemType // The type of this item.
pos Pos // The starting position, in bytes, of this item in the input string.
val string // The value of this item.
}
const eof = -1 // Zero value so closed channel delivers EOF
const (
itemError itemType = iota // Error occurred; value is text of error
itemNewLine
itemHTML
// Block Elements
itemHeading
itemLHeading // Setext-style headers
itemBlockQuote
itemList
itemCodeBlock
itemGfmCodeBlock
itemHr
itemTable
itemLpTable
// Span Elements
itemText
itemLink
itemAutoLink
itemGfmLink
itemStrong
itemItalic
itemStrike
itemCode
itemImage
itemBr
itemPipe
// Indentation
itemIndent
)
// Regexp building blocks shared by several grammar rules below.
var (
	// reEmphasise matches _..._ or *...* with %[1]d repetitions of the marker.
	reEmphasise = `(?s)^_{%[1]d}(.+?(?:_{0,}))_{%[1]d}|^\*{%[1]d}(.+?(?:\*{0,}))\*{%[1]d}`
	// reGfmCode matches a fenced code block delimited by %[1]s (backtick or tilde).
	reGfmCode = `(?s)^%[1]s{3,} *(\S+)? *\n(.+?)\s*%[1]s{3,}$*(?:\n+|$)`
	// reLinkText and reLinkHref are the two halves of an inline link.
	reLinkText = `(?:\[[^\]]*\]|[^\[\]]|\])*`
	reLinkHref = `(?s)\s*<?(.*?)>?(?:\s+['"](.*?)['"])?\s*`
)

// Block Grammar
var block = map[itemType]*regexp.Regexp{
	itemHeading: regexp.MustCompile(`^ *(#{1,6}) +([^\n]+?) *#* *(?:\n+|$)`),
	itemLHeading: regexp.MustCompile(`^([^\n]+)\n *(=|-){2,} *(?:\n+|$)`),
	itemHr: regexp.MustCompile(`^( *[-*_]){3,} *(?:\n+|$)`),
	itemCodeBlock: regexp.MustCompile(`^(( {4}|\t)[^-+*(\d\.)\n]+\n*)+`),
	// Backreferences is unavailable
	itemGfmCodeBlock: regexp.MustCompile(fmt.Sprintf(reGfmCode, "`") + "|" + fmt.Sprintf(reGfmCode, "~")),
	// `^(?:[*+-]|\d+\.) [\s\S]+?(?:\n|)`
	itemList: regexp.MustCompile(`^(?:[*+-]|\d+\.) +?(?:\n|)`),
	// leading-pipe table
	itemLpTable: regexp.MustCompile(`^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*`),
	itemTable: regexp.MustCompile(`^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*`),
}

// Inline Grammar
var span = map[itemType]*regexp.Regexp{
	itemItalic: regexp.MustCompile(fmt.Sprintf(reEmphasise, 1)),
	itemStrong: regexp.MustCompile(fmt.Sprintf(reEmphasise, 2)),
	itemStrike: regexp.MustCompile(`(?s)^~{2}(.+?)~{2}`),
	// itemMixed(e.g: ***str***, ~~*str*~~) will be part of the parser
	// or we'll lex recursively
	itemCode: regexp.MustCompile("(?s)^`{1,2}\\s*(.*?[^`])\\s*`{1,2}"),
	itemBr: regexp.MustCompile(`^ {2,}\n`),
	// Links
	itemLink: regexp.MustCompile(fmt.Sprintf(`^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref)),
	itemAutoLink: regexp.MustCompile(`^<([^ >]+(@|:\/)[^ >]+)>`),
	itemGfmLink: regexp.MustCompile(`^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])`),
	// Image
	// TODO(Ariel): DRY
	itemImage: regexp.MustCompile(fmt.Sprintf(`^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref)),
}
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn
// lexer holds the state of the scanner.
type lexer struct {
name string // the name of the input; used only for error reports
input string // the string being scanned
state stateFn // the next lexing function to enter
pos Pos // current position in the input
start Pos // start position of this item
width Pos // width of last rune read from input
lastPos Pos // position of most recent item returned by nextItem
items chan item // channel of scanned items
eot Pos // end of table
}
// lex creates a new lexer for the input string.
func lex(name, input string) *lexer {
	l := &lexer{
		name: name,
		input: input,
		items: make(chan item),
	}
	// Scanning runs concurrently; the parser pulls items via nextItem.
	go l.run()
	return l
}

// One phase lexing(inline reason)
func lexInline(input string) *lexer {
	l := &lexer{
		input: input,
		items: make(chan item),
	}
	go l.lexInline()
	return l
}

// run runs the state machine for the lexer.
func (l *lexer) run() {
	for l.state = lexAny; l.state != nil; {
		l.state = l.state(l)
	}
	// Closing the channel tells the parser no more items will follow.
	close(l.items)
}
// next return the next rune in the input
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		// Exhausted: record zero width so a subsequent backup is a no-op.
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	return r
}
// lexAny is the top-level state: it inspects the next rune and routes to the
// appropriate block-level scanner.
func lexAny(l *lexer) stateFn {
	switch r := l.next(); r {
	case eof:
		return nil
	case '*', '-', '_', '+':
		// Either a horizontal rule (e.g. ***, ---) or a list bullet.
		p := l.peek()
		if p == '*' || p == '-' || p == '_' {
			l.backup()
			return lexHr
		} else {
			l.backup()
			return lexList
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		l.backup()
		return lexList
	case '<':
		l.backup()
		return lexHtml
	case '>':
		l.emit(itemBlockQuote)
		return lexText
	case '#':
		l.backup()
		return lexHeading
	case ' ', '\t':
		// Should be here ?
		// TODO(Ariel): test that it's a codeBlock and not list for sure
		if block[itemCodeBlock].MatchString(l.input[l.pos-1:]) {
			l.backup()
			return lexCode
		}
		// Keep moving forward until we get all the
		// indentation size
		for ; r == l.peek(); r = l.next() {
		}
		l.emit(itemIndent)
		return lexAny
	case '`', '~':
		// Check for a gfm code fence. Guard the slice: fewer than two bytes
		// may remain near the end of input (previously this could panic with
		// an out-of-range slice on input ending in a lone ` or ~).
		if int(l.pos)+2 <= len(l.input) {
			c := l.input[l.pos : l.pos+2]
			if c == "``" || c == "~~" {
				l.backup()
				return lexGfmCode
			}
		}
		fallthrough
	case '|':
		if m := block[itemLpTable].FindString(l.input[l.pos-1:]); m != "" {
			l.eot = l.start + Pos(len(m))
			l.emit(itemLpTable)
		}
		fallthrough
	default:
		if m := block[itemTable].FindString(l.input[l.pos-1:]); m != "" {
			l.eot = l.start + Pos(len(m)) - l.width
			l.emit(itemTable)
			// we go one step back to get the full text
			// in the lexText phase
			l.start--
		}
		l.backup()
		return lexText
	}
}
// lexHeading scans heading items.
func lexHeading(l *lexer) stateFn {
	if m := block[itemHeading].FindString(l.input[l.pos:]); m != "" {
		// Emit without the newline(\n)
		l.pos += Pos(len(m))
		// TODO(Ariel): hack, fix regexp
		if strings.HasSuffix(m, "\n") {
			l.pos--
		}
		l.emit(itemHeading)
		return lexAny
	}
	// Not a heading after all; fall back to plain text.
	return lexText
}
// lexHr scans horizontal rule items (e.g. "***", "---", "___").
// Run the regexp once via FindString instead of MatchString followed by
// FindString; the hr pattern cannot match the empty string, so an empty
// result means "no match".
func lexHr(l *lexer) stateFn {
	if m := block[itemHr].FindString(l.input[l.pos:]); m != "" {
		l.pos += Pos(len(m))
		l.emit(itemHr)
		return lexAny
	}
	return lexText
}
// lexGfmCode scans a GFM fenced code block.
func lexGfmCode(l *lexer) stateFn {
	// One FindString call both tests and captures the block, replacing
	// the previous MatchString+FindString double scan of the same input.
	if match := block[itemGfmCodeBlock].FindString(l.input[l.pos:]); match != "" {
		l.pos += Pos(len(match))
		l.emit(itemGfmCodeBlock)
		return lexAny
	}
	return lexText
}
// lexCode scans an indented code block.
func lexCode(l *lexer) stateFn {
	m := block[itemCodeBlock].FindString(l.input[l.pos:])
	l.pos += Pos(len(m))
	l.emit(itemCodeBlock)
	return lexAny
}
// lexList scans an ordered or unordered list marker; the item body is
// handed on to lexText.
func lexList(l *lexer) stateFn {
	marker := block[itemList].FindString(l.input[l.pos:])
	if marker != "" {
		l.pos += Pos(len(marker))
		l.emit(itemList)
	}
	return lexText
}
// lexText scans until end-of-line(\n)
// We have a lot of things to do in this lexText,
// for example: ignore itemBr on list/tables.
func lexText(l *lexer) stateFn {
	// Drain text before emitting: any pending run of plain characters
	// becomes an itemText before the structural item itself.
	emit := func(item itemType, pos Pos) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += pos
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); {
		case r == eof:
			// eof (-1) is emitted as a sentinel item type.
			emit(eof, Pos(0))
			break Loop
		case r == '\n':
			emit(itemNewLine, l.width)
			break Loop
		case r == '|':
			// Pipes are only structural while inside a table region
			// (eot was set by lexAny when the table was recognized).
			if l.eot > l.pos {
				emit(itemPipe, l.width)
				break
			}
			l.next()
		default:
			// Test for Setext-style headers
			if m := block[itemLHeading].FindString(l.input[l.pos:]); m != "" {
				emit(itemLHeading, Pos(len(m)))
				break Loop
			}
			l.next()
		}
	}
	return lexAny
}
// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
}
// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}
// emit passes an item back to the client. The item's value is the input
// span [start, pos); start then advances to pos so the next item begins
// where this one ended.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}
// nextItem returns the next item token, called by the parser.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = l.pos
	return item
}
// lexInline performs one-phase lexing of span-level items (emphasis,
// links, code, breaks) over the whole input, then closes the item
// channel.
func (l *lexer) lexInline() {
	// Drain text before emitting: pending plain characters become an
	// itemText before the span item itself.
	emit := func(item itemType, pos Pos) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += pos
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); {
		case r == eof:
			// We don't want to emit EOF in inline mode; flush the
			// remaining text instead.
			// emit(eof, Pos(0))
			l.emit(itemText)
			break Loop
		case r == ' ':
			// Trailing spaces before a newline form a hard break
			// (span[itemBr] is `^ {2,}\n`).
			if m := span[itemBr].FindString(l.input[l.pos:]); m != "" {
				// pos - length of new-line
				emit(itemBr, Pos(len(m)))
				break
			}
			l.next()
		// if it starts as an emphasis
		case r == '_', r == '*', r == '~', r == '`':
			input := l.input[l.pos:]
			// Strong is tried before Italic so "**" wins over "*".
			// Strong
			if m := span[itemStrong].FindString(input); m != "" {
				emit(itemStrong, Pos(len(m)))
				break
			}
			// Italic
			if m := span[itemItalic].FindString(input); m != "" {
				emit(itemItalic, Pos(len(m)))
				break
			}
			// Strike
			if m := span[itemStrike].FindString(input); m != "" {
				emit(itemStrike, Pos(len(m)))
				break
			}
			// InlineCode
			if m := span[itemCode].FindString(input); m != "" {
				emit(itemCode, Pos(len(m)))
				break
			}
			l.next()
		// itemLink, itemAutoLink, itemImage
		case r == '[', r == '<', r == '!':
			input := l.input[l.pos:]
			if m := span[itemLink].FindString(input); m != "" {
				pos := Pos(len(m))
				// '[' opens a link; '!' shares the same regex but is an
				// image.
				if r == '[' {
					emit(itemLink, pos)
				} else {
					emit(itemImage, pos)
				}
				break
			}
			if m := span[itemAutoLink].FindString(input); m != "" {
				emit(itemAutoLink, Pos(len(m)))
				break
			}
			l.next()
		default:
			// Bare URLs (GFM autolinking).
			input := l.input[l.pos:]
			if m := span[itemGfmLink].FindString(input); m != "" {
				emit(itemGfmLink, Pos(len(m)))
				break
			}
			l.next()
		}
	}
	close(l.items)
}
// lexHtml scans an HTML block (the previous doc comment was copy-pasted
// from lexList). Also removes a leftover debug fmt.Println.
func lexHtml(l *lexer) stateFn {
	if match, res := l.MatchHtml(l.input[l.pos:]); match {
		l.pos += Pos(len(res))
		l.emit(itemHTML)
		return lexAny
	}
	return lexText
}
// MatchHtml tests whether the given input starts with an HTML block
// pattern (blocks only) and returns the matched prefix when it does.
// NOTE(review): the three fixed patterns below are recompiled on every
// call; consider hoisting them to package level.
func (l *lexer) MatchHtml(input string) (bool, string) {
	// HTML comments count as a block regardless of tag name.
	comment := regexp.MustCompile(`(?s)<!--.*?-->`)
	if m := comment.FindString(input); m != "" {
		return true, m
	}
	reStart := regexp.MustCompile(`^<(\w+)(?:"[^"]*"|'[^']*'|[^'">])*?>`)
	// TODO: Add all span-tags and move to config.
	reSpan := regexp.MustCompile(`^(a|em|strong|small|s|q|data|time|code|sub|sup|i|b|u|span|br|del|img)$`)
	if m := reStart.FindStringSubmatch(input); len(m) != 0 {
		el, name := m[0], m[1]
		// span-level tags are treated as plain text, not HTML blocks
		if reSpan.MatchString(name) {
			return false, ""
		}
		// if it's a self-closed html element, the tag is the whole block
		if strings.HasSuffix(el, "/>") {
			return true, el
		}
		// The closing-tag pattern depends on the element name, so it is
		// built per call; an unexpected name that fails to compile is
		// simply not a block.
		reStr := fmt.Sprintf(`(?s)(.)+?<\/%s> *(?:\n{2,}|\s*$)`, name)
		reMatch, err := regexp.Compile(reStr)
		if err != nil {
			return false, ""
		}
		if m := reMatch.FindString(input); m != "" {
			return true, m
		}
	}
	return false, ""
}
feat(lexer): lexLinkDefinition
package mark
import (
"fmt"
"regexp"
"strings"
"unicode/utf8"
)
// Pos is a byte offset into the input string.
type Pos int
// itemType identifies the type of lex items.
type itemType int
// item represents a token or text string returned from the scanner.
type item struct {
	typ itemType // The type of this item.
	pos Pos      // The starting position, in bytes, of this item in the input string.
	val string   // The value of this item.
}
const eof = -1 // Zero value so closed channel delivers EOF
// Token kinds produced by the lexer.
const (
	itemError itemType = iota // Error occurred; value is text of error
	itemNewLine
	itemHTML
	itemDefLink
	// Block Elements
	itemHeading
	itemLHeading // Setext-style headers
	itemBlockQuote
	itemList
	itemCodeBlock
	itemGfmCodeBlock
	itemHr
	itemTable
	itemLpTable
	// Span Elements
	itemText
	itemLink
	itemAutoLink
	itemGfmLink
	itemStrong
	itemItalic
	itemStrike
	itemCode
	itemImage
	itemBr
	itemPipe
	// Indentation
	itemIndent
)
// Reusable pattern fragments; the %[1]d / %[1]s placeholders are filled
// in with fmt.Sprintf when the grammar maps below are built.
var (
	reEmphasise = `(?s)^_{%[1]d}(.+?(?:_{0,}))_{%[1]d}|^\*{%[1]d}(.+?(?:\*{0,}))\*{%[1]d}`
	reGfmCode   = `(?s)^%[1]s{3,} *(\S+)? *\n(.+?)\s*%[1]s{3,}$*(?:\n+|$)`
	reLinkText  = `(?:\[[^\]]*\]|[^\[\]]|\])*`
	reLinkHref  = `(?s)\s*<?(.*?)>?(?:\s+['"](.*?)['"])?\s*`
	reDefLink   = `^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +["(]([^\n]+)[")])? *(?:\n+|$)`
)
// Block Grammar
var block = map[itemType]*regexp.Regexp{
	itemDefLink:   regexp.MustCompile(reDefLink),
	itemHeading:   regexp.MustCompile(`^ *(#{1,6}) +([^\n]+?) *#* *(?:\n+|$)`),
	itemLHeading:  regexp.MustCompile(`^([^\n]+)\n *(=|-){2,} *(?:\n+|$)`),
	itemHr:        regexp.MustCompile(`^( *[-*_]){3,} *(?:\n+|$)`),
	itemCodeBlock: regexp.MustCompile(`^(( {4}|\t)[^-+*(\d\.)\n]+\n*)+`),
	// Backreferences are unavailable (RE2), so the backtick and tilde
	// fence variants are expanded separately and ORed together.
	itemGfmCodeBlock: regexp.MustCompile(fmt.Sprintf(reGfmCode, "`") + "|" + fmt.Sprintf(reGfmCode, "~")),
	// `^(?:[*+-]|\d+\.) [\s\S]+?(?:\n|)`
	itemList: regexp.MustCompile(`^(?:[*+-]|\d+\.) +?(?:\n|)`),
	// leading-pipe table
	itemLpTable: regexp.MustCompile(`^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*`),
	itemTable:   regexp.MustCompile(`^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*`),
}
// Inline Grammar
var span = map[itemType]*regexp.Regexp{
	itemItalic: regexp.MustCompile(fmt.Sprintf(reEmphasise, 1)),
	itemStrong: regexp.MustCompile(fmt.Sprintf(reEmphasise, 2)),
	itemStrike: regexp.MustCompile(`(?s)^~{2}(.+?)~{2}`),
	// itemMixed(e.g: ***str***, ~~*str*~~) will be part of the parser
	// or we'll lex recursively
	itemCode: regexp.MustCompile("(?s)^`{1,2}\\s*(.*?[^`])\\s*`{1,2}"),
	itemBr:   regexp.MustCompile(`^ {2,}\n`),
	// Links
	itemLink:     regexp.MustCompile(fmt.Sprintf(`^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref)),
	itemAutoLink: regexp.MustCompile(`^<([^ >]+(@|:\/)[^ >]+)>`),
	itemGfmLink:  regexp.MustCompile(`^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])`),
	// Image
	// TODO(Ariel): DRY — same pattern as itemLink
	itemImage: regexp.MustCompile(fmt.Sprintf(`^!?\[(%s)\]\(%s\)`, reLinkText, reLinkHref)),
}
// stateFn represents the state of the scanner as a function that returns the next state.
type stateFn func(*lexer) stateFn
// lexer holds the state of the scanner.
type lexer struct {
	name    string    // the name of the input; used only for error reports
	input   string    // the string being scanned
	state   stateFn   // the next lexing function to enter
	pos     Pos       // current position in the input
	start   Pos       // start position of this item
	width   Pos       // width of last rune read from input
	lastPos Pos       // position of most recent item returned by nextItem
	items   chan item // channel of scanned items
	eot     Pos       // end of table
}
// lex creates a new lexer for the input string and starts its state
// machine in a background goroutine.
func lex(name, input string) *lexer {
	lx := &lexer{name: name, input: input, items: make(chan item)}
	go lx.run()
	return lx
}
// lexInline creates a lexer that performs one-phase, inline-only
// scanning of the input in a background goroutine.
func lexInline(input string) *lexer {
	lx := &lexer{input: input, items: make(chan item)}
	go lx.lexInline()
	return lx
}
// run runs the state machine for the lexer until a state returns nil,
// then closes the item channel so nextItem unblocks.
func (l *lexer) run() {
	for l.state = lexAny; l.state != nil; {
		l.state = l.state(l)
	}
	close(l.items)
}
// next returns the next rune in the input, advancing pos.
// At end of input it returns eof and zeroes width, so a subsequent
// backup is a no-op.
func (l *lexer) next() rune {
	if int(l.pos) >= len(l.input) {
		l.width = 0
		return eof
	}
	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
	l.width = Pos(w)
	l.pos += l.width
	return r
}
// lexAny is the top-level lexer state: it dispatches on the first rune
// of a block to the appropriate block-level scanner.
func lexAny(l *lexer) stateFn {
	switch r := l.next(); r {
	case eof:
		return nil
	case '*', '-', '_', '+':
		// Two rule characters in a row look like a horizontal rule;
		// a single one starts a list item.
		p := l.peek()
		if p == '*' || p == '-' || p == '_' {
			l.backup()
			return lexHr
		} else {
			l.backup()
			return lexList
		}
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		// A leading digit can only open an ordered-list item.
		l.backup()
		return lexList
	case '<':
		l.backup()
		return lexHtml
	case '>':
		l.emit(itemBlockQuote)
		return lexText
	case '[':
		// A bracket may open a link definition ("[id]: url").
		l.backup()
		return lexDefLink
	case '#':
		l.backup()
		return lexHeading
	case ' ', '\t':
		// Should be here ?
		// TODO(Ariel): test that it's a codeBlock and not list for sure
		if block[itemCodeBlock].MatchString(l.input[l.pos-1:]) {
			l.backup()
			return lexCode
		}
		// Keep moving forward until we get all the
		// indentation size
		for ; r == l.peek(); r = l.next() {
		}
		l.emit(itemIndent)
		return lexAny
	case '`', '~':
		// GFM fenced code needs at least three fence chars; probe the
		// next two bytes. Fix: guard the slice bounds — previously an
		// input ending within two bytes of a fence char panicked with
		// "slice bounds out of range".
		if int(l.pos)+2 <= len(l.input) {
			c := l.input[l.pos : l.pos+2]
			if c == "``" || c == "~~" {
				l.backup()
				return lexGfmCode
			}
		}
		fallthrough
	case '|':
		// Leading-pipe table: remember where it ends (eot) so lexText
		// can emit pipes while inside it.
		if m := block[itemLpTable].FindString(l.input[l.pos-1:]); m != "" {
			l.eot = l.start + Pos(len(m))
			l.emit(itemLpTable)
		}
		fallthrough
	default:
		// Pipe-less table detection.
		if m := block[itemTable].FindString(l.input[l.pos-1:]); m != "" {
			l.eot = l.start + Pos(len(m)) - l.width
			l.emit(itemTable)
			// we go one step back to get the full text
			// in the lexText phase
			l.start--
		}
		l.backup()
		return lexText
	}
}
// lexHeading scans an ATX-style heading item.
func lexHeading(l *lexer) stateFn {
	heading := block[itemHeading].FindString(l.input[l.pos:])
	if heading == "" {
		return lexText
	}
	l.pos += Pos(len(heading))
	// Emit without the trailing newline.
	// TODO(Ariel): hack, fix regexp
	if strings.HasSuffix(heading, "\n") {
		l.pos--
	}
	l.emit(itemHeading)
	return lexAny
}
// lexHr scans a horizontal rule item.
func lexHr(l *lexer) stateFn {
	// A single FindString both tests and captures the rule, replacing
	// the previous MatchString+FindString pair that scanned the same
	// input twice.
	if match := block[itemHr].FindString(l.input[l.pos:]); match != "" {
		l.pos += Pos(len(match))
		l.emit(itemHr)
		return lexAny
	}
	return lexText
}
// lexGfmCode scans a GFM fenced code block.
func lexGfmCode(l *lexer) stateFn {
	// One FindString call both tests and captures the block, replacing
	// the previous MatchString+FindString double scan of the same input.
	if match := block[itemGfmCodeBlock].FindString(l.input[l.pos:]); match != "" {
		l.pos += Pos(len(match))
		l.emit(itemGfmCodeBlock)
		return lexAny
	}
	return lexText
}
// lexCode scans an indented code block.
func lexCode(l *lexer) stateFn {
	m := block[itemCodeBlock].FindString(l.input[l.pos:])
	l.pos += Pos(len(m))
	l.emit(itemCodeBlock)
	return lexAny
}
// lexList scans an ordered or unordered list marker; the item body is
// handed on to lexText.
func lexList(l *lexer) stateFn {
	marker := block[itemList].FindString(l.input[l.pos:])
	if marker != "" {
		l.pos += Pos(len(marker))
		l.emit(itemList)
	}
	return lexText
}
// lexText scans until end-of-line(\n)
// We have a lot of things to do in this lexText,
// for example: ignore itemBr on list/tables.
func lexText(l *lexer) stateFn {
	// Drain text before emitting: pending plain characters become an
	// itemText before the structural item itself.
	emit := func(item itemType, pos Pos) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += pos
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); {
		case r == eof:
			// eof (-1) is emitted as a sentinel item type.
			emit(eof, Pos(0))
			break Loop
		case r == '\n':
			emit(itemNewLine, l.width)
			break Loop
		case r == '|':
			// Pipes are structural only while inside a table region.
			if l.eot > l.pos {
				emit(itemPipe, l.width)
				break
			}
			l.next()
		default:
			// Test for Setext-style headers
			if m := block[itemLHeading].FindString(l.input[l.pos:]); m != "" {
				emit(itemLHeading, Pos(len(m)))
				break Loop
			}
			l.next()
		}
	}
	return lexAny
}
// backup steps back one rune. Can only be called once per call of next.
func (l *lexer) backup() {
	l.pos -= l.width
}
// peek returns but does not consume the next rune in the input.
func (l *lexer) peek() rune {
	r := l.next()
	l.backup()
	return r
}
// emit passes an item back to the client; the item's value is the span
// [start, pos), and start then advances to pos.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.start, l.input[l.start:l.pos]}
	l.start = l.pos
}
// nextItem returns the next item token, called by the parser.
func (l *lexer) nextItem() item {
	item := <-l.items
	l.lastPos = l.pos
	return item
}
// lexInline performs one-phase lexing of span-level items over the
// whole input, then closes the item channel.
func (l *lexer) lexInline() {
	// Drain text before emitting
	emit := func(item itemType, pos Pos) {
		if l.pos > l.start {
			l.emit(itemText)
		}
		l.pos += pos
		l.emit(item)
	}
Loop:
	for {
		switch r := l.peek(); {
		case r == eof:
			// We don't want to emit EOF in inline mode; flush the
			// remaining text instead.
			// emit(eof, Pos(0))
			l.emit(itemText)
			break Loop
		case r == ' ':
			// Hard line break (two or more spaces before \n).
			if m := span[itemBr].FindString(l.input[l.pos:]); m != "" {
				// pos - length of new-line
				emit(itemBr, Pos(len(m)))
				break
			}
			l.next()
		// if it starts as an emphasis
		case r == '_', r == '*', r == '~', r == '`':
			input := l.input[l.pos:]
			// Strong is tried before Italic so "**" wins over "*".
			// Strong
			if m := span[itemStrong].FindString(input); m != "" {
				emit(itemStrong, Pos(len(m)))
				break
			}
			// Italic
			if m := span[itemItalic].FindString(input); m != "" {
				emit(itemItalic, Pos(len(m)))
				break
			}
			// Strike
			if m := span[itemStrike].FindString(input); m != "" {
				emit(itemStrike, Pos(len(m)))
				break
			}
			// InlineCode
			if m := span[itemCode].FindString(input); m != "" {
				emit(itemCode, Pos(len(m)))
				break
			}
			l.next()
		// itemLink, itemAutoLink, itemImage
		case r == '[', r == '<', r == '!':
			input := l.input[l.pos:]
			if m := span[itemLink].FindString(input); m != "" {
				pos := Pos(len(m))
				// '[' is a link; '!' shares the pattern but is an image.
				if r == '[' {
					emit(itemLink, pos)
				} else {
					emit(itemImage, pos)
				}
				break
			}
			if m := span[itemAutoLink].FindString(input); m != "" {
				emit(itemAutoLink, Pos(len(m)))
				break
			}
			l.next()
		default:
			// Bare URLs (GFM autolinking).
			input := l.input[l.pos:]
			if m := span[itemGfmLink].FindString(input); m != "" {
				emit(itemGfmLink, Pos(len(m)))
				break
			}
			l.next()
		}
	}
	close(l.items)
}
// lexHtml scans an HTML block (the previous comment was copy-pasted
// from lexList).
func lexHtml(l *lexer) stateFn {
	if match, res := l.MatchHtml(l.input[l.pos:]); match {
		l.pos += Pos(len(res))
		l.emit(itemHTML)
		return lexAny
	}
	return lexText
}
// Fixed patterns used by MatchHtml, hoisted to package level so they
// are compiled once instead of on every call (regexp.MustCompile in a
// hot scanning path recompiled all three per invocation).
var (
	reHtmlComment = regexp.MustCompile(`(?s)<!--.*?-->`)
	reHtmlStart   = regexp.MustCompile(`^<(\w+)(?:"[^"]*"|'[^']*'|[^'">])*?>`)
	// TODO: Add all span-tags and move to config.
	reHtmlSpan = regexp.MustCompile(`^(a|em|strong|small|s|q|data|time|code|sub|sup|i|b|u|span|br|del|img)$`)
)
// MatchHtml tests whether the given input starts with an HTML block
// pattern (blocks only) and returns the matched prefix when it does.
func (l *lexer) MatchHtml(input string) (bool, string) {
	// HTML comments count as a block regardless of tag name.
	if m := reHtmlComment.FindString(input); m != "" {
		return true, m
	}
	if m := reHtmlStart.FindStringSubmatch(input); len(m) != 0 {
		el, name := m[0], m[1]
		// span-level tags are treated as plain text, not HTML blocks
		if reHtmlSpan.MatchString(name) {
			return false, ""
		}
		// if it's a self-closed html element, the tag is the whole block
		if strings.HasSuffix(el, "/>") {
			return true, el
		}
		// The closing-tag pattern depends on the element name, so it
		// still has to be built per call; a name that fails to compile
		// is simply not a block.
		reStr := fmt.Sprintf(`(?s)(.)+?<\/%s> *(?:\n{2,}|\s*$)`, name)
		reMatch, err := regexp.Compile(reStr)
		if err != nil {
			return false, ""
		}
		if m := reMatch.FindString(input); m != "" {
			return true, m
		}
	}
	return false, ""
}
// lexDefLink scans a link definition ("[id]: url").
func lexDefLink(l *lexer) stateFn {
	def := block[itemDefLink].FindString(l.input[l.pos:])
	if def == "" {
		return lexText
	}
	l.pos += Pos(len(def))
	l.emit(itemDefLink)
	return lexAny
}
|
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package frontend
import (
"bytes"
"fmt"
"html/template"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"go.chromium.org/gae/service/info"
"go.chromium.org/luci/auth/identity"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/server/analytics"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/server/templates"
"go.chromium.org/luci/milo/buildsource/buildbot/buildstore"
"go.chromium.org/luci/milo/common"
"go.chromium.org/luci/milo/frontend/ui"
"go.chromium.org/luci/milo/git"
)
// A collection of useful templating functions.
// funcMap is what gets fed into the template bundle; each helper below
// is callable from the HTML templates by its map key.
var funcMap = template.FuncMap{
	"faviconMIMEType":  faviconMIMEType,
	"formatCommitDesc": formatCommitDesc,
	"formatTime":       formatTime,
	"humanDuration":    humanDuration,
	"localTime":        localTime,
	"localTimeTooltip": localTimeTooltip,
	"obfuscateEmail":   obfuscateEmail,
	"pagedURL":         pagedURL,
	"parseRFC3339":     parseRFC3339,
	"percent":          percent,
	"prefix":           prefix,
	"startswith":       strings.HasPrefix,
	"shortenEmail":     shortenEmail,
	"sub":              sub,
	"toLower":          strings.ToLower,
}
// localTime returns a <span> element with t in human format
// that will be converted to local timezone in the browser.
// The visible fallback text is t in RFC850 format.
// Recommended usage: {{ .Date | localTime "N/A" }}
func localTime(ifZero string, t time.Time) template.HTML {
	return localTimeCommon(ifZero, t, "", t.Format(time.RFC850))
}
// localTimeTooltip is similar to localTime, but shows time in a tooltip and
// allows to specify inner text to be added to the created <span> element.
// Recommended usage: {{ .Date | localTimeTooltip "innerText" "N/A" }}
func localTimeTooltip(innerText string, ifZero string, t time.Time) template.HTML {
	return localTimeCommon(ifZero, t, "tooltip-only", innerText)
}
func localTimeCommon(ifZero string, t time.Time, tooltipClass string, innerText string) template.HTML {
if t.IsZero() {
return template.HTML(template.HTMLEscapeString(ifZero))
}
milliseconds := t.UnixNano() / 1e6
return template.HTML(fmt.Sprintf(
`<span class="local-time %s" data-timestamp="%d">%s</span>`,
tooltipClass,
milliseconds,
template.HTMLEscapeString(innerText)))
}
// rURL matches anything that looks like an https:// URL.
var rURL = regexp.MustCompile(`\bhttps://\S*\b`)
// rBUGLINE matches a bug line in a commit, including if it is quoted.
// Expected formats: "BUG: 1234,1234", "bugs=1234", " &gt; &gt; BUG: 123"
// We use &gt; for > because this needs to deal with HTML escaped text.
// Fix: the pattern itself must also use the escaped form — the input has
// already been through template.HTMLEscapeString, so a literal ">" never
// appears; quoted bug lines were not being matched.
var rBUGLINE = regexp.MustCompile(`(?m)^(&gt;| )*(?i:bugs?)[:=].+$`)
// rBUG matches expected items in a bug line. Expected format: 12345, project:12345, #12345
var rBUG = regexp.MustCompile(`\b(\w+:)?#?\d+\b`)
// rBUGLINK matches bug short-links.
// Expected formats: b/123456, crbug/123456, crbug/project/123456, crbug:123456, etc.
var rBUGLINK = regexp.MustCompile(`\b(b|crbug(\.com)?([:/]\w+)?)[:/]\d+\b`)
// tURL is a URL template.
var tURL = template.Must(template.New("tURL").Parse("<a href=\"{{.URL}}\">{{.Label}}</a>"))
// link is the data fed to the tURL anchor template.
type link struct {
	Label string // visible anchor text
	URL   string // href target
}
// makeLink renders an <a> element for label/href via the tURL template,
// falling back to the bare label if template execution fails.
func makeLink(label, href string) string {
	var buf bytes.Buffer
	if err := tURL.Execute(&buf, link{Label: label, URL: href}); err != nil {
		return label
	}
	return buf.String()
}
// formatCommitDesc takes a commit message and adds embellishments such as:
// * Linkify https:// URLs
// * Linkify bug numbers using https://crbug.com/
// * Linkify b/ bug links
// * Linkify crbug/ bug links
func formatCommitDesc(desc string) template.HTML {
	// Since we take in a string and return a trusted raw HTML string, escape
	// everything first.
	desc = template.HTMLEscapeString(desc)
	// Replace https:// URLs
	result := rURL.ReplaceAllStringFunc(desc, func(s string) string {
		return makeLink(s, s)
	})
	// Replace b/ and crbug/ URLs
	result = rBUGLINK.ReplaceAllStringFunc(result, func(s string) string {
		// Normalize separator: "crbug:123" -> "crbug/123".
		u := strings.Replace(s, ":", "/", -1)
		u = strings.Replace(u, "crbug/", "crbug.com/", 1)
		scheme := "https://"
		// b/ short-links use plain http.
		if strings.HasPrefix(u, "b/") {
			scheme = "http://"
		}
		return makeLink(s, scheme+u)
	})
	// Replace BUG: lines with URLs by rewriting all bug numbers with links.
	return template.HTML(rBUGLINE.ReplaceAllStringFunc(result, func(s string) string {
		return rBUG.ReplaceAllStringFunc(s, func(sBug string) string {
			// "#123" -> "123" and "proj:123" -> "proj/123" for the crbug path.
			path := strings.Replace(strings.Replace(sBug, "#", "", 1), ":", "/", 1)
			return makeLink(sBug, "https://crbug.com/"+path)
		})
	}))
}
// humanDuration translates d into a human readable string of x units y units,
// where x and y could be in days, hours, minutes, or seconds, whichever is the
// largest.
func humanDuration(d time.Duration) string {
t := int64(d.Seconds())
day := t / 86400
hr := (t % 86400) / 3600
if day > 0 {
if hr != 0 {
return fmt.Sprintf("%d days %d hrs", day, hr)
}
return fmt.Sprintf("%d days", day)
}
min := (t % 3600) / 60
if hr > 0 {
if min != 0 {
return fmt.Sprintf("%d hrs %d mins", hr, min)
}
return fmt.Sprintf("%d hrs", hr)
}
sec := t % 60
if min > 0 {
if sec != 0 {
return fmt.Sprintf("%d mins %d secs", min, sec)
}
return fmt.Sprintf("%d mins", min)
}
if sec != 0 {
return fmt.Sprintf("%d secs", sec)
}
if d > time.Millisecond {
return fmt.Sprintf("%d ms", d/time.Millisecond)
}
return "0"
}
// obfuscateEmail converts a string containing an email address
// email@address.com into email<junk>@address.com, where the junk span is
// hidden via CSS so scrapers see garbage but users see the address.
func obfuscateEmail(email string) template.HTML {
	escaped := template.HTMLEscapeString(email)
	const junk = `<span style="display:none">ohnoyoudont</span>@`
	return template.HTML(strings.Replace(escaped, "@", junk, -1))
}
// parseRFC3339 parses time represented as a RFC3339 or RFC3339Nano string.
// If cannot parse, returns zero time.
func parseRFC3339(s string) time.Time {
t, err := time.Parse(time.RFC3339, s)
if err == nil {
return t
}
t, err = time.Parse(time.RFC3339Nano, s)
if err == nil {
return t
}
return time.Time{}
}
// formatTime takes a time object and returns a formatted RFC3339 string.
func formatTime(t time.Time) string {
	return t.Format(time.RFC3339)
}
// sub subtracts one number from another, because apparently go templates aren't
// smart enough to do that.
func sub(a, b int) int {
	return a - b
}
// shortenEmail shortens Google emails by stripping every "@google.com"
// occurrence; non-Google addresses pass through unchanged.
func shortenEmail(email string) string {
	return strings.Replace(email, "@google.com", "", -1)
}
// prefix abbreviates a string to the specified number of bytes.
// NOTE(review): this slices by byte, so a multi-byte rune at the cut
// point would be split — fine for git hashes, confirm for other inputs.
// Recommended usage: {{ .GitHash | prefix 8 }}
func prefix(prefixLen int, s string) string {
	if len(s) <= prefixLen {
		return s
	}
	return s[:prefixLen]
}
// GetLimit extracts the "limit", "numbuilds", or "num_builds" http param from
// the request, or returns def implying no limit was specified.
func GetLimit(r *http.Request, def int) int {
sLimit := r.FormValue("limit")
if sLimit == "" {
sLimit = r.FormValue("numbuilds")
if sLimit == "" {
sLimit = r.FormValue("num_builds")
if sLimit == "" {
return def
}
}
}
limit, err := strconv.Atoi(sLimit)
if err != nil || limit < 0 {
return def
}
return limit
}
// GetReload extracts the "reload" http param from the request,
// or returns def implying no limit was specified.
func GetReload(r *http.Request, def int) int {
sReload := r.FormValue("reload")
if sReload == "" {
return def
}
refresh, err := strconv.Atoi(sReload)
if err != nil || refresh < 0 {
return def
}
return refresh
}
// pagedURL returns a self URL with the given cursor and limit paging
// options. If limit is 0, the limit already present on the request is
// inherited; if neither is specified, the limit parameter is omitted.
// A cursor of "EMPTY" removes the cursor parameter, "" leaves any
// existing cursor in place, and anything else overwrites it.
func pagedURL(r *http.Request, limit int, cursor string) string {
	if limit == 0 {
		if limit = GetLimit(r, -1); limit < 0 {
			limit = 0
		}
	}
	q := r.URL.Query()
	switch cursor {
	case "EMPTY":
		q.Del("cursor")
	case "":
		// Do nothing, just leave the cursor in.
	default:
		q.Set("cursor", cursor)
	}
	if limit < 0 {
		q.Del("limit")
	} else if limit > 0 {
		q.Set("limit", fmt.Sprintf("%d", limit))
	}
	u := *r.URL
	u.RawQuery = q.Encode()
	return u.String()
}
// percent divides numerator by divisor and returns the percentage
// formatted with one decimal place.
func percent(numerator, divisor int) string {
	return fmt.Sprintf("%.1f", float64(numerator)*100.0/float64(divisor))
}
// faviconMIMEType derives the MIME type from a URL's file extension.
// Only valid favicon image formats are supported; anything else yields
// the empty string.
// NOTE(review): ".ico" is served as "image/ico" here; the registered
// type is "image/x-icon" — confirm consumers before changing.
func faviconMIMEType(fileURL string) string {
	switch {
	case strings.HasSuffix(fileURL, ".png"):
		return "image/png"
	case strings.HasSuffix(fileURL, ".ico"):
		return "image/ico"
	case strings.HasSuffix(fileURL, ".jpeg"), strings.HasSuffix(fileURL, ".jpg"):
		return "image/jpeg"
	case strings.HasSuffix(fileURL, ".gif"):
		return "image/gif"
	default:
		return ""
	}
}
// getTemplateBundle is used to render HTML templates. It provides base
// args passed to all templates. It takes a path to the template folder,
// relative to the path of the binary during runtime.
func getTemplateBundle(templatePath string) *templates.Bundle {
	return &templates.Bundle{
		Loader:          templates.FileSystemLoader(templatePath),
		DebugMode:       info.IsDevAppServer,
		DefaultTemplate: "base",
		DefaultArgs: func(c context.Context, e *templates.Extra) (templates.Args, error) {
			// Login/logout URLs redirect back to the page being rendered.
			loginURL, err := auth.LoginURL(c, e.Request.URL.RequestURI())
			if err != nil {
				return nil, err
			}
			logoutURL, err := auth.LogoutURL(c, e.Request.URL.RequestURI())
			if err != nil {
				return nil, err
			}
			project := e.Params.ByName("project")
			group := e.Params.ByName("group")
			return templates.Args{
				// Major version only (everything before the first dot).
				"AppVersion":  strings.Split(info.VersionID(c), ".")[0],
				"IsAnonymous": auth.CurrentIdentity(c) == identity.AnonymousIdentity,
				"User":        auth.CurrentUser(c),
				"LoginURL":    loginURL,
				"LogoutURL":   logoutURL,
				"CurrentTime": clock.Now(c),
				"Analytics":   analytics.Snippet(c),
				"RequestID":   info.RequestID(c),
				"Request":     e.Request,
				"Navi":        ProjectLinks(project, group),
				"ProjectID":   project,
			}, nil
		},
		FuncMap: funcMap,
	}
}
// withGitilesMiddleware is a middleware that installs a prod Gitiles client
// factory into the context.
func withGitilesMiddleware(c *router.Context, next router.Handler) {
	c.Context = git.UseFactory(c.Context, git.AuthenticatedProdClient)
	next(c)
}
// withAccessClientMiddleware is a middleware that installs a prod buildbucket
// access API client into the context.
//
// This middleware depends on auth middleware in order to generate the access
// client.
func withAccessClientMiddleware(c *router.Context, next router.Handler) {
	client, err := common.NewAccessClient(c.Context)
	if err != nil {
		// Client construction failed: render the error page and stop the
		// middleware chain (next is not called).
		ErrorHandler(c, err)
		return
	}
	c.Context = common.WithAccessClient(c.Context, client)
	next(c)
}
// projectACLMiddleware adds ACL checks on a per-project basis.
// Expects c.Params to have project parameter.
func projectACLMiddleware(c *router.Context, next router.Handler) {
	switch allowed, err := common.IsAllowed(c.Context, c.Params.ByName("project")); {
	case err != nil:
		ErrorHandler(c, err)
	case !allowed:
		ErrorHandler(c, errors.New("no access to project", common.CodeNoAccess))
	default:
		// Access granted: continue down the chain.
		next(c)
	}
}
// emulationMiddleware enables buildstore emulation if "emulation" query
// string parameter is not empty.
func emulationMiddleware(c *router.Context, next router.Handler) {
	c.Context = buildstore.WithEmulation(c.Context, c.Request.FormValue("emulation") != "")
	next(c)
}
// ProjectLinks returns the navigation link groups surrounding a project
// and, optionally, a builder group within it. An empty project yields
// nil.
func ProjectLinks(project, group string) []ui.LinkGroup {
	if project == "" {
		return nil
	}
	links := []ui.LinkGroup{{
		Name: ui.NewLink(
			project,
			fmt.Sprintf("/p/%s", project),
			fmt.Sprintf("Project page for %s", project)),
		Links: []*ui.Link{
			ui.NewLink(
				"Builders",
				fmt.Sprintf("/p/%s/builders", project),
				fmt.Sprintf("All builders for project %s", project)),
		},
	}}
	if group == "" {
		return links
	}
	groupLinks := []*ui.Link{
		ui.NewLink(
			"Console",
			fmt.Sprintf("/p/%s/g/%s/console", project, group),
			fmt.Sprintf("Console for group %s in project %s", group, project)),
		ui.NewLink(
			"Builders",
			fmt.Sprintf("/p/%s/g/%s/builders", project, group),
			fmt.Sprintf("Builders for group %s in project %s", group, project)),
	}
	return append(links, ui.LinkGroup{
		Name:  ui.NewLink(group, "", ""),
		Links: groupLinks,
	})
}
Only display Console link if console is enabled
R=a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org
Bug: 828790
Change-Id: I17bee0842a7765b6770587167a29218885055d38
Reviewed-on: https://chromium-review.googlesource.com/1025998
Commit-Queue: Andrii Shyshkalov <a30c74fa30536fe7ea81ed6dec202e35e149e1fd@chromium.org>
Reviewed-by: Ryan Tseng <2190fe9a39b4920ec7bd59c848ec5cccd163056f@chromium.org>
// Copyright 2017 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package frontend
import (
"bytes"
"fmt"
"html/template"
"net/http"
"regexp"
"strconv"
"strings"
"time"
"golang.org/x/net/context"
"go.chromium.org/gae/service/info"
"go.chromium.org/luci/auth/identity"
"go.chromium.org/luci/common/clock"
"go.chromium.org/luci/common/errors"
"go.chromium.org/luci/common/logging"
"go.chromium.org/luci/server/analytics"
"go.chromium.org/luci/server/auth"
"go.chromium.org/luci/server/router"
"go.chromium.org/luci/server/templates"
"go.chromium.org/luci/milo/buildsource/buildbot/buildstore"
"go.chromium.org/luci/milo/common"
"go.chromium.org/luci/milo/frontend/ui"
"go.chromium.org/luci/milo/git"
)
// A collection of useful templating functions.
// funcMap is what gets fed into the template bundle; each helper below
// is callable from the HTML templates by its map key.
var funcMap = template.FuncMap{
	"faviconMIMEType":  faviconMIMEType,
	"formatCommitDesc": formatCommitDesc,
	"formatTime":       formatTime,
	"humanDuration":    humanDuration,
	"localTime":        localTime,
	"localTimeTooltip": localTimeTooltip,
	"obfuscateEmail":   obfuscateEmail,
	"pagedURL":         pagedURL,
	"parseRFC3339":     parseRFC3339,
	"percent":          percent,
	"prefix":           prefix,
	"shortenEmail":     shortenEmail,
	"startswith":       strings.HasPrefix,
	"sub":              sub,
	"toLower":          strings.ToLower,
}
// localTime returns a <span> element with t in human format
// that will be converted to local timezone in the browser.
// The visible fallback text is t in RFC850 format.
// Recommended usage: {{ .Date | localTime "N/A" }}
func localTime(ifZero string, t time.Time) template.HTML {
	return localTimeCommon(ifZero, t, "", t.Format(time.RFC850))
}
// localTimeTooltip is similar to localTime, but shows time in a tooltip and
// allows to specify inner text to be added to the created <span> element.
// Recommended usage: {{ .Date | localTimeTooltip "innerText" "N/A" }}
func localTimeTooltip(innerText string, ifZero string, t time.Time) template.HTML {
	return localTimeCommon(ifZero, t, "tooltip-only", innerText)
}
func localTimeCommon(ifZero string, t time.Time, tooltipClass string, innerText string) template.HTML {
if t.IsZero() {
return template.HTML(template.HTMLEscapeString(ifZero))
}
milliseconds := t.UnixNano() / 1e6
return template.HTML(fmt.Sprintf(
`<span class="local-time %s" data-timestamp="%d">%s</span>`,
tooltipClass,
milliseconds,
template.HTMLEscapeString(innerText)))
}
// rURL matches anything that looks like an https:// URL.
var rURL = regexp.MustCompile(`\bhttps://\S*\b`)
// rBUGLINE matches a bug line in a commit, including if it is quoted.
// Expected formats: "BUG: 1234,1234", "bugs=1234", " &gt; &gt; BUG: 123"
// We use &gt; for > because this needs to deal with HTML escaped text.
// Fix: the pattern itself must also use the escaped form — the input has
// already been through template.HTMLEscapeString, so a literal ">" never
// appears; quoted bug lines were not being matched.
var rBUGLINE = regexp.MustCompile(`(?m)^(&gt;| )*(?i:bugs?)[:=].+$`)
// rBUG matches expected items in a bug line. Expected format: 12345, project:12345, #12345
var rBUG = regexp.MustCompile(`\b(\w+:)?#?\d+\b`)
// rBUGLINK matches bug short-links.
// Expected formats: b/123456, crbug/123456, crbug/project/123456, crbug:123456, etc.
var rBUGLINK = regexp.MustCompile(`\b(b|crbug(\.com)?([:/]\w+)?)[:/]\d+\b`)
// tURL is a URL template.
var tURL = template.Must(template.New("tURL").Parse("<a href=\"{{.URL}}\">{{.Label}}</a>"))
// link pairs a human-readable label with its destination URL; it is the
// data object fed to the tURL anchor template.
type link struct {
	Label string // visible link text
	URL   string // href target
}
// makeLink renders an <a> element for label pointing at href using the tURL
// template. If template execution fails, the bare label is returned instead.
func makeLink(label, href string) string {
	var buf bytes.Buffer
	err := tURL.Execute(&buf, link{Label: label, URL: href})
	if err != nil {
		return label
	}
	return buf.String()
}
// formatCommitDesc takes a commit message and adds embellishments such as:
// * Linkify https:// URLs
// * Linkify bug numbers using https://crbug.com/
// * Linkify b/ bug links
// * Linkify crbug/ bug links
func formatCommitDesc(desc string) template.HTML {
	// Since we take in a string and return a trusted raw HTML string, escape
	// everything first.
	desc = template.HTMLEscapeString(desc)
	// Replace https:// URLs
	result := rURL.ReplaceAllStringFunc(desc, func(s string) string {
		return makeLink(s, s)
	})
	// Replace b/ and crbug/ URLs
	result = rBUGLINK.ReplaceAllStringFunc(result, func(s string) string {
		// Normalize separator: "crbug:123" -> "crbug/123" etc.
		u := strings.Replace(s, ":", "/", -1)
		u = strings.Replace(u, "crbug/", "crbug.com/", 1)
		scheme := "https://"
		// b/ short links get the http scheme.
		if strings.HasPrefix(u, "b/") {
			scheme = "http://"
		}
		return makeLink(s, scheme+u)
	})
	// Replace BUG: lines with URLs by rewriting all bug numbers with links.
	// NOTE(review): pass order matters — this runs on text already rewritten
	// by the two passes above.
	return template.HTML(rBUGLINE.ReplaceAllStringFunc(result, func(s string) string {
		return rBUG.ReplaceAllStringFunc(s, func(sBug string) string {
			// "project:123" becomes crbug path "project/123"; a leading '#' is dropped.
			path := strings.Replace(strings.Replace(sBug, "#", "", 1), ":", "/", 1)
			return makeLink(sBug, "https://crbug.com/"+path)
		})
	}))
}
// humanDuration translates d into a human readable string of x units y units,
// where x and y could be in days, hours, minutes, or seconds, whichever is the
// largest.
func humanDuration(d time.Duration) string {
t := int64(d.Seconds())
day := t / 86400
hr := (t % 86400) / 3600
if day > 0 {
if hr != 0 {
return fmt.Sprintf("%d days %d hrs", day, hr)
}
return fmt.Sprintf("%d days", day)
}
min := (t % 3600) / 60
if hr > 0 {
if min != 0 {
return fmt.Sprintf("%d hrs %d mins", hr, min)
}
return fmt.Sprintf("%d hrs", hr)
}
sec := t % 60
if min > 0 {
if sec != 0 {
return fmt.Sprintf("%d mins %d secs", min, sec)
}
return fmt.Sprintf("%d mins", min)
}
if sec != 0 {
return fmt.Sprintf("%d secs", sec)
}
if d > time.Millisecond {
return fmt.Sprintf("%d ms", d/time.Millisecond)
}
return "0"
}
// obfuscateEmail converts a string containing email adddress email@address.com
// into email<junk>@address.com.
func obfuscateEmail(email string) template.HTML {
email = template.HTMLEscapeString(email)
return template.HTML(strings.Replace(
email, "@", "<span style=\"display:none\">ohnoyoudont</span>@", -1))
}
// parseRFC3339 parses time represented as a RFC3339 or RFC3339Nano string.
// If cannot parse, returns zero time.
func parseRFC3339(s string) time.Time {
t, err := time.Parse(time.RFC3339, s)
if err == nil {
return t
}
t, err = time.Parse(time.RFC3339Nano, s)
if err == nil {
return t
}
return time.Time{}
}
// formatTime takes a time object and returns a formatted RFC3339 string.
func formatTime(t time.Time) string {
return t.Format(time.RFC3339)
}
// sub subtracts b from a; provided because Go templates cannot do
// arithmetic on their own.
func sub(a, b int) int {
	difference := a - b
	return difference
}
// shortenEmail shortens Google emails by stripping the "@google.com" domain;
// other addresses pass through unchanged.
func shortenEmail(email string) string {
	return strings.ReplaceAll(email, "@google.com", "")
}
// prefix abbreviates s to at most prefixLen bytes.
// Recommended usage: {{ .GitHash | prefix 8 }}
//
// Fix: a negative prefixLen used to panic on the slice expression; it is now
// clamped to 0 and yields the empty string.
// NOTE(review): the cut is byte-based and can split a multi-byte UTF-8 rune —
// fine for git hashes, confirm for other inputs.
func prefix(prefixLen int, s string) string {
	if prefixLen < 0 {
		prefixLen = 0
	}
	if len(s) > prefixLen {
		return s[:prefixLen]
	}
	return s
}
// GetLimit extracts the "limit", "numbuilds", or "num_builds" http param from
// the request, or returns def implying no limit was specified.
func GetLimit(r *http.Request, def int) int {
sLimit := r.FormValue("limit")
if sLimit == "" {
sLimit = r.FormValue("numbuilds")
if sLimit == "" {
sLimit = r.FormValue("num_builds")
if sLimit == "" {
return def
}
}
}
limit, err := strconv.Atoi(sLimit)
if err != nil || limit < 0 {
return def
}
return limit
}
// GetReload extracts the "reload" http param from the request,
// or returns def implying no limit was specified.
func GetReload(r *http.Request, def int) int {
sReload := r.FormValue("reload")
if sReload == "" {
return def
}
refresh, err := strconv.Atoi(sReload)
if err != nil || refresh < 0 {
return def
}
return refresh
}
// pagedURL returns a self URL with the given cursor and limit paging options.
// A limit of 0 inherits whatever limit is set in the request (omitted when
// unspecified there too); the special cursor "EMPTY" removes the cursor param,
// while "" leaves the current cursor untouched.
func pagedURL(r *http.Request, limit int, cursor string) string {
	if limit == 0 {
		if limit = GetLimit(r, -1); limit < 0 {
			limit = 0
		}
	}
	q := r.URL.Query()
	if cursor == "EMPTY" {
		q.Del("cursor")
	} else if cursor != "" {
		q.Set("cursor", cursor)
	}
	if limit < 0 {
		q.Del("limit")
	} else if limit > 0 {
		q.Set("limit", strconv.Itoa(limit))
	}
	u := *r.URL
	u.RawQuery = q.Encode()
	return u.String()
}
// percent divides numerator by divisor and returns the percentage with one
// decimal place, e.g. percent(1, 2) == "50.0".
// Fix: a zero divisor used to render "+Inf"/"NaN" in templates; it now
// returns "0.0".
func percent(numerator, divisor int) string {
	if divisor == 0 {
		return "0.0"
	}
	p := float64(numerator) * 100.0 / float64(divisor)
	return fmt.Sprintf("%.1f", p)
}
// faviconMIMEType derives the MIME type from a URL's file extension. Only
// valid favicon image formats are recognized; anything else yields "".
func faviconMIMEType(fileURL string) string {
	known := []struct{ ext, mime string }{
		{".png", "image/png"},
		{".ico", "image/ico"},
		{".jpeg", "image/jpeg"},
		{".jpg", "image/jpeg"},
		{".gif", "image/gif"},
	}
	for _, k := range known {
		if strings.HasSuffix(fileURL, k.ext) {
			return k.mime
		}
	}
	return ""
}
// getTemplateBundle is used to render HTML templates. It provides base args
// passed to all templates. It takes a path to the template folder, relative
// to the path of the binary during runtime.
func getTemplateBundle(templatePath string) *templates.Bundle {
	return &templates.Bundle{
		Loader:          templates.FileSystemLoader(templatePath),
		DebugMode:       info.IsDevAppServer,
		DefaultTemplate: "base",
		// DefaultArgs assembles the arguments every page template receives:
		// auth URLs, the current user/identity, request metadata, and the
		// project/group navigation links.
		DefaultArgs: func(c context.Context, e *templates.Extra) (templates.Args, error) {
			loginURL, err := auth.LoginURL(c, e.Request.URL.RequestURI())
			if err != nil {
				return nil, err
			}
			logoutURL, err := auth.LogoutURL(c, e.Request.URL.RequestURI())
			if err != nil {
				return nil, err
			}
			project := e.Params.ByName("project")
			group := e.Params.ByName("group")
			return templates.Args{
				// Only the major component of the version ID is exposed.
				"AppVersion":  strings.Split(info.VersionID(c), ".")[0],
				"IsAnonymous": auth.CurrentIdentity(c) == identity.AnonymousIdentity,
				"User":        auth.CurrentUser(c),
				"LoginURL":    loginURL,
				"LogoutURL":   logoutURL,
				"CurrentTime": clock.Now(c),
				"Analytics":   analytics.Snippet(c),
				"RequestID":   info.RequestID(c),
				"Request":     e.Request,
				"Navi":        ProjectLinks(c, project, group),
				"ProjectID":   project,
			}, nil
		},
		FuncMap: funcMap,
	}
}
// withGitilesMiddleware is a middleware that installs a prod Gitiles client
// factory into the request context before invoking the next handler.
func withGitilesMiddleware(c *router.Context, next router.Handler) {
	c.Context = git.UseFactory(c.Context, git.AuthenticatedProdClient)
	next(c)
}
// withAccessClientMiddleware is a middleware that installs a prod buildbucket
// access API client into the context.
//
// This middleware depends on auth middleware in order to generate the access
// client. On client-creation failure the request is terminated via
// ErrorHandler and next is never called.
func withAccessClientMiddleware(c *router.Context, next router.Handler) {
	client, err := common.NewAccessClient(c.Context)
	if err != nil {
		ErrorHandler(c, err)
		return
	}
	c.Context = common.WithAccessClient(c.Context, client)
	next(c)
}
// projectACLMiddleware adds ACL checks on a per-project basis.
// Expects c.Params to have a "project" parameter. ACL failures and lookup
// errors are routed to ErrorHandler; only allowed requests reach next.
func projectACLMiddleware(c *router.Context, next router.Handler) {
	switch allowed, err := common.IsAllowed(c.Context, c.Params.ByName("project")); {
	case err != nil:
		ErrorHandler(c, err)
	case !allowed:
		ErrorHandler(c, errors.New("no access to project", common.CodeNoAccess))
	default:
		next(c)
	}
}
// emulationMiddleware enables buildstore emulation if the "emulation" query
// string parameter is not empty.
func emulationMiddleware(c *router.Context, next router.Handler) {
	c.Context = buildstore.WithEmulation(c.Context, c.Request.FormValue("emulation") != "")
	next(c)
}
// ProjectLinks returns the navigation list surrounding a project and
// optionally a group. An empty project yields nil. When a group is given, a
// second link group is appended; its Console link is omitted for consoles
// marked BuilderViewOnly or when the console lookup fails (logged as a
// warning, not fatal).
func ProjectLinks(c context.Context, project, group string) []ui.LinkGroup {
	if project == "" {
		return nil
	}
	projLinks := []*ui.Link{
		ui.NewLink(
			"Builders",
			fmt.Sprintf("/p/%s/builders", project),
			fmt.Sprintf("All builders for project %s", project))}
	links := []ui.LinkGroup{
		{
			Name: ui.NewLink(
				project,
				fmt.Sprintf("/p/%s", project),
				fmt.Sprintf("Project page for %s", project)),
			Links: projLinks,
		},
	}
	if group != "" {
		groupLinks := []*ui.Link{}
		con, err := common.GetConsole(c, project, group)
		if err != nil {
			// Best-effort: a missing console only suppresses the Console link.
			logging.WithError(err).Warningf(c, "error getting console")
		} else if !con.Def.BuilderViewOnly {
			groupLinks = append(groupLinks, ui.NewLink(
				"Console",
				fmt.Sprintf("/p/%s/g/%s/console", project, group),
				fmt.Sprintf("Console for group %s in project %s", group, project)))
		}
		groupLinks = append(groupLinks, ui.NewLink(
			"Builders",
			fmt.Sprintf("/p/%s/g/%s/builders", project, group),
			fmt.Sprintf("Builders for group %s in project %s", group, project)))
		links = append(links, ui.LinkGroup{
			Name:  ui.NewLink(group, "", ""),
			Links: groupLinks,
		})
	}
	return links
}
|
package sdk
/*
Copyright 2016 Alexander I.Grafov <grafov@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ॐ तारे तुत्तारे तुरे स्व
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// BoardProperties keeps metadata of a dashboard. It mirrors the "meta"
// object returned by Grafana's dashboard API endpoints.
type BoardProperties struct {
	IsStarred  bool      `json:"isStarred,omitempty"`
	IsHome     bool      `json:"isHome,omitempty"`
	IsSnapshot bool      `json:"isSnapshot,omitempty"`
	Type       string    `json:"type,omitempty"`
	CanSave    bool      `json:"canSave"`
	CanEdit    bool      `json:"canEdit"`
	CanStar    bool      `json:"canStar"`
	Slug       string    `json:"slug"`
	Expires    time.Time `json:"expires"`
	Created    time.Time `json:"created"`
	Updated    time.Time `json:"updated"`
	UpdatedBy  string    `json:"updatedBy"`
	CreatedBy  string    `json:"createdBy"`
	Version    int       `json:"version"`
}
// GetDashboard loads a dashboard from Grafana instance along with metadata for a dashboard.
// For dashboards from a filesystem set "file/" prefix for slug. By default dashboards from
// a database assumed. Database dashboards may have "db/" prefix or may have not, it will
// be appended automatically.
func (r *Client) GetDashboard(slug string) (Board, BoardProperties, error) {
	var (
		raw    []byte
		result struct {
			Meta  BoardProperties `json:"meta"`
			Board Board           `json:"dashboard"`
		}
		code int
		err  error
	)
	// Qualify the slug with "db/" unless it already carries a prefix.
	slug, _ = setPrefix(slug)
	if raw, code, err = r.get(fmt.Sprintf("api/dashboards/%s", slug), nil); err != nil {
		return Board{}, BoardProperties{}, err
	}
	if code != 200 {
		return Board{}, BoardProperties{}, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	// UseNumber keeps JSON numbers as json.Number instead of float64,
	// avoiding precision loss on large integer IDs.
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&result); err != nil {
		return Board{}, BoardProperties{}, fmt.Errorf("unmarshal board with meta: %s\n%s", err, raw)
	}
	// err is necessarily nil at this point.
	return result.Board, result.Meta, err
}
// GetRawDashboard loads a dashboard JSON from Grafana instance along with
// metadata for a dashboard. Contrary to GetDashboard() it does not unpack
// the loaded JSON into a Board structure; it returns the bytes exactly as
// Grafana exported them, which is useful for backup purposes.
//
// For dashboards from a filesystem set "file/" prefix for slug. By default
// dashboards from a database are assumed; database dashboards may have a
// "db/" prefix or not — it will be appended automatically.
func (r *Client) GetRawDashboard(slug string) ([]byte, BoardProperties, error) {
	var (
		raw    []byte
		result struct {
			Meta  BoardProperties `json:"meta"`
			Board json.RawMessage `json:"dashboard"`
		}
		code int
		err  error
	)
	slug, _ = setPrefix(slug)
	if raw, code, err = r.get(fmt.Sprintf("api/dashboards/%s", slug), nil); err != nil {
		return nil, BoardProperties{}, err
	}
	if code != 200 {
		return nil, BoardProperties{}, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	// json.RawMessage defers decoding of the dashboard body so it stays
	// byte-for-byte untouched; UseNumber protects the meta block.
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&result); err != nil {
		return nil, BoardProperties{}, fmt.Errorf("unmarshal board with meta: %s\n%s", err, raw)
	}
	return []byte(result.Board), result.Meta, err
}
// FoundBoard keeps one result of a dashboard search together with its
// metadata, as returned by the api/search endpoint.
type FoundBoard struct {
	ID        uint     `json:"id"`
	Title     string   `json:"title"`
	URI       string   `json:"uri"`
	Type      string   `json:"type"`
	Tags      []string `json:"tags"`
	IsStarred bool     `json:"isStarred"`
}
// SearchDashboards searches dashboards by a substring of their title. The
// result set can be restricted to starred dashboards only, and to dashboards
// carrying any of the given tags (logical OR across tags).
func (r *Client) SearchDashboards(query string, starred bool, tags ...string) ([]FoundBoard, error) {
	q := make(url.Values)
	if query != "" {
		q.Set("query", query)
	}
	if starred {
		q.Set("starred", "true")
	}
	for _, tag := range tags {
		q.Add("tag", tag)
	}
	raw, code, err := r.get("api/search", q)
	if err != nil {
		return nil, err
	}
	if code != 200 {
		return nil, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	var boards []FoundBoard
	err = json.Unmarshal(raw, &boards)
	return boards, err
}
// SetDashboard updates an existing dashboard or creates a new one.
// With overwrite false the dashboard ID is zeroed so Grafana creates a new
// dashboard; set overwrite to true to replace an existing dashboard with a
// newer version or one with the same title.
// Grafana can only create or update dashboards in its database. File
// dashboards may only be loaded with the HTTP API, not created or updated.
func (r *Client) SetDashboard(board Board, overwrite bool) error {
	var (
		isBoardFromDB bool
		newBoard      struct {
			Dashboard Board `json:"dashboard"`
			Overwrite bool  `json:"overwrite"`
		}
		raw  []byte
		resp StatusMessage
		code int
		err  error
	)
	if board.Slug, isBoardFromDB = cleanPrefix(board.Slug); !isBoardFromDB {
		return errors.New("only database dashboard (with 'db/' prefix in a slug) can be set")
	}
	newBoard.Dashboard = board
	newBoard.Overwrite = overwrite
	if !overwrite {
		// A zero ID tells Grafana to create a new dashboard.
		newBoard.Dashboard.ID = 0
	}
	if raw, err = json.Marshal(newBoard); err != nil {
		return err
	}
	if raw, code, err = r.post("api/dashboards/db", nil, raw); err != nil {
		return err
	}
	if err = json.Unmarshal(raw, &resp); err != nil {
		return err
	}
	switch code {
	case 401, 412:
		// Fix: guard the Message dereference — a response body without a
		// "message" field used to panic here on *resp.Message.
		message := "unknown error"
		if resp.Message != nil {
			message = *resp.Message
		}
		return fmt.Errorf("%d %s", code, message)
	}
	// NOTE(review): other non-2xx codes (e.g. 400, 500) fall through and
	// report success — confirm whether they should be errors too.
	return nil
}
// SetRawDashboard updates existing dashboard or creates a new one.
// Contrary to SetDashboard() it accepts raw JSON instead of Board structure.
// Grafana only can create or update a dashboard in a database. File dashboards
// may be only loaded with HTTP API but not created or updated.
func (r *Client) SetRawDashboard(raw []byte) error {
	var (
		rawResp []byte
		resp    StatusMessage
		code    int
		err     error
		buf     bytes.Buffer
		plain   = make(map[string]interface{})
	)
	// Round-trip through a generic map so the dashboard ID can be forced
	// to zero before posting.
	if err = json.Unmarshal(raw, &plain); err != nil {
		return err
	}
	// TODO(axel) fragile place, refactor it
	plain["id"] = 0
	raw, _ = json.Marshal(plain)
	// Wrap the dashboard JSON in the envelope the API expects; overwrite is
	// always enabled for raw uploads.
	buf.WriteString(`{"dashboard":`)
	buf.Write(raw)
	buf.WriteString(`, "overwrite": true}`)
	if rawResp, code, err = r.post("api/dashboards/db", nil, buf.Bytes()); err != nil {
		return err
	}
	if err = json.Unmarshal(rawResp, &resp); err != nil {
		return err
	}
	switch code {
	case 401:
		// NOTE(review): *resp.Message panics if the response body carried no
		// "message" field — consider guarding the dereference.
		return fmt.Errorf("%d %s", code, *resp.Message)
	case 412:
		return fmt.Errorf("%d %s", code, *resp.Message)
	}
	return nil
}
// DeleteDashboard deletes the dashboard selected by the given slug.
// Grafana can only delete dashboards stored in its database; file dashboards
// may only be loaded via the HTTP API, not deleted.
func (r *Client) DeleteDashboard(slug string) (StatusMessage, error) {
	cleaned, isBoardFromDB := cleanPrefix(slug)
	if !isBoardFromDB {
		return StatusMessage{}, errors.New("only database dashboards (with 'db/' prefix in a slug) can be removed")
	}
	raw, err := r.delete(fmt.Sprintf("api/dashboards/db/%s", cleaned))
	if err != nil {
		return StatusMessage{}, err
	}
	var reply StatusMessage
	err = json.Unmarshal(raw, &reply)
	return reply, err
}
// setPrefix qualifies a dashboard slug with its origin prefix: slugs without
// a recognized prefix implicitly refer to dashboards in the Grafana database
// and get "db/" prepended; "file/" slugs pass through untouched. The bool
// reports whether the slug refers to a database dashboard.
//
// Fix: the prefix checks used bare "db"/"file", so any slug merely starting
// with those letters (e.g. "database-overview") was misclassified and never
// received its "db/" prefix; they now match the full "db/"/"file/" prefixes.
func setPrefix(slug string) (string, bool) {
	if strings.HasPrefix(slug, "db/") {
		return slug, true
	}
	if strings.HasPrefix(slug, "file/") {
		return slug, false
	}
	return "db/" + slug, true
}
// cleanPrefix strips the origin prefix from a dashboard slug; slugs without
// a recognized prefix are assumed to be database dashboards. The bool
// reports whether the slug refers to a database dashboard.
//
// Fix: the original matched bare "db"/"file" and always sliced off 3 bytes,
// which mangled "file/x" into "e/x" and could panic on slugs shorter than
// three bytes; prefixes are now matched and trimmed exactly.
func cleanPrefix(slug string) (string, bool) {
	if strings.HasPrefix(slug, "db/") {
		return strings.TrimPrefix(slug, "db/"), true
	}
	if strings.HasPrefix(slug, "file/") {
		return strings.TrimPrefix(slug, "file/"), false
	}
	return slug, true
}
Fix comment spelling error.
package sdk
/*
Copyright 2016 Alexander I.Grafov <grafov@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ॐ तारे तुत्तारे तुरे स्व
*/
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/url"
"strings"
"time"
)
// BoardProperties keeps metadata of a dashboard, matching the "meta" block
// of Grafana's dashboard API responses.
type BoardProperties struct {
	IsStarred  bool      `json:"isStarred,omitempty"`
	IsHome     bool      `json:"isHome,omitempty"`
	IsSnapshot bool      `json:"isSnapshot,omitempty"`
	Type       string    `json:"type,omitempty"`
	CanSave    bool      `json:"canSave"`
	CanEdit    bool      `json:"canEdit"`
	CanStar    bool      `json:"canStar"`
	Slug       string    `json:"slug"`
	Expires    time.Time `json:"expires"`
	Created    time.Time `json:"created"`
	Updated    time.Time `json:"updated"`
	UpdatedBy  string    `json:"updatedBy"`
	CreatedBy  string    `json:"createdBy"`
	Version    int       `json:"version"`
}
// GetDashboard loads a dashboard and its metadata from a Grafana instance.
// For filesystem dashboards prefix the slug with "file/"; database slugs may
// carry a "db/" prefix or omit it (it is appended automatically).
func (r *Client) GetDashboard(slug string) (Board, BoardProperties, error) {
	var envelope struct {
		Meta  BoardProperties `json:"meta"`
		Board Board           `json:"dashboard"`
	}
	slug, _ = setPrefix(slug)
	raw, code, err := r.get(fmt.Sprintf("api/dashboards/%s", slug), nil)
	if err != nil {
		return Board{}, BoardProperties{}, err
	}
	if code != 200 {
		return Board{}, BoardProperties{}, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	// UseNumber keeps JSON numbers as json.Number, avoiding float64
	// precision loss on large integer IDs.
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&envelope); err != nil {
		return Board{}, BoardProperties{}, fmt.Errorf("unmarshal board with meta: %s\n%s", err, raw)
	}
	return envelope.Board, envelope.Meta, nil
}
// GetRawDashboard fetches a dashboard from a Grafana instance as raw JSON
// together with its metadata. Unlike GetDashboard it performs no Board
// conversion, so the returned bytes are exactly what Grafana exported —
// useful for backups regardless of how well Board maps the current schema.
//
// For filesystem dashboards prefix the slug with "file/"; database slugs may
// carry a "db/" prefix or omit it (it is appended automatically).
func (r *Client) GetRawDashboard(slug string) ([]byte, BoardProperties, error) {
	var envelope struct {
		Meta  BoardProperties `json:"meta"`
		Board json.RawMessage `json:"dashboard"`
	}
	slug, _ = setPrefix(slug)
	raw, code, err := r.get(fmt.Sprintf("api/dashboards/%s", slug), nil)
	if err != nil {
		return nil, BoardProperties{}, err
	}
	if code != 200 {
		return nil, BoardProperties{}, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	// RawMessage leaves the dashboard body untouched; UseNumber protects
	// the decoded meta block from float64 rounding.
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&envelope); err != nil {
		return nil, BoardProperties{}, fmt.Errorf("unmarshal board with meta: %s\n%s", err, raw)
	}
	return []byte(envelope.Board), envelope.Meta, nil
}
// FoundBoard keeps a single search hit with its dashboard metadata, as
// returned by the api/search endpoint.
type FoundBoard struct {
	ID        uint     `json:"id"`
	Title     string   `json:"title"`
	URI       string   `json:"uri"`
	Type      string   `json:"type"`
	Tags      []string `json:"tags"`
	IsStarred bool     `json:"isStarred"`
}
// SearchDashboards search dashboards by substring of their title. It allows restrict the result set with
// only starred dashboards and only for tags (logical OR applied to multiple tags).
func (r *Client) SearchDashboards(query string, starred bool, tags ...string) ([]FoundBoard, error) {
	var (
		raw    []byte
		boards []FoundBoard
		code   int
		err    error
	)
	// Build the api/search query string; empty options are simply omitted.
	u := url.URL{}
	q := u.Query()
	if query != "" {
		q.Set("query", query)
	}
	if starred {
		q.Set("starred", "true")
	}
	for _, tag := range tags {
		q.Add("tag", tag)
	}
	if raw, code, err = r.get("api/search", q); err != nil {
		return nil, err
	}
	if code != 200 {
		return nil, fmt.Errorf("HTTP error %d: returns %s", code, raw)
	}
	err = json.Unmarshal(raw, &boards)
	return boards, err
}
// SetDashboard updates existing dashboard or creates a new one.
// With overwrite false the dashboard ID is zeroed so Grafana creates a new
// dashboard; set overwrite to true if you want to overwrite an existing
// dashboard with a newer version or with the same dashboard title.
// Grafana only can create or update a dashboard in a database. File dashboards
// may be only loaded with HTTP API but not created or updated.
func (r *Client) SetDashboard(board Board, overwrite bool) error {
	var (
		isBoardFromDB bool
		newBoard      struct {
			Dashboard Board `json:"dashboard"`
			Overwrite bool  `json:"overwrite"`
		}
		raw  []byte
		resp StatusMessage
		code int
		err  error
	)
	if board.Slug, isBoardFromDB = cleanPrefix(board.Slug); !isBoardFromDB {
		return errors.New("only database dashboard (with 'db/' prefix in a slug) can be set")
	}
	newBoard.Dashboard = board
	newBoard.Overwrite = overwrite
	if !overwrite {
		// A zero ID tells Grafana to create a new dashboard.
		newBoard.Dashboard.ID = 0
	}
	if raw, err = json.Marshal(newBoard); err != nil {
		return err
	}
	if raw, code, err = r.post("api/dashboards/db", nil, raw); err != nil {
		return err
	}
	if err = json.Unmarshal(raw, &resp); err != nil {
		return err
	}
	switch code {
	case 401:
		// NOTE(review): *resp.Message panics if the response carried no
		// "message" field; consider guarding the dereference. Also, non-2xx
		// codes other than 401/412 silently report success.
		return fmt.Errorf("%d %s", code, *resp.Message)
	case 412:
		return fmt.Errorf("%d %s", code, *resp.Message)
	}
	return nil
}
// SetRawDashboard updates an existing dashboard or creates a new one.
// Contrary to SetDashboard() it accepts raw JSON instead of a Board
// structure; overwrite is always enabled for raw uploads.
// Grafana can only create or update dashboards in its database. File
// dashboards may only be loaded with the HTTP API, not created or updated.
func (r *Client) SetRawDashboard(raw []byte) error {
	var (
		rawResp []byte
		resp    StatusMessage
		code    int
		err     error
		buf     bytes.Buffer
		plain   = make(map[string]interface{})
	)
	// Round-trip through a generic map so the dashboard ID can be forced
	// to zero before posting.
	if err = json.Unmarshal(raw, &plain); err != nil {
		return err
	}
	// TODO(axel) fragile place, refactor it
	plain["id"] = 0
	raw, _ = json.Marshal(plain)
	// Wrap the dashboard JSON in the envelope the API expects.
	buf.WriteString(`{"dashboard":`)
	buf.Write(raw)
	buf.WriteString(`, "overwrite": true}`)
	if rawResp, code, err = r.post("api/dashboards/db", nil, buf.Bytes()); err != nil {
		return err
	}
	if err = json.Unmarshal(rawResp, &resp); err != nil {
		return err
	}
	switch code {
	case 401, 412:
		// Fix: guard the Message dereference — a response body without a
		// "message" field used to panic here on *resp.Message.
		message := "unknown error"
		if resp.Message != nil {
			message = *resp.Message
		}
		return fmt.Errorf("%d %s", code, message)
	}
	// NOTE(review): other non-2xx codes fall through and report success —
	// confirm whether they should be errors too.
	return nil
}
// DeleteDashboard deletes dashboard that selected by slug string.
// Grafana only can delete a dashboard in a database. File dashboards
// may be only loaded with HTTP API but not deleted.
func (r *Client) DeleteDashboard(slug string) (StatusMessage, error) {
	var (
		isBoardFromDB bool
		raw           []byte
		reply         StatusMessage
		err           error
	)
	// Strip the "db/" prefix (the endpoint re-adds it) and reject
	// non-database slugs.
	if slug, isBoardFromDB = cleanPrefix(slug); !isBoardFromDB {
		return StatusMessage{}, errors.New("only database dashboards (with 'db/' prefix in a slug) can be removed")
	}
	if raw, err = r.delete(fmt.Sprintf("api/dashboards/db/%s", slug)); err != nil {
		return StatusMessage{}, err
	}
	err = json.Unmarshal(raw, &reply)
	return reply, err
}
// setPrefix qualifies a dashboard slug with its origin prefix: slugs without
// a recognized prefix implicitly refer to dashboards in the Grafana database
// and get "db/" prepended; "file/" slugs pass through untouched. The bool
// reports whether the slug refers to a database dashboard.
//
// Fix: the prefix checks used bare "db"/"file", so any slug merely starting
// with those letters (e.g. "database-overview") was misclassified and never
// received its "db/" prefix; they now match the full "db/"/"file/" prefixes.
func setPrefix(slug string) (string, bool) {
	if strings.HasPrefix(slug, "db/") {
		return slug, true
	}
	if strings.HasPrefix(slug, "file/") {
		return slug, false
	}
	return "db/" + slug, true
}
// cleanPrefix strips the origin prefix from a dashboard slug; slugs without
// a recognized prefix are assumed to be database dashboards. The bool
// reports whether the slug refers to a database dashboard.
//
// Fix: the original matched bare "db"/"file" and always sliced off 3 bytes,
// which mangled "file/x" into "e/x" and could panic on slugs shorter than
// three bytes; prefixes are now matched and trimmed exactly.
func cleanPrefix(slug string) (string, bool) {
	if strings.HasPrefix(slug, "db/") {
		return strings.TrimPrefix(slug, "db/"), true
	}
	if strings.HasPrefix(slug, "file/") {
		return strings.TrimPrefix(slug, "file/"), false
	}
	return slug, true
}
|
package model
import (
"fmt"
"strings"
"github.com/evandroflores/claimr/database"
"github.com/jinzhu/gorm"
log "github.com/sirupsen/logrus"
)
// init migrates the Container schema when the package is loaded.
// NOTE(review): importing this package therefore touches the database as a
// side effect — confirm that is intended.
func init() {
	database.DB.AutoMigrate(&Container{})
}
// Container defines the Container information on database.
type Container struct {
	gorm.Model
	TeamID         string `gorm:"not null"` // team owning the container
	ChannelID      string `gorm:"not null"` // channel the container is scoped to
	Name           string `gorm:"not null"` // stored lower-cased (see Add)
	InUseBy        string // user currently claiming the container; empty when free
	InUseForReason string // free-text reason supplied with the claim
	CreatedByUser  string // user who created the container
}
// MaxNameSize is the max number of characters for a container name.
const MaxNameSize = 22

// isValidContainerInput checks that teamID, channelID, and containerName are
// all non-empty and that the name fits within MaxNameSize, returning false
// plus a user-facing error for the first failing check.
func isValidContainerInput(teamID string, channelID string, containerName string) (bool, error) {
	names := []string{"teamID", "channelID", "container name"}
	values := []string{teamID, channelID, containerName}
	for i, value := range values {
		if err := checkRequired(names[i], value); err != nil {
			return false, err
		}
	}
	if len(containerName) > MaxNameSize {
		return false, fmt.Errorf("try a smaller container name up to %d characters", MaxNameSize)
	}
	return true, nil
}
// checkRequired returns a user-facing error naming fieldName when fieldValue
// is empty, and nil otherwise.
func checkRequired(fieldName string, fieldValue string) error {
	if fieldValue != "" {
		return nil
	}
	return fmt.Errorf("can not continue without a %s 🙄", fieldName)
}
// GetContainer returns a container for teamID, channelID, and name provided.
// Invalid input is logged and returned as an error; a missing record yields
// the zero Container with a nil error.
func GetContainer(teamID string, channelID string, name string) (Container, error) {
	result := Container{}
	valid, err := isValidContainerInput(teamID, channelID, name)
	if !valid {
		log.Errorf("GetContainer: [%s, %s, %s] %s", teamID, channelID, name, err)
		return result, err
	}
	// Names are stored lower-cased, so the lookup lower-cases too.
	// NOTE(review): the query's error/not-found status is discarded — DB
	// failures silently produce a zero Container here.
	database.DB.Where(&Container{TeamID: teamID, ChannelID: channelID, Name: strings.ToLower(name)}).
		First(&result)
	return result, nil
}
// GetContainers returns a list of containers for the given TeamID and
// ChannelID. The "." placeholder satisfies the name-required validation
// since no specific name participates in this query.
func GetContainers(teamID string, channelID string) ([]Container, error) {
	results := []Container{}
	valid, err := isValidContainerInput(teamID, channelID, ".")
	if !valid {
		log.Errorf("GetContainers: [%s, %s] %s", teamID, channelID, err)
		return results, err
	}
	// NOTE(review): the query's error status is discarded — DB failures
	// silently produce an empty list.
	database.DB.Where(&Container{TeamID: teamID, ChannelID: channelID}).
		Find(&results)
	return results, nil
}
// Add a given Container to database. Fails when a container with the same
// name already exists on the channel.
func (container Container) Add() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, container.Name)
	if err != nil {
		return err
	}
	// GetContainer returns the zero Container when nothing matched, so any
	// non-zero struct means a name collision.
	if existingContainer != (Container{}) {
		return fmt.Errorf("there is a container with the same name on this channel. Try a different one 😕")
	}
	// Names are stored lower-cased so lookups are case-insensitive.
	container.Name = strings.ToLower(container.Name)
	// NOTE(review): Create's error status is discarded.
	database.DB.Create(&container)
	return nil
}
// Update a given Container. Only the claim fields (InUseBy, InUseForReason)
// are written; all other stored fields keep their current values.
func (container Container) Update() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, strings.ToLower(container.Name))
	if err != nil {
		return err
	}
	// The zero Container means the lookup found nothing.
	if existingContainer == (Container{}) {
		return fmt.Errorf("could not find this container on this channel. Can not update 😕")
	}
	existingContainer.InUseBy = container.InUseBy
	existingContainer.InUseForReason = container.InUseForReason
	// NOTE(review): Save's error status is discarded.
	database.DB.Save(&existingContainer)
	return nil
}
// Delete removes a Container from the database, failing when no container
// with that name exists on the channel.
func (container Container) Delete() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, strings.ToLower(container.Name))
	if err != nil {
		return err
	}
	// The zero Container means the lookup found nothing.
	if existingContainer == (Container{}) {
		return fmt.Errorf("could not find this container on this channel. Can not delete 😕")
	}
	// NOTE(review): Delete's error status is discarded.
	database.DB.Delete(&existingContainer)
	return nil
}
removing logs
package model
import (
"fmt"
"strings"
"github.com/evandroflores/claimr/database"
"github.com/jinzhu/gorm"
)
// init migrates the Container schema when the package is loaded.
// NOTE(review): importing this package therefore touches the database as a
// side effect — confirm that is intended.
func init() {
	database.DB.AutoMigrate(&Container{})
}
// Container defines the Container information on database.
type Container struct {
	gorm.Model
	TeamID         string `gorm:"not null"` // team owning the container
	ChannelID      string `gorm:"not null"` // channel the container is scoped to
	Name           string `gorm:"not null"` // stored lower-cased (see Add)
	InUseBy        string // user currently claiming the container; empty when free
	InUseForReason string // free-text reason supplied with the claim
	CreatedByUser  string // user who created the container
}
// MaxNameSize is the max number of characters for a container name.
const MaxNameSize = 22

// isValidContainerInput checks that teamID, channelID, and containerName
// are all non-empty and that the name fits within MaxNameSize. It returns
// false plus a user-facing error for the first failing check.
func isValidContainerInput(teamID string, channelID string, containerName string) (bool, error) {
	// Fields are validated in declaration order so the error names the
	// first missing one.
	fields := []struct {
		name  string
		value string
	}{
		{"teamID", teamID},
		{"channelID", channelID},
		{"container name", containerName},
	}
	for _, field := range fields {
		err := checkRequired(field.name, field.value)
		if err != nil {
			return false, err
		}
	}
	if len(containerName) > MaxNameSize {
		return false, fmt.Errorf("try a smaller container name up to %d characters", MaxNameSize)
	}
	return true, nil
}
// checkRequired reports an error naming fieldName when fieldValue is empty;
// a populated value passes with nil.
func checkRequired(fieldName string, fieldValue string) error {
	if len(fieldValue) > 0 {
		return nil
	}
	return fmt.Errorf("can not continue without a %s 🙄", fieldName)
}
// GetContainer returns a container for teamID, channelID, and name provided.
// A missing record yields the zero Container with a nil error.
func GetContainer(teamID string, channelID string, name string) (Container, error) {
	result := Container{}
	valid, err := isValidContainerInput(teamID, channelID, name)
	if !valid {
		return result, err
	}
	// Names are stored lower-cased, so the lookup lower-cases too.
	// NOTE(review): the query's error/not-found status is discarded — DB
	// failures silently produce a zero Container.
	database.DB.Where(&Container{TeamID: teamID, ChannelID: channelID, Name: strings.ToLower(name)}).
		First(&result)
	return result, nil
}
// GetContainers returns a list of containers for the given TeamID and
// ChannelID. The "." placeholder satisfies the name-required validation
// since no specific name participates in this query.
func GetContainers(teamID string, channelID string) ([]Container, error) {
	results := []Container{}
	valid, err := isValidContainerInput(teamID, channelID, ".")
	if !valid {
		return results, err
	}
	// NOTE(review): the query's error status is discarded — DB failures
	// silently produce an empty list.
	database.DB.Where(&Container{TeamID: teamID, ChannelID: channelID}).
		Find(&results)
	return results, nil
}
// Add a given Container to database. Fails when a container with the same
// name already exists on the channel.
func (container Container) Add() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, container.Name)
	if err != nil {
		return err
	}
	// GetContainer returns the zero Container when nothing matched, so any
	// non-zero struct means a name collision.
	if existingContainer != (Container{}) {
		return fmt.Errorf("there is a container with the same name on this channel. Try a different one 😕")
	}
	// Names are stored lower-cased so lookups are case-insensitive.
	container.Name = strings.ToLower(container.Name)
	// NOTE(review): Create's error status is discarded.
	database.DB.Create(&container)
	return nil
}
// Update a given Container. Only the claim fields (InUseBy, InUseForReason)
// are written; all other stored fields keep their current values.
func (container Container) Update() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, strings.ToLower(container.Name))
	if err != nil {
		return err
	}
	// The zero Container means the lookup found nothing.
	if existingContainer == (Container{}) {
		return fmt.Errorf("could not find this container on this channel. Can not update 😕")
	}
	existingContainer.InUseBy = container.InUseBy
	existingContainer.InUseForReason = container.InUseForReason
	// NOTE(review): Save's error status is discarded.
	database.DB.Save(&existingContainer)
	return nil
}
// Delete removes a Container from the database, failing when no container
// with that name exists on the channel.
func (container Container) Delete() error {
	existingContainer, err := GetContainer(container.TeamID, container.ChannelID, strings.ToLower(container.Name))
	if err != nil {
		return err
	}
	// The zero Container means the lookup found nothing.
	if existingContainer == (Container{}) {
		return fmt.Errorf("could not find this container on this channel. Can not delete 😕")
	}
	// NOTE(review): Delete's error status is discarded.
	database.DB.Delete(&existingContainer)
	return nil
}
|
package main
import (
"eaciit/gdrj/model"
"eaciit/gdrj/modules"
"os"
"github.com/eaciit/dbox"
"github.com/eaciit/orm/v1"
"github.com/eaciit/toolkit"
// "strings"
"time"
)
var conn dbox.IConnection
var count int
var (
t0 time.Time
fiscalyear, iscount, scount int
data map[string]float64
masters = toolkit.M{}
)
// setinitialconnection opens the shared "db_godrej" connection into the
// package-level conn and wires it into the gdrj ORM layer. Any failure is
// fatal: the message is logged and the process exits with status 1.
func setinitialconnection() {
	var err error
	conn, err = modules.GetDboxIConnection("db_godrej")
	if err != nil {
		// Fixed misleading message: previously logged "Initial connection
		// found" when the connection had in fact failed.
		toolkit.Println("Initial connection error: ", err)
		os.Exit(1)
	}

	err = gdrj.SetDb(conn)
	if err != nil {
		toolkit.Println("Initial connection error: ", err)
		os.Exit(1)
	}
}
// plalloc accumulates, for one fiscal year, the total and per-channel sales
// (PL8A) and value (PL7A) figures plus the derived per-channel ratios used to
// re-allocate value across channels.
type plalloc struct {
	Key string
	/*
		Current float64
		Expect float64
		Total float64
	*/
	TotalSales float64
	TotalValue float64
	// Per report-channel accumulators keyed by channel code (GT/MT/RD).
	ChannelSales map[string]float64
	ChannelValue map[string]float64
	// ExpectedValue[c] = Ratio[c] * TotalValue, filled in by buildRatio.
	ExpectedValue map[string]float64
	// Ratio[c] = ChannelSales[c] / TotalSales, filled in by buildRatio.
	Ratio map[string]float64
}

// plallocs maps fiscal-year key -> accumulated allocation data; rebuilt on
// each buildRatio call and read by processTable.
var plallocs = map[string]*plalloc{}
// buildRatio scans summary table tn and fills plallocs: per fiscal year, the
// total and per-channel sales (PL8A) and value (PL7A) for the GT/MT/RD
// channels, then derives each channel's sales ratio and expected value.
func buildRatio(tn string) error {
	// Reset the accumulator; one entry per fiscal year.
	plallocs = map[string]*plalloc{}
	// A dedicated read connection is opened for the scan (the global conn is
	// used elsewhere for writes). Connection errors are ignored here —
	// presumably acceptable because setinitialconnection already proved the
	// database reachable; verify.
	rconn, _ := modules.GetDboxIConnection("db_godrej")
	defer rconn.Close()

	//totalBlanks := map[string]float64{}

	ctrx, e := rconn.NewQuery().From(tn).
		Select().Cursor(nil)
	if e != nil {
		return e
	}
	defer ctrx.Close()

	count := ctrx.Count()
	i := 0
	for {
		mtrx := toolkit.M{}
		// Fetch one row at a time; a fetch error signals end of cursor.
		if e = ctrx.Fetch(&mtrx, 1, false); e != nil {
			break
		}
		i++
		toolkit.Printfn("Ratio 1: %d of %d in %s",
			i, count, time.Since(t0).String())

		key := mtrx.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mtrx.GetFloat64("PL8A")
		value := mtrx.GetFloat64("PL7A")

		// Only the three report channels participate in the allocation.
		if toolkit.HasMember([]string{"GT", "MT", "RD"}, channel) {
			falloc := plallocs[fiscal]
			if falloc == nil {
				// First row for this fiscal year: initialize all maps so the
				// indexed writes below are safe.
				falloc = new(plalloc)
				falloc.Ratio = map[string]float64{}
				falloc.ChannelValue = map[string]float64{}
				falloc.ChannelSales = map[string]float64{}
				falloc.ExpectedValue = map[string]float64{}
			}
			falloc.Key = fiscal
			falloc.TotalSales += sales
			falloc.TotalValue += value
			falloc.ChannelSales[channel] = falloc.ChannelSales[channel] + sales
			falloc.ChannelValue[channel] = falloc.ChannelValue[channel] + value
			plallocs[fiscal] = falloc
		}
	}

	// Second pass: each channel's share of the year's sales, and the value
	// that channel would carry at that share.
	for _, falloc := range plallocs {
		for c, _ := range falloc.ChannelSales {
			falloc.Ratio[c] = toolkit.Div(falloc.ChannelSales[c], falloc.TotalSales)
			falloc.ExpectedValue[c] = falloc.Ratio[c] * falloc.TotalValue
		}
	}
	return nil
}
// main builds per-channel allocation ratios from the summary table(s) and
// then rewrites each row's PL7A (and derived PL lines) according to those
// ratios.
func main() {
	t0 = time.Now()

	setinitialconnection()
	defer gdrj.CloseDb()

	// Load the PL model master used by CalcSum to classify PL fields.
	prepmastercalc()

	toolkit.Println("Start data query...")
	tablenames := []string{
		"salespls-summary"}

	for _, tn := range tablenames {
		// Pass 1: accumulate ratios per fiscal year/channel.
		e := buildRatio(tn)
		if e != nil {
			toolkit.Printfn("Build ratio error: %s - %s", tn, e.Error())
			return
		}

		// Pass 2: rewrite rows using those ratios.
		e = processTable(tn)
		if e != nil {
			toolkit.Printfn("Process table error: %s - %s", tn, e.Error())
			return
		}
	}
}
// processTable rewrites PL7A for every row of tn using the ratios prepared by
// buildRatio, recomputes the derived PL aggregates via CalcSum, and saves
// each row back to the same table.
func processTable(tn string) error {
	// Cursor error is ignored; presumably safe because buildRatio already
	// opened the same table successfully — verify.
	cursor, _ := conn.NewQuery().From(tn).Select().Cursor(nil)
	defer cursor.Close()
	count := cursor.Count()
	i := 0
	for {
		mr := toolkit.M{}
		ef := cursor.Fetch(&mr, 1, false)
		if ef != nil {
			break
		}

		i++
		toolkit.Printfn("Processing %s, %d of %d in %s",
			tn, i, count, time.Since(t0).String())

		key := mr.Get("key", toolkit.M{}).(toolkit.M)
		fiscal := key.GetString("date_fiscal")
		channel := key.GetString("customer_reportchannel")
		sales := mr.GetFloat64("PL8A")
		value := mr.GetFloat64("PL7A")
		newv := value
		// NOTE(review): falloc is nil for fiscal years that produced no
		// GT/MT/RD rows during buildRatio; the dereferences below would then
		// panic. Presumably the data guarantees every year has such rows —
		// confirm.
		falloc := plallocs[fiscal]
		if channel == "RD" {
			// RD rows: re-value proportionally to sales and force the
			// customer group fields to "RD".
			newv = sales * falloc.ExpectedValue[channel] / falloc.ChannelSales[channel]
			key.Set("customer_customergroupname", "RD")
			key.Set("customer_customergroup", "RD")
		} else {
			// Other channels: scale the existing value so channel totals
			// match the expected allocation. Zero values stay zero.
			if value != 0 {
				newv = value * falloc.ExpectedValue[channel] / falloc.ChannelValue[channel]
			}
		}
		mr.Set("key", key)
		mr.Set("PL7A", newv)
		// Recompute aggregate PL lines from the adjusted detail values.
		mr = CalcSum(mr)
		esave := conn.NewQuery().From(tn).Save().Exec(toolkit.M{}.Set("data", mr))
		if esave != nil {
			return esave
		}
	}
	return nil
}
// CalcSum recomputes every derived PL aggregate (net sales, COGS, gross
// margin, operating income, EBT/EAT, EBITDA variants, ...) from the detail PL
// fields in tkm, classifying each field via the "plmodel" master loaded by
// prepmastercalc, and returns tkm with the aggregate fields overwritten.
func CalcSum(tkm toolkit.M) toolkit.M {
	var netsales, cogs, grossmargin, sellingexpense,
		sga, opincome, directexpense, indirectexpense,
		royaltiestrademark, advtpromoexpense, operatingexpense,
		freightexpense, nonoprincome, ebt, taxexpense,
		percentpbt, eat, totdepreexp, damagegoods, ebitda, ebitdaroyalties, ebitsga,
		grosssales, discount, advexp, promoexp, spgexp float64

	// Derived/aggregate field IDs: skipped while summing so stale aggregate
	// values are not double-counted, then overwritten via tkm.Set below.
	exclude := []string{"PL8A", "PL14A", "PL74A", "PL26A", "PL32A", "PL39A", "PL41A", "PL44A",
		"PL74B", "PL74C", "PL32B", "PL94B", "PL94C", "PL39B", "PL41B", "PL41C", "PL44B", "PL44C", "PL44D", "PL44E",
		"PL44F", "PL6A", "PL0", "PL28", "PL29A", "PL31"}
	//"PL94A",

	plmodels := masters.Get("plmodel").(map[string]*gdrj.PLModel)

	// inexclude reports whether field f is one of the derived fields above.
	inexclude := func(f string) bool {
		for _, v := range exclude {
			if v == f {
				return true
			}
		}
		return false
	}

	for k, v := range tkm {
		if k == "_id" {
			continue
		}
		if inexclude(k) {
			continue
		}
		// arrk := strings.Split(k, "_")
		// Keys without a PL model entry are not PL detail lines; skip them.
		plmodel, exist := plmodels[k]
		if !exist {
			//toolkit.Println(k)
			continue
		}
		Amount := toolkit.ToFloat64(v, 6, toolkit.RoundingAuto)

		// Bucket the amount by the model's level-1 header. NOTE(review): the
		// aggregations further down add rather than subtract expense buckets
		// (e.g. grossmargin = netsales + cogs), so amounts are presumably
		// stored signed — the original "asume nonopriceincome already minus"
		// comment below supports this; confirm against the data.
		// PLHeader1
		// PLHeader2
		// PLHeader3
		// switch v.Group1 {
		switch plmodel.PLHeader1 {
		case "Net Sales":
			netsales += Amount
		case "Direct Expense":
			directexpense += Amount
		case "Indirect Expense":
			indirectexpense += Amount
		case "Freight Expense":
			freightexpense += Amount
		case "Royalties & Trademark Exp":
			royaltiestrademark += Amount
		case "Advt & Promo Expenses":
			advtpromoexpense += Amount
		case "G&A Expenses":
			sga += Amount
		case "Non Operating (Income) / Exp":
			nonoprincome += Amount
		case "Tax Expense":
			taxexpense += Amount
		case "Total Depreciation Exp":
			// Damaged goods are tracked separately from other depreciation.
			if plmodel.PLHeader2 == "Damaged Goods" {
				damagegoods += Amount
			} else {
				totdepreexp += Amount
			}
		}

		// Secondary bucketing by the level-2 header.
		// switch v.Group2 {
		switch plmodel.PLHeader2 {
		case "Gross Sales":
			grosssales += Amount
		case "Discount":
			discount += Amount
		case "Advertising Expenses":
			advexp += Amount
		case "Promotions Expenses":
			promoexp += Amount
		case "SPG Exp / Export Cost":
			spgexp += Amount
		}
	}

	// Roll up the buckets into the derived PL lines.
	cogs = directexpense + indirectexpense
	grossmargin = netsales + cogs
	sellingexpense = freightexpense + royaltiestrademark + advtpromoexpense
	operatingexpense = sellingexpense + sga
	opincome = grossmargin + operatingexpense
	ebt = opincome + nonoprincome //asume nonopriceincome already minus
	percentpbt = 0
	if ebt != 0 {
		percentpbt = taxexpense / ebt * 100
	}
	eat = ebt + taxexpense
	ebitda = totdepreexp + damagegoods + opincome
	ebitdaroyalties = ebitda - royaltiestrademark
	ebitsga = opincome - sga
	ebitsgaroyalty := ebitsga - royaltiestrademark

	// Write the recomputed aggregates back onto the row.
	tkm.Set("PL0", grosssales)
	tkm.Set("PL6A", discount)
	tkm.Set("PL8A", netsales)
	tkm.Set("PL14A", directexpense)
	tkm.Set("PL74A", indirectexpense)
	tkm.Set("PL26A", royaltiestrademark)
	tkm.Set("PL32A", advtpromoexpense)
	tkm.Set("PL94A", sga)
	tkm.Set("PL39A", nonoprincome)
	tkm.Set("PL41A", taxexpense)
	tkm.Set("PL44A", totdepreexp)
	tkm.Set("PL28", advexp)
	tkm.Set("PL29A", promoexp)
	tkm.Set("PL31", spgexp)

	tkm.Set("PL74B", cogs)
	tkm.Set("PL74C", grossmargin)
	tkm.Set("PL32B", sellingexpense)
	tkm.Set("PL94B", operatingexpense)
	tkm.Set("PL94C", opincome)
	tkm.Set("PL39B", ebt)
	tkm.Set("PL41B", percentpbt)
	tkm.Set("PL41C", eat)
	tkm.Set("PL44B", opincome)
	tkm.Set("PL44C", ebitda)
	tkm.Set("PL44D", ebitdaroyalties)
	tkm.Set("PL44E", ebitsga)
	tkm.Set("PL44F", ebitsgaroyalty)

	return tkm
}
// buildmap streams every record of the model produced by fnModel that matches
// filter and feeds each one to fnIter, which folds it into holder (typically
// a map). The populated holder is returned. A cursor error is fatal: it is
// logged and the process exits with status 100.
func buildmap(holder interface{},
	fnModel func() orm.IModel,
	filter *dbox.Filter,
	fnIter func(holder interface{}, obj interface{})) interface{} {
	crx, ecrx := gdrj.Find(fnModel(), filter, nil)
	if ecrx != nil {
		toolkit.Printfn("Cursor Error: %s", ecrx.Error())
		os.Exit(100)
	}
	defer crx.Close()
	for {
		// A fresh model instance per fetch so rows do not overwrite each
		// other in the holder.
		s := fnModel()
		e := crx.Fetch(s, 1, false)
		if e != nil {
			break
		}
		fnIter(holder, s)
	}
	return holder
}
// prepmastercalc loads the PL model master table into masters["plmodel"] as a
// map of model ID -> *gdrj.PLModel; CalcSum uses it to classify PL fields.
func prepmastercalc() {
	toolkit.Println("--> PL MODEL")
	masters.Set("plmodel", buildmap(map[string]*gdrj.PLModel{},
		func() orm.IModel {
			return new(gdrj.PLModel)
		},
		nil,
		func(holder, obj interface{}) {
			h := holder.(map[string]*gdrj.PLModel)
			o := obj.(*gdrj.PLModel)
			h[o.ID] = o
		}).(map[string]*gdrj.PLModel))
}
remd actc
|
package render
import (
"container/heap"
"image"
"image/color"
"image/draw"
"time"
"bitbucket.org/oakmoundstudio/oak/event"
"bitbucket.org/oakmoundstudio/oak/timing"
)
var (
rh *RenderableHeap
srh *RenderableHeap
toPushRenderables []Renderable
toPushStatic []Renderable
preDrawBind event.Binding
resetHeap bool
EmptyRenderable = NewColorBox(1, 1, color.RGBA{0, 0, 0, 0})
//EmptyRenderable = new(Composite)
)
// Drawing does not actually immediately draw a renderable,
// instead the renderable is added to a list of elements to
// be drawn next frame. This avoids issues where elements
// are added to the heap while it is being drawn.
func Draw(r Renderable, l int) Renderable {
r.SetLayer(l)
toPushRenderables = append(toPushRenderables, r)
return r
}
func StaticDraw(r Renderable, l int) Renderable {
r.SetLayer(l)
toPushStatic = append(toPushStatic, r)
return r
}
// PreDraw parses through renderables to be pushed
// and adds them to the drawheap.
func PreDraw() {
i := 0
// defer func() {
// if x := recover(); x != nil {
// dlog.Error("Invalid Memory Address in toPushRenderables")
// // This does not work-- all addresses following the bad address
// // at i are also bad
// //toPushRenderables = toPushRenderables[i+1:]
// toPushRenderables = []Renderable{}
// }
// }()
if resetHeap == true {
InitDrawHeap()
resetHeap = false
} else {
for _, r := range toPushRenderables {
if r != nil {
heap.Push(rh, r)
}
i++
}
for _, r := range toPushStatic {
heap.Push(srh, r)
}
}
toPushStatic = []Renderable{}
toPushRenderables = []Renderable{}
}
// LoadSpriteAndDraw is shorthand for LoadSprite
// followed by Draw.
func LoadSpriteAndDraw(filename string, l int) *Sprite {
s := LoadSprite(filename)
return Draw(s, l).(*Sprite)
}
// DrawColor is equivalent to LoadSpriteAndDraw,
// but with colorboxes.
func DrawColor(c color.Color, x1, y1, x2, y2 float64, l int) {
cb := NewColorBox(int(x2), int(y2), c)
cb.ShiftX(x1)
cb.ShiftY(y1)
Draw(cb, l)
}
// DrawHeap takes every element in the heap
// and draws them as it removes them. It
// filters out elements who have the layer
// -1, reserved for elements to be undrawn.
func DrawHeap(target *image.RGBA, viewPos image.Point, screenW, screenH int) {
drawRenderableHeap(target, rh, viewPos, screenW, screenH)
}
func DrawStaticHeap(target *image.RGBA) {
newRh := &RenderableHeap{}
for srh.Len() > 0 {
rp := heap.Pop(srh)
if rp != nil {
r := rp.(Renderable)
if r.GetLayer() != -1 {
r.Draw(target)
heap.Push(newRh, r)
}
}
}
*srh = *newRh
}
func drawRenderableHeap(target *image.RGBA, rheap *RenderableHeap, viewPos image.Point, screenW, screenH int) {
newRh := &RenderableHeap{}
for rheap.Len() > 0 {
intf := heap.Pop(rheap)
if intf != nil {
r := intf.(Renderable)
if r.GetLayer() != -1 {
x := int(r.GetX())
y := int(r.GetY())
x2 := x
y2 := y
rgba := r.GetRGBA()
if rgba != nil {
max := rgba.Bounds().Max
x += max.X
y += max.Y
// Artificial width and height added due to bug in polygon checking alg
} else {
x += 6
y += 6
}
if x > viewPos.X && y > viewPos.Y &&
x2 < viewPos.X+screenW && y2 < viewPos.Y+screenH {
if InDrawPolygon(x, y, x2, y2) {
r.Draw(target)
}
}
heap.Push(newRh, r)
}
}
}
*rheap = *newRh
}
// UndrawAfter will trigger a renderable's undraw function
// after a given time has passed
func UndrawAfter(r Renderable, t time.Duration) {
go func(r Renderable, t time.Duration) {
timing.DoAfter(t, func() {
r.UnDraw()
})
}(r, t)
}
// DrawForTime is a wrapper for Draw and UndrawAfter
func DrawForTime(r Renderable, l int, t time.Duration) {
Draw(r, l)
UndrawAfter(r, t)
}
// ShinyDraw performs a draw operation at -x, -y, because
// shiny/screen represents quadrant 4 as negative in both axes.
// draw.Over will merge two pixels at a given position based on their
// alpha channel.
func ShinyDraw(buff draw.Image, img image.Image, x, y int) {
draw.Draw(buff, buff.Bounds(),
img, image.Point{-x, -y}, draw.Over)
}
// draw.Src will overwrite pixels beneath the given image regardless of
// the new image's alpha.
func ShinyOverwrite(buff draw.Image, img image.Image, x, y int) {
draw.Draw(buff, buff.Bounds(),
img, image.Point{-x, -y}, draw.Src)
}
Un-commented out bad memory checker in draw heap
package render
import (
"container/heap"
"image"
"image/color"
"image/draw"
"time"
"bitbucket.org/oakmoundstudio/oak/dlog"
"bitbucket.org/oakmoundstudio/oak/event"
"bitbucket.org/oakmoundstudio/oak/timing"
)
var (
rh *RenderableHeap
srh *RenderableHeap
toPushRenderables []Renderable
toPushStatic []Renderable
preDrawBind event.Binding
resetHeap bool
EmptyRenderable = NewColorBox(1, 1, color.RGBA{0, 0, 0, 0})
//EmptyRenderable = new(Composite)
)
// Drawing does not actually immediately draw a renderable,
// instead the renderable is added to a list of elements to
// be drawn next frame. This avoids issues where elements
// are added to the heap while it is being drawn.
func Draw(r Renderable, l int) Renderable {
r.SetLayer(l)
toPushRenderables = append(toPushRenderables, r)
return r
}
func StaticDraw(r Renderable, l int) Renderable {
r.SetLayer(l)
toPushStatic = append(toPushStatic, r)
return r
}
// PreDraw parses through renderables to be pushed
// and adds them to the drawheap.
func PreDraw() {
	i := 0
	// Recover from a panic while pushing: historically some entries in
	// toPushRenderables held invalid memory. Per the note below, once one
	// address is bad all following ones are too, so the whole pending list is
	// dropped rather than resumed past index i.
	defer func() {
		if x := recover(); x != nil {
			dlog.Error("Invalid Memory Address in toPushRenderables")
			// This does not work-- all addresses following the bad address
			// at i are also bad
			//toPushRenderables = toPushRenderables[i+1:]
			toPushRenderables = []Renderable{}
		}
	}()
	if resetHeap == true {
		// A requested reset discards both heaps; pending pushes are dropped
		// by the list clears below.
		InitDrawHeap()
		resetHeap = false
	} else {
		for _, r := range toPushRenderables {
			if r != nil {
				heap.Push(rh, r)
			}
			i++
		}
		for _, r := range toPushStatic {
			heap.Push(srh, r)
		}
	}
	// Clear the pending lists regardless of which branch ran.
	toPushStatic = []Renderable{}
	toPushRenderables = []Renderable{}
}
// LoadSpriteAndDraw loads the sprite at filename and queues it for drawing at
// layer l, returning the loaded sprite.
func LoadSpriteAndDraw(filename string, l int) *Sprite {
	return Draw(LoadSprite(filename), l).(*Sprite)
}
// DrawColor queues a solid-color box for drawing at layer l: a box of size
// (x2, y2) shifted to position (x1, y1).
func DrawColor(c color.Color, x1, y1, x2, y2 float64, l int) {
	box := NewColorBox(int(x2), int(y2), c)
	box.ShiftX(x1)
	box.ShiftY(y1)
	Draw(box, l)
}
// DrawHeap takes every element in the heap
// and draws them as it removes them. It
// filters out elements who have the layer
// -1, reserved for elements to be undrawn.
func DrawHeap(target *image.RGBA, viewPos image.Point, screenW, screenH int) {
drawRenderableHeap(target, rh, viewPos, screenW, screenH)
}
func DrawStaticHeap(target *image.RGBA) {
newRh := &RenderableHeap{}
for srh.Len() > 0 {
rp := heap.Pop(srh)
if rp != nil {
r := rp.(Renderable)
if r.GetLayer() != -1 {
r.Draw(target)
heap.Push(newRh, r)
}
}
}
*srh = *newRh
}
// drawRenderableHeap pops every renderable off rheap in layer order, draws
// those that intersect the viewport, and pushes survivors onto a fresh heap
// that then replaces rheap. Elements with layer -1 are dropped (undrawn).
func drawRenderableHeap(target *image.RGBA, rheap *RenderableHeap, viewPos image.Point, screenW, screenH int) {
	newRh := &RenderableHeap{}
	for rheap.Len() > 0 {
		intf := heap.Pop(rheap)
		if intf != nil {
			r := intf.(Renderable)
			if r.GetLayer() != -1 {
				// (x2, y2) keeps the top-left corner; (x, y) becomes the
				// bottom-right once the image bounds are added.
				x := int(r.GetX())
				y := int(r.GetY())
				x2 := x
				y2 := y
				rgba := r.GetRGBA()
				if rgba != nil {
					max := rgba.Bounds().Max
					x += max.X
					y += max.Y
					// Artificial width and height added due to bug in polygon checking alg
				} else {
					x += 6
					y += 6
				}
				// Cull anything wholly outside the viewport before the
				// polygon check and the actual draw.
				if x > viewPos.X && y > viewPos.Y &&
					x2 < viewPos.X+screenW && y2 < viewPos.Y+screenH {
					if InDrawPolygon(x, y, x2, y2) {
						r.Draw(target)
					}
				}
				// Off-screen elements are still kept for future frames.
				heap.Push(newRh, r)
			}
		}
	}
	*rheap = *newRh
}
// UndrawAfter will trigger a renderable's undraw function
// after a given time has passed
func UndrawAfter(r Renderable, t time.Duration) {
	go func() {
		timing.DoAfter(t, r.UnDraw)
	}()
}
// DrawForTime is a wrapper for Draw and UndrawAfter
func DrawForTime(r Renderable, l int, t time.Duration) {
Draw(r, l)
UndrawAfter(r, t)
}
// ShinyDraw performs a draw operation at -x, -y, because
// shiny/screen represents quadrant 4 as negative in both axes.
// draw.Over will merge two pixels at a given position based on their
// alpha channel.
func ShinyDraw(buff draw.Image, img image.Image, x, y int) {
draw.Draw(buff, buff.Bounds(),
img, image.Point{-x, -y}, draw.Over)
}
// draw.Src will overwrite pixels beneath the given image regardless of
// the new image's alpha.
func ShinyOverwrite(buff draw.Image, img image.Image, x, y int) {
draw.Draw(buff, buff.Bounds(),
img, image.Point{-x, -y}, draw.Src)
}
|
// This file is part of Monsti, a web content management system.
// Copyright 2012-2013 Christian Neumann
//
// Monsti is free software: you can redistribute it and/or modify it under the
// terms of the GNU Affero General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option) any
// later version.
//
// Monsti is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
// A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Monsti. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"pkg.monsti.org/rpc/client"
"pkg.monsti.org/util/template"
utesting "pkg.monsti.org/util/testing"
"path/filepath"
"strings"
"testing"
)
func TestSplitFirstDir(t *testing.T) {
tests := []struct {
Path, First string
}{
{"", ""},
{"foo", "foo"},
{"foo/", "foo"},
{"foo/bar", "foo"},
{"/", ""},
{"/foo", "foo"},
{"/foo/", "foo"},
{"/foo/bar", "foo"}}
for _, test := range tests {
ret := splitFirstDir(test.Path)
if ret != test.First {
t.Errorf("splitFirstDir(%q) = %q, should be %q", test.Path, ret,
test.First)
}
}
}
// TestRenderInMaster renders content through the master template and checks
// title, description, navigation markers, and content placement.
func TestRenderInMaster(t *testing.T) {
	masterTmpl := `{{.Page.Title}}
{{.Page.Description}}
{{range .Page.PrimaryNav}}#{{if .Active}}a{{end}}|{{.Target}}|{{.Name}}{{end}}
{{if .Page.ShowSecondaryNav}}
{{range .Page.SecondaryNav}}#{{if .Active}}a{{end}}{{if .Child}}c{{end}}|{{.Target}}|{{.Name}}{{end}}
{{end}}
{{with .Page.Sidebar}}
{{.}}
{{end}}
{{.Page.Content}}`
	root, cleanup, err := utesting.CreateDirectoryTree(map[string]string{
		"/data/foo/node.yaml":               "title: Foo",
		"/data/foo/child1/node.yaml":        "title: Foo Child 1",
		"/data/foo/child2/node.yaml":        "title: Foo Child 2",
		"/data/foo/child2/child1/node.yaml": "title: Foo Child 2 Child 1",
		"/data/bar/node.yaml":               "title: Bar",
		"/data/cruz/node.yaml":              "title: Cruz",
		"/templates/master.html":            masterTmpl}, "_monsti_TestRenderInMaster")
	if err != nil {
		// Fixed vet error: Fatalf had an err argument but no %v directive,
		// so the error was never printed.
		t.Fatalf("Could not create temporary files: %v", err)
	}
	defer cleanup()
	renderer := template.Renderer{Root: filepath.Join(root, "templates")}
	site := site{}
	site.Directories.Data = filepath.Join(root, "data")
	tests := []struct {
		Node              client.Node
		Flags             masterTmplFlags
		Content, Rendered string
	}{
		{client.Node{Title: "Foo Child 2", Description: "Bar!", Path: "/foo/child2"}, 0,
			"The content.", `Foo Child 2
Bar!
#|/bar/|Bar#|/cruz/|Cruz#a|/foo/|Foo
#|/foo/child1/|Foo Child 1#a|/foo/child2/|Foo Child 2#c|/foo/child2/child1/|Foo Child 2 Child 1
The content.`}}
	for i, v := range tests {
		session := client.Session{
			User: &client.User{Login: "admin", Name: "Administrator"}}
		env := masterTmplEnv{v.Node, &session, "", "", 0}
		ret := renderInMaster(renderer, []byte(v.Content), env, new(settings),
			site, "")
		// Collapse blank lines left by the template's empty actions before
		// comparing.
		for strings.Contains(ret, "\n\n") {
			ret = strings.Replace(ret, "\n\n", "\n", -1)
		}
		if ret != v.Rendered {
			t.Errorf(`Test %v: renderInMaster(...) returned:
================================================================
%v
================================================================
Should be:
================================================================
%v
================================================================`,
				i, ret, v.Rendered)
		}
	}
}
Disable broken tests.
// This file is part of Monsti, a web content management system.
// Copyright 2012-2013 Christian Neumann
//
// Monsti is free software: you can redistribute it and/or modify it under the
// terms of the GNU Affero General Public License as published by the Free
// Software Foundation, either version 3 of the License, or (at your option) any
// later version.
//
// Monsti is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
// A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
// details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Monsti. If not, see <http://www.gnu.org/licenses/>.
package main
import (
"testing"
)
func TestSplitFirstDir(t *testing.T) {
tests := []struct {
Path, First string
}{
{"", ""},
{"foo", "foo"},
{"foo/", "foo"},
{"foo/bar", "foo"},
{"/", ""},
{"/foo", "foo"},
{"/foo/", "foo"},
{"/foo/bar", "foo"}}
for _, test := range tests {
ret := splitFirstDir(test.Path)
if ret != test.First {
t.Errorf("splitFirstDir(%q) = %q, should be %q", test.Path, ret,
test.First)
}
}
}
/*
func TestRenderInMaster(t *testing.T) {
masterTmpl := `{{.Page.Title}}
{{.Page.Description}}
{{range .Page.PrimaryNav}}#{{if .Active}}a{{end}}|{{.Target}}|{{.Name}}{{end}}
{{if .Page.ShowSecondaryNav}}
{{range .Page.SecondaryNav}}#{{if .Active}}a{{end}}{{if .Child}}c{{end}}|{{.Target}}|{{.Name}}{{end}}
{{end}}
{{with .Page.Sidebar}}
{{.}}
{{end}}
{{.Page.Content}}`
root, cleanup, err := utesting.CreateDirectoryTree(map[string]string{
"/data/foo/node.yaml": "title: Foo",
"/data/foo/child1/node.yaml": "title: Foo Child 1",
"/data/foo/child2/node.yaml": "title: Foo Child 2",
"/data/foo/child2/child1/node.yaml": "title: Foo Child 2 Child 1",
"/data/bar/node.yaml": "title: Bar",
"/data/cruz/node.yaml": "title: Cruz",
"/templates/master.html": masterTmpl}, "_monsti_TestRenderInMaster")
if err != nil {
t.Fatalf("Could not create temporary files: ", err)
}
defer cleanup()
renderer := template.Renderer{Root: filepath.Join(root, "templates")}
site := util.SiteSettings{}
settings := util.MonstiSettings{}
settings.Directories.Data = filepath.Join(root, "data")
tests := []struct {
Node service.NodeInfo
Flags masterTmplFlags
Content, Rendered string
}{
{service.NodeInfo{Title: "Foo Child 2", Description: "Bar!",
Path: "/foo/child2"}, 0,
"The content.", `Foo Child 2
Bar!
#|/bar/|Bar#|/cruz/|Cruz#a|/foo/|Foo
#|/foo/child1/|Foo Child 1#a|/foo/child2/|Foo Child 2#c|/foo/child2/child1/|Foo Child 2 Child 1
The content.`}}
for i, v := range tests {
session := service.UserSession{
User: &service.User{Login: "admin", Name: "Administrator"}}
env := masterTmplEnv{v.Node, &session, "", "", 0}
ret := renderInMaster(renderer, []byte(v.Content), env, &settings,
site, "")
for strings.Contains(ret, "\n\n") {
ret = strings.Replace(ret, "\n\n", "\n", -1)
}
if ret != v.Rendered {
t.Errorf(`Test %v: renderInMaster(...) returned:
================================================================
%v
================================================================
Should be:
================================================================
%v
================================================================`,
i, ret, v.Rendered)
}
}
}
*/
|
package aranGO
Added replication
package aranGO
import(
"time"
"errors"
)
type CollectionParameters struct {
CollectionOptions
Id string `json:"cid"`
Version int `json:"version"`
Deleted bool `json:"deleted"`
}
type CollectionDump struct {
Parameters CollectionParameters `json:"parameters"`
Indexes []Index
}
type ReplicationState struct {
Running bool `json:"running"`
LastTick string `json:"lastLogTick"`
TotalEvents int64 `json:"totalEvents"`
Time time.Time `json:"time"`
}
type ReplicationInventory struct {
Collections []CollectionDump `json:"collections"`
State ReplicationState `json:"state"`
Tick string `json:"tick"`
}
// Returns replication inventory
// (GET .../replication/inventory): the collections and current replication
// state of the database. An HTTP 405/500 from the server is reported as an
// error; all other statuses are treated as success.
func (db *Database) Inventory() (*ReplicationInventory, error) {
	var rinv ReplicationInventory
	// The same rinv is passed as both the success and error decode target.
	res, err := db.get("replication", "inventory", "GET", nil, &rinv, &rinv)
	if err != nil {
		return nil, err
	}
	switch res.Status() {
	case 405, 500:
		return nil, errors.New("Error when dumping replication info")
	default:
		return &rinv, nil
	}
}
type ServerInfo struct {
Id string `json:"serverId"`
Version string `json:"version"`
}
type Logger struct {
State ReplicationState `json:"state"`
Server ServerInfo `json:"server"`
Client []string `json:"clients"`
}
func (db *Database) LoggerState() (*Logger,error){
var log Logger
res , err:= db.get("replication","logger-state","GET",nil,&log,&log)
if err != nil {
return nil,err
}
switch res.Status() {
case 405,500:
return nil,errors.New("Logger state could not be determined")
default:
return &log,nil
}
}
// ApplierConf mirrors the ArangoDB replication applier configuration
// (the payload of .../replication/applier-config).
type ApplierConf struct {
	Endpoint string `json:"endpoint,omitempty"`
	Database string `json:"database,omitempty"`
	Username string `json:"username,omitempty"`
	// Password was previously an unexported field ("password") carrying a
	// json tag; encoding/json ignores unexported fields, so the password was
	// silently dropped from every request. Exporting it makes the tag
	// effective.
	Password       string `json:"password,omitempty"`
	Ssl            int    `json:"sslProtocol,omitempty"`
	ReConnect      int    `json:"maxConnectRetries,omitempty"`
	ConnectTimeout int    `json:"connectTimeOut,omitempty"`
	RequestTimeout int    `json:"requestTimeOut,omitempty"`
	Chunk          int    `json:"chunkSize,omitempty"`
	AutoStart      bool   `json:"autoStart,omitempty"`
	AdaptPolling   bool   `json:"adaptivePolling,omitempty"`
}
type ApplierProgress struct {
Time time.Time `json:"time"`
Message string `json:"message"`
Fails int `json:"failedConnects"`
}
type ApplierState struct {
Running bool `json:"running"`
Progress ApplierProgress `json:"progress"`
TotalRequests int `json:"totalRequests"`
FailConnects int `json:"totalFailedConnects"`
TotalEvents int `json:"totalEvents"`
Time time.Time `json:"time"`
}
type Applier struct {
State ApplierState `json:"state"`
Server ServerInfo `json:"server"`
Endpoint string `json:"endpoint"`
Database string `json:"database"`
}
// Applier returns the current state of the replication applier
// (GET .../replication/applier-state).
//
// Bug fix: this previously requested "applier-config" — the same endpoint as
// ApplierConf — while decoding the response into the state struct, so the
// State field was never populated. The applier state lives at
// "applier-state".
func (db *Database) Applier() (*Applier, error) {
	var appl Applier
	res, err := db.get("replication", "applier-state", "GET", nil, &appl, &appl)
	if err != nil {
		return nil, err
	}
	switch res.Status() {
	case 405, 500:
		return nil, errors.New("Applier state could not be determined")
	default:
		return &appl, nil
	}
}
func (db *Database) ApplierConf() (*ApplierConf,error){
var appConf ApplierConf
res , err:= db.get("replication","applier-config","GET",nil,&appConf,&appConf)
if err != nil {
return nil,err
}
switch res.Status() {
case 405,500:
return nil,errors.New("Applier state could not be determined")
default:
return &appConf,nil
}
}
func (db *Database) SetApplierConf(appconf *ApplierConf) error {
if appconf == nil {
return errors.New("Invalid config")
}
res , err:= db.send("replication","applier-config","PUT",appconf,nil,nil)
if err != nil {
return err
}
switch res.Status() {
case 400:
return errors.New("Configuration is incomplete or malformed or applier running")
case 405,500:
return errors.New("Error occurred while assembling the response.")
default:
return nil
}
}
func (db *Database) StartReplication() error {
res , err:= db.send("replication","applier-start","PUT",nil,nil,nil)
if err != nil {
return err
}
switch res.Status() {
case 400:
return errors.New("Invalid applier configuration")
case 405,500:
return errors.New("Error starting replication")
default:
return nil
}
}
// StopReplication stops the replication applier on the database
// (PUT .../replication/applier-stop).
func (db *Database) StopReplication() error {
	res, err := db.send("replication", "applier-stop", "PUT", nil, nil, nil)
	if err != nil {
		return err
	}
	switch res.Status() {
	case 405, 500:
		// Fixed typo in the error message: "stoping" -> "stopping".
		return errors.New("Error stopping replication")
	default:
		return nil
	}
}
func (db *Database) ServerID() string {
server := map[string]string {}
_, err:= db.get("replication","server-id","GET",nil,&server,&server)
if err != nil {
return ""
}
return server["serverId"]
}
|
package agent
import (
	"errors"
	"fmt"
	"os"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/defaults"
	"github.com/aws/aws-sdk-go/aws/endpoints"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/sts"

	"github.com/buildkite/agent/v3/logger"
)
type credentialsProvider struct {
retrieved bool
}
func (e *credentialsProvider) Retrieve() (creds credentials.Value, err error) {
e.retrieved = false
creds.AccessKeyID = os.Getenv("BUILDKITE_S3_ACCESS_KEY_ID")
if creds.AccessKeyID == "" {
creds.AccessKeyID = os.Getenv("BUILDKITE_S3_ACCESS_KEY")
}
creds.SecretAccessKey = os.Getenv("BUILDKITE_S3_SECRET_ACCESS_KEY")
if creds.SecretAccessKey == "" {
creds.SecretAccessKey = os.Getenv("BUILDKITE_S3_SECRET_KEY")
}
creds.SessionToken = os.Getenv("BUILDKITE_S3_SESSION_TOKEN")
if creds.AccessKeyID == "" {
err = errors.New("BUILDKITE_S3_ACCESS_KEY_ID or BUILDKITE_S3_ACCESS_KEY not found in environment")
}
if creds.SecretAccessKey == "" {
err = errors.New("BUILDKITE_S3_SECRET_ACCESS_KEY or BUILDKITE_S3_SECRET_KEY not found in environment")
}
e.retrieved = true
return
}
func (e *credentialsProvider) IsExpired() bool {
return !e.retrieved
}
func awsS3RegionFromEnv() (region string, err error) {
regionName := "us-east-1"
if os.Getenv("BUILDKITE_S3_DEFAULT_REGION") != "" {
regionName = os.Getenv("BUILDKITE_S3_DEFAULT_REGION")
} else {
var err error
regionName, err = awsRegion()
if err != nil {
return "", err
}
}
// Check to make sure the region exists.
resolver := endpoints.DefaultResolver()
partitions := resolver.(endpoints.EnumPartitions).Partitions()
for _, p := range partitions {
for id := range p.Regions() {
if id == regionName {
return regionName, nil
}
}
}
return "", fmt.Errorf("Unknown AWS S3 Region %q", regionName)
}
func awsS3Session(region string) (*session.Session, error) {
// Chicken and egg... but this is kinda how they do it in the sdk
sess, err := session.NewSession()
if err != nil {
return nil, err
}
sess.Config.Region = aws.String(region)
sess.Config.Credentials = credentials.NewChainCredentials(
[]credentials.Provider{
&credentialsProvider{},
&credentials.EnvProvider{},
webIdentityRoleProvider(sess),
// EC2 and ECS meta-data providers
defaults.RemoteCredProvider(*sess.Config, sess.Handlers),
})
return sess, nil
}
func webIdentityRoleProvider(sess *session.Session) *stscreds.WebIdentityRoleProvider {
return stscreds.NewWebIdentityRoleProvider(
sts.New(sess),
os.Getenv("AWS_ROLE_ARN"),
os.Getenv("AWS_ROLE_SESSION_NAME"),
os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"),
)
}
// newS3Client builds an S3 client for bucket, resolving the region from the
// environment and verifying credentials with a zero-key ListObjects call so
// misconfiguration surfaces here with an actionable message.
func newS3Client(l logger.Logger, bucket string) (*s3.S3, error) {
	region, err := awsS3RegionFromEnv()
	if err != nil {
		return nil, err
	}

	sess, err := awsS3Session(region)
	if err != nil {
		return nil, err
	}

	l.Debug("Testing AWS S3 credentials and finding bucket `%s` in region `%s`...", bucket, region)

	s3client := s3.New(sess)

	// Test the authentication by trying to list the first 0 objects in the bucket.
	_, err = s3client.ListObjects(&s3.ListObjectsInput{
		Bucket:  aws.String(bucket),
		MaxKeys: aws.Int64(0),
	})
	if err != nil {
		if err == credentials.ErrNoValidProvidersFoundInChain {
			hasProxy := os.Getenv("HTTP_PROXY") != "" || os.Getenv("HTTPS_PROXY") != ""
			// Fixed compile error: strings have no .Contains method; use
			// strings.Contains instead.
			hasNoProxyIdmsException := strings.Contains(os.Getenv("NO_PROXY"), "169.254.169.254")

			errorTitle := "Could not authenticate with AWS S3 using any of the included credential providers."

			if hasProxy && !hasNoProxyIdmsException {
				return nil, fmt.Errorf("%s Your HTTP proxy settings do not grant a NO_PROXY=169.254.169.254 exemption for the instance metadata service, instance profile credentials may not be retrievable via your HTTP proxy.", errorTitle)
			}

			return nil, fmt.Errorf("%s You can authenticate by setting Buildkite environment variables (BUILDKITE_S3_ACCESS_KEY_ID, BUILDKITE_S3_SECRET_ACCESS_KEY), AWS environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY), Web Identity environment variables (AWS_ROLE_ARN, AWS_ROLE_SESSION_NAME, AWS_WEB_IDENTITY_TOKEN_FILE), or if running on AWS EC2 ensuring network access to the EC2 Instance Metadata Service to use an instance profile’s IAM Role credentials.", errorTitle)
		}
		return nil, fmt.Errorf("Could not s3:ListObjects in your AWS S3 bucket `%s` in region `%s`: (%s)", bucket, region, err.Error())
	}

	return s3client, nil
}
Strings
package agent
import (
"errors"
"fmt"
"os"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/buildkite/agent/v3/logger"
)
// credentialsProvider sources static AWS credentials from the
// BUILDKITE_S3_* environment variables.
type credentialsProvider struct {
	retrieved bool // set by Retrieve; IsExpired reports its negation
}
// Retrieve reads credentials from the BUILDKITE_S3_* environment variables,
// preferring the canonical names and falling back to the legacy ones. On a
// missing key or secret an error is returned alongside the partial value.
func (e *credentialsProvider) Retrieve() (creds credentials.Value, err error) {
	e.retrieved = false
	firstEnv := func(names ...string) string {
		for _, n := range names {
			if v := os.Getenv(n); v != "" {
				return v
			}
		}
		return ""
	}
	creds.AccessKeyID = firstEnv("BUILDKITE_S3_ACCESS_KEY_ID", "BUILDKITE_S3_ACCESS_KEY")
	creds.SecretAccessKey = firstEnv("BUILDKITE_S3_SECRET_ACCESS_KEY", "BUILDKITE_S3_SECRET_KEY")
	creds.SessionToken = os.Getenv("BUILDKITE_S3_SESSION_TOKEN")
	if creds.AccessKeyID == "" {
		err = errors.New("BUILDKITE_S3_ACCESS_KEY_ID or BUILDKITE_S3_ACCESS_KEY not found in environment")
	}
	if creds.SecretAccessKey == "" {
		err = errors.New("BUILDKITE_S3_SECRET_ACCESS_KEY or BUILDKITE_S3_SECRET_KEY not found in environment")
	}
	e.retrieved = true
	return
}
// IsExpired reports whether Retrieve still needs to run.
func (e *credentialsProvider) IsExpired() bool {
	if e.retrieved {
		return false
	}
	return true
}
// awsS3RegionFromEnv resolves the S3 region, preferring
// BUILDKITE_S3_DEFAULT_REGION and falling back to the agent's AWS region,
// then validates the result against the SDK's known region list.
func awsS3RegionFromEnv() (region string, err error) {
	candidate := "us-east-1"
	if fromEnv := os.Getenv("BUILDKITE_S3_DEFAULT_REGION"); fromEnv != "" {
		candidate = fromEnv
	} else {
		var regionErr error
		candidate, regionErr = awsRegion()
		if regionErr != nil {
			return "", regionErr
		}
	}
	// Check to make sure the region exists.
	resolver := endpoints.DefaultResolver()
	for _, partition := range resolver.(endpoints.EnumPartitions).Partitions() {
		for id := range partition.Regions() {
			if id == candidate {
				return candidate, nil
			}
		}
	}
	return "", fmt.Errorf("Unknown AWS S3 Region %q", candidate)
}
// awsS3Session creates an SDK session pinned to region whose credentials are
// resolved through a chain: Buildkite env vars, AWS env vars, web identity,
// and finally the EC2/ECS metadata providers.
func awsS3Session(region string) (*session.Session, error) {
	// Chicken and egg... but this is kinda how they do it in the sdk
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	providers := []credentials.Provider{
		&credentialsProvider{},
		&credentials.EnvProvider{},
		webIdentityRoleProvider(sess),
		// EC2 and ECS meta-data providers
		defaults.RemoteCredProvider(*sess.Config, sess.Handlers),
	}
	sess.Config.Region = aws.String(region)
	sess.Config.Credentials = credentials.NewChainCredentials(providers)
	return sess, nil
}
// webIdentityRoleProvider builds an STS web-identity credentials provider
// from the standard AWS_* environment variables.
func webIdentityRoleProvider(sess *session.Session) *stscreds.WebIdentityRoleProvider {
	var (
		roleARN   = os.Getenv("AWS_ROLE_ARN")
		roleName  = os.Getenv("AWS_ROLE_SESSION_NAME")
		tokenPath = os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")
	)
	return stscreds.NewWebIdentityRoleProvider(sts.New(sess), roleARN, roleName, tokenPath)
}
// newS3Client builds an S3 client for bucket, resolving the region from the
// environment and verifying the credential chain by issuing a zero-key
// ListObjects call before returning.
func newS3Client(l logger.Logger, bucket string) (*s3.S3, error) {
	region, err := awsS3RegionFromEnv()
	if err != nil {
		return nil, err
	}
	sess, err := awsS3Session(region)
	if err != nil {
		return nil, err
	}
	l.Debug("Testing AWS S3 credentials and finding bucket `%s` in region `%s`...", bucket, region)
	s3client := s3.New(sess)
	// Test the authentication by trying to list the first 0 objects in the bucket.
	_, err = s3client.ListObjects(&s3.ListObjectsInput{
		Bucket:  aws.String(bucket),
		MaxKeys: aws.Int64(0),
	})
	if err != nil {
		if err == credentials.ErrNoValidProvidersFoundInChain {
			// No provider yielded credentials at all. If traffic is forced
			// through a proxy without a NO_PROXY exemption for the EC2
			// metadata IP, instance profile credentials cannot be fetched,
			// so surface that as the likely cause.
			hasProxy := os.Getenv("HTTP_PROXY") != "" || os.Getenv("HTTPS_PROXY") != ""
			hasNoProxyIdmsException := strings.Contains(os.Getenv("NO_PROXY"), "169.254.169.254")
			errorTitle := "Could not authenticate with AWS S3 using any of the included credential providers."
			if hasProxy && !hasNoProxyIdmsException {
				return nil, fmt.Errorf("%s Your HTTP proxy settings do not grant a NO_PROXY=169.254.169.254 exemption for the instance metadata service, instance profile credentials may not be retrievable via your HTTP proxy.", errorTitle)
			}
			return nil, fmt.Errorf("%s You can authenticate by setting Buildkite environment variables (BUILDKITE_S3_ACCESS_KEY_ID, BUILDKITE_S3_SECRET_ACCESS_KEY), AWS environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY), Web Identity environment variables (AWS_ROLE_ARN, AWS_ROLE_SESSION_NAME, AWS_WEB_IDENTITY_TOKEN_FILE), or if running on AWS EC2 ensuring network access to the EC2 Instance Metadata Service to use an instance profile’s IAM Role credentials.", errorTitle)
		}
		return nil, fmt.Errorf("Could not s3:ListObjects in your AWS S3 bucket `%s` in region `%s`: (%s)", bucket, region, err.Error())
	}
	return s3client, nil
}
|
// Copyright (C) 2013 - 2014, Lefteris Zafiris <zaf.000@gmail.com>
// This program is free software, distributed under the terms of
// the BSD 3-Clause License. See the LICENSE file
// at the top of the source tree.
package agi
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"testing"
)
// AGI environment data: a complete, valid variable dump as sent by Asterisk
// at session start.
var env = []byte(`agi_network: yes
agi_network_script: foo?
agi_request: agi://127.0.0.1/foo?
agi_channel: SIP/1234-00000000
agi_language: en
agi_type: SIP
agi_uniqueid: 1397044468.0
agi_version: 0.1
agi_callerid: 1001
agi_calleridname: 1001
agi_callingpres: 67
agi_callingani2: 0
agi_callington: 0
agi_callingtns: 0
agi_dnid: 123456
agi_rdnis: unknown
agi_context: default
agi_extension: 123456
agi_priority: 1
agi_enhanced: 0.0
agi_accountcode: 0
agi_threadid: -1289290944
agi_arg_1: argument1
agi_arg_2: argument 2
agi_arg_3: 3
`)

// Malformed environment data used to exercise parseEnv's error handling.
var envInv = []byte(`agi_:
agi_arg_1 foo
agi_type:
agi_verylongrandomparameter: 0
a
`)

// AGI Responses: one of each response class (200, 5xx, multi-line 520,
// HANGUP) used by the response-parsing tests.
var rep = []byte(`200 result=1
200 result=1 (speech) endpos=1234 results=foo bar
510 Invalid or unknown command
511 Command Not Permitted on a dead channel
520 Invalid command syntax. Proper usage not available.
520-Invalid command syntax. Proper usage follows:
Answers channel if not already in answer state. Returns -1 on channel failure, or 0 if successful.520 End of proper usage.
HANGUP
`)

// Malformed responses that parseResponse must reject.
var repInv = []byte(`200
200 result 1
some random reply that we are not supposed to get
`)

// A run of valid responses used by the session benchmark.
var repVal = []byte(`200 result=1
200 result=1
200 result=1 endpos=1234
200 result=1
200 result=1
200 result=1
HANGUP
`)
// TestParseEnv exercises AGI environment parsing with both valid and
// invalid input.
func TestParseEnv(t *testing.T) {
	// Valid environment data
	sess := New()
	sess.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(env)),
		bufio.NewWriter(ioutil.Discard),
	)
	if err := sess.parseEnv(); err != nil {
		t.Fatalf("parseEnv failed: %v", err)
	}
	if len(sess.Env) != 25 {
		t.Errorf("Error parsing complete AGI environment var list. Expected length: 25, reported: %d", len(sess.Env))
	}
	if sess.Env["arg_1"] != "argument1" {
		t.Errorf("Error parsing arg1. Expecting: argument1, got: %s", sess.Env["arg_1"])
	}
	if sess.Env["arg_2"] != "argument 2" {
		t.Errorf("Error parsing arg2. Expecting: argument 2, got: %s", sess.Env["arg_2"])
	}
	if sess.Env["arg_3"] != "3" {
		t.Errorf("Error parsing arg3. Expecting: 3, got: %s", sess.Env["arg_3"])
	}
	// invalid environment data
	bad := New()
	bad.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(envInv)),
		bufio.NewWriter(ioutil.Discard),
	)
	if err := bad.parseEnv(); err == nil {
		t.Fatalf("parseEnv failed to detect invalid input: %v", bad.Env)
	}
}
// Test AGI response parsing.
// NOTE(review): the function name misspells "Response"; it is kept as-is
// because renaming exported test functions is pure churn — Go test discovery
// only requires the Test prefix.
func TestParseRespomse(t *testing.T) {
	// Valid responses
	a := New()
	a.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(rep)),
		bufio.NewWriter(ioutil.Discard),
	)
	r, err := a.parseResponse()
	if err != nil {
		t.Errorf("Error parsing AGI 200 response: %v", err)
	}
	if r.Res != 1 {
		t.Errorf("Error parsing AGI 200 response. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "" {
		// BUG FIX: r.Dat is a string, so the %d verb was wrong (go vet error).
		t.Errorf("Error parsing AGI 200 response. Got unexpected data: %s", r.Dat)
	}
	r, err = a.parseResponse()
	if err != nil {
		t.Errorf("Error parsing AGI complex 200 response: %v", err)
	}
	if r.Res != 1 {
		t.Errorf("Error parsing AGI complex 200 response. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "(speech) endpos=1234 results=foo bar" {
		t.Errorf("Error parsing AGI complex 200 response. Expecting: (speech) endpos=1234 results=foo bar, got: %s", r.Dat)
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 510 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 511 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 520 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 520 response containing usage details.")
	}
	_, err = a.parseResponse()
	if err == nil || err.Error() != "HANGUP" {
		// BUG FIX: "reguest" -> "request" in the failure message.
		t.Error("Failed to detect a HANGUP request.")
	}
	// Invalid responses
	b := New()
	b.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(repInv)),
		bufio.NewWriter(ioutil.Discard),
	)
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a partial AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a malformed AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing an empty AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing an erroneous AGI response.")
	}
}
// TestCmd checks both the wire format of a generated AGI command and the
// parsing of its canned response.
func TestCmd(t *testing.T) {
	var r Reply
	var b []byte
	buf := bytes.NewBuffer(b)
	a := New()
	// BUG FIX: appending directly to the package-level env slice could write
	// into its backing array and corrupt the shared fixture; build the
	// session script in a fresh slice instead.
	const reply = "200 result=1 endpos=1234\n"
	data := make([]byte, 0, len(env)+len(reply))
	data = append(data, env...)
	data = append(data, reply...)
	err := a.Init(
		bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(data)),
			bufio.NewWriter(io.Writer(buf)),
		),
	)
	if err != nil {
		t.Fatalf("Failed to initialize new AGI session: %v", err)
	}
	r, err = a.StreamFile("echo-test", "*#")
	if err != nil {
		// BUG FIX: "responce" -> "response".
		t.Errorf("Failed to parse AGI response: %v", err)
	}
	if buf.Len() == 0 {
		t.Error("Failed to send AGI command")
	}
	str, _ := buf.ReadString(10)
	if str != "STREAM FILE echo-test \"*#\"\n" {
		// BUG FIX: "sent" -> "send".
		t.Errorf("Failed to send properly formatted AGI command: %s", str)
	}
	if r.Res != 1 {
		t.Errorf("Failed to get the right numeric result. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "1234" {
		t.Errorf("Failed to properly parse the rest of the response. Expecting: 1234, got: %s", r.Dat)
	}
}
// BenchmarkParseEnv measures the cost of initialising a session from the
// sample AGI environment.
func BenchmarkParseEnv(b *testing.B) {
	for n := 0; n < b.N; n++ {
		sess := New()
		sess.Init(bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(env)),
			nil,
		))
	}
}
// BenchmarkParseRes measures response parsing over a canned mix of valid
// and invalid replies.
func BenchmarkParseRes(b *testing.B) {
	script := make([]byte, 0, len(repVal)+len(rep))
	script = append(script, repVal...)
	script = append(script, rep...)
	sess := New()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sess.buf = bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(script)),
			nil,
		)
		for j := 0; j < 14; j++ {
			sess.parseResponse()
		}
	}
}
// BenchmarkSession measures a full session: init plus a handful of commands
// against canned valid responses.
func BenchmarkSession(b *testing.B) {
	script := make([]byte, 0, len(env)+len(repVal))
	script = append(script, env...)
	script = append(script, repVal...)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sess := New()
		sess.Init(bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(script)),
			bufio.NewWriter(ioutil.Discard),
		))
		sess.Answer()
		sess.Verbose("Hello World")
		sess.StreamFile("echo-test", "1234567890*#")
		sess.Exec("Wait", "3")
		sess.Verbose("Goodbye World")
		sess.Hangup()
	}
}
Some more test cases
// Copyright (C) 2013 - 2014, Lefteris Zafiris <zaf.000@gmail.com>
// This program is free software, distributed under the terms of
// the BSD 3-Clause License. See the LICENSE file
// at the top of the source tree.
package agi
import (
"bufio"
"bytes"
"io"
"io/ioutil"
"testing"
)
// AGI environment data: a complete, valid variable dump as sent by Asterisk
// at session start.
var env = []byte(`agi_network: yes
agi_network_script: foo?
agi_request: agi://127.0.0.1/foo?
agi_channel: SIP/1234-00000000
agi_language: en
agi_type: SIP
agi_uniqueid: 1397044468.0
agi_version: 0.1
agi_callerid: 1001
agi_calleridname: 1001
agi_callingpres: 67
agi_callingani2: 0
agi_callington: 0
agi_callingtns: 0
agi_dnid: 123456
agi_rdnis: unknown
agi_context: default
agi_extension: 123456
agi_priority: 1
agi_enhanced: 0.0
agi_accountcode: 0
agi_threadid: -1289290944
agi_arg_1: argument1
agi_arg_2: argument 2
agi_arg_3: 3
`)

// Malformed environment data used to exercise parseEnv's error handling.
var envInv = []byte(`agi_:
agi_arg_1 foo
agi_type:
agi_verylongrandomparameter: 0
a
`)

// AGI Responses: one of each response class (200, 5xx, multi-line 520,
// HANGUP) used by the response-parsing tests.
var rep = []byte(`200 result=1
200 result=1 (speech) endpos=1234 results=foo bar
510 Invalid or unknown command
511 Command Not Permitted on a dead channel
520 Invalid command syntax. Proper usage not available.
520-Invalid command syntax. Proper usage follows:
Answers channel if not already in answer state. Returns -1 on channel failure, or 0 if successful.520 End of proper usage.
HANGUP
`)

// Malformed responses that parseResponse must reject, including truncated
// and value-less "result=" variants.
var repInv = []byte(`200
200 result 1
200 result= 1
200 result=
some random reply that we are not supposed to get
`)

// A run of valid responses used by the session benchmark.
var repVal = []byte(`200 result=1
200 result=1
200 result=1 endpos=1234
200 result=1
200 result=1
200 result=1
HANGUP
`)
// TestParseEnv exercises AGI environment parsing with both valid and
// invalid input.
func TestParseEnv(t *testing.T) {
	// Valid environment data
	sess := New()
	sess.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(env)),
		bufio.NewWriter(ioutil.Discard),
	)
	if err := sess.parseEnv(); err != nil {
		t.Fatalf("parseEnv failed: %v", err)
	}
	if len(sess.Env) != 25 {
		t.Errorf("Error parsing complete AGI environment var list. Expected length: 25, reported: %d", len(sess.Env))
	}
	if sess.Env["arg_1"] != "argument1" {
		t.Errorf("Error parsing arg1. Expecting: argument1, got: %s", sess.Env["arg_1"])
	}
	if sess.Env["arg_2"] != "argument 2" {
		t.Errorf("Error parsing arg2. Expecting: argument 2, got: %s", sess.Env["arg_2"])
	}
	if sess.Env["arg_3"] != "3" {
		t.Errorf("Error parsing arg3. Expecting: 3, got: %s", sess.Env["arg_3"])
	}
	// invalid environment data
	bad := New()
	bad.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(envInv)),
		bufio.NewWriter(ioutil.Discard),
	)
	if err := bad.parseEnv(); err == nil {
		t.Fatalf("parseEnv failed to detect invalid input: %v", bad.Env)
	}
}
// Test AGI response parsing.
// NOTE(review): the function name misspells "Response"; it is kept as-is
// because renaming exported test functions is pure churn — Go test discovery
// only requires the Test prefix.
func TestParseRespomse(t *testing.T) {
	// Valid responses
	a := New()
	a.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(rep)),
		bufio.NewWriter(ioutil.Discard),
	)
	r, err := a.parseResponse()
	if err != nil {
		t.Errorf("Error parsing AGI 200 response: %v", err)
	}
	if r.Res != 1 {
		t.Errorf("Error parsing AGI 200 response. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "" {
		// BUG FIX: r.Dat is a string, so the %d verb was wrong (go vet error).
		t.Errorf("Error parsing AGI 200 response. Got unexpected data: %s", r.Dat)
	}
	r, err = a.parseResponse()
	if err != nil {
		t.Errorf("Error parsing AGI complex 200 response: %v", err)
	}
	if r.Res != 1 {
		t.Errorf("Error parsing AGI complex 200 response. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "(speech) endpos=1234 results=foo bar" {
		t.Errorf("Error parsing AGI complex 200 response. Expecting: (speech) endpos=1234 results=foo bar, got: %s", r.Dat)
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 510 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 511 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 520 response.")
	}
	_, err = a.parseResponse()
	if err == nil {
		t.Error("No error after parsing AGI 520 response containing usage details.")
	}
	_, err = a.parseResponse()
	if err == nil || err.Error() != "HANGUP" {
		// BUG FIX: "reguest" -> "request" in the failure message.
		t.Error("Failed to detect a HANGUP request.")
	}
	// Invalid responses
	b := New()
	b.buf = bufio.NewReadWriter(
		bufio.NewReader(bytes.NewReader(repInv)),
		bufio.NewWriter(ioutil.Discard),
	)
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a partial AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a malformed AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a malformed AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing a malformed AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing an empty AGI response.")
	}
	_, err = b.parseResponse()
	if err == nil {
		t.Error("No error after parsing an erroneous AGI response.")
	}
}
// TestCmd checks both the wire format of a generated AGI command and the
// parsing of its canned response.
func TestCmd(t *testing.T) {
	var r Reply
	var b []byte
	buf := bytes.NewBuffer(b)
	a := New()
	// BUG FIX: appending directly to the package-level env slice could write
	// into its backing array and corrupt the shared fixture; build the
	// session script in a fresh slice instead.
	const reply = "200 result=1 endpos=1234\n"
	data := make([]byte, 0, len(env)+len(reply))
	data = append(data, env...)
	data = append(data, reply...)
	err := a.Init(
		bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(data)),
			bufio.NewWriter(io.Writer(buf)),
		),
	)
	if err != nil {
		t.Fatalf("Failed to initialize new AGI session: %v", err)
	}
	r, err = a.StreamFile("echo-test", "*#")
	if err != nil {
		// BUG FIX: "responce" -> "response".
		t.Errorf("Failed to parse AGI response: %v", err)
	}
	if buf.Len() == 0 {
		t.Error("Failed to send AGI command")
	}
	str, _ := buf.ReadString(10)
	if str != "STREAM FILE echo-test \"*#\"\n" {
		// BUG FIX: "sent" -> "send".
		t.Errorf("Failed to send properly formatted AGI command: %s", str)
	}
	if r.Res != 1 {
		t.Errorf("Failed to get the right numeric result. Expecting: 1, got: %d", r.Res)
	}
	if r.Dat != "1234" {
		t.Errorf("Failed to properly parse the rest of the response. Expecting: 1234, got: %s", r.Dat)
	}
}
// BenchmarkParseEnv measures the cost of initialising a session from the
// sample AGI environment.
func BenchmarkParseEnv(b *testing.B) {
	for n := 0; n < b.N; n++ {
		sess := New()
		sess.Init(bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(env)),
			nil,
		))
	}
}
// BenchmarkParseRes measures response parsing over a canned mix of valid
// and invalid replies.
func BenchmarkParseRes(b *testing.B) {
	script := make([]byte, 0, len(repVal)+len(rep))
	script = append(script, repVal...)
	script = append(script, rep...)
	sess := New()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sess.buf = bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(script)),
			nil,
		)
		for j := 0; j < 14; j++ {
			sess.parseResponse()
		}
	}
}
// BenchmarkSession measures a full session: init plus a handful of commands
// against canned valid responses.
func BenchmarkSession(b *testing.B) {
	script := make([]byte, 0, len(env)+len(repVal))
	script = append(script, env...)
	script = append(script, repVal...)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		sess := New()
		sess.Init(bufio.NewReadWriter(
			bufio.NewReader(bytes.NewReader(script)),
			bufio.NewWriter(ioutil.Discard),
		))
		sess.Answer()
		sess.Verbose("Hello World")
		sess.StreamFile("echo-test", "1234567890*#")
		sess.Exec("Wait", "3")
		sess.Verbose("Goodbye World")
		sess.Hangup()
	}
}
|
package social
import (
"fmt"
"gnd.la/app"
"gnd.la/log"
"gnd.la/tasks"
"time"
)
const (
	// pollInterval is how often the scheduled task wakes up to check
	// whether a new item should be shared.
	pollInterval = 5 * time.Minute
)

// Sharer periodically shares items obtained from a ShareProvider on a
// social Service.
type Sharer struct {
	// Name indicates the name of the gnd.la/tasks.Task which will
	// be created when scheduling this Sharer. If empty, a name
	// will be derived from the service and the Sharer instance.
	Name string
	// service is the social service items are shared on.
	service Service
	// interval is the minimum time between two consecutive shares.
	interval time.Duration
	// provider supplies the items to share and records outcomes.
	provider ShareProvider
	// config holds the service configuration validated by NewSharer.
	config interface{}
	// task is the currently scheduled task; nil when not scheduled.
	task *tasks.Task
}
// share performs a single sharing pass: if the last share on the service is
// older than the configured interval, it asks the provider for the next item,
// shares it, and reports the outcome back to the provider.
func (s *Sharer) share(ctx *app.Context) {
	last, err := s.provider.LastShare(ctx, s.service)
	if err != nil {
		log.Errorf("error finding last share time on %s: %s", s.service, err)
		return
	}
	// Only proceed when at least s.interval has elapsed since the last share.
	if last.Before(time.Now().Add(-s.interval)) {
		item, err := s.provider.Item(ctx, s.service)
		if err != nil {
			// NOTE(review): this message says "next time" but the failing
			// call fetches the next *item* — looks like a copy-paste from
			// the LastShare branch; confirm before relying on log text.
			log.Errorf("error finding next time for sharing on %s: %s", s.service, err)
			return
		}
		if item != nil {
			result, err := Share(ctx, s.service, item, s.config)
			if err != nil {
				log.Errorf("error sharing on %s: %s", s.service, err)
			}
			// The provider is notified of every attempt, including failures.
			s.provider.Shared(ctx, s.service, item, result, err)
		}
	}
}
// Schedule (re-)registers the sharing task with the given app, stopping any
// previously scheduled task first. interval is the minimum time between
// shares; the task itself polls every pollInterval.
func (s *Sharer) Schedule(a *app.App, interval time.Duration) {
	if s.task != nil {
		s.task.Stop()
	}
	s.interval = interval
	name := s.Name
	if name == "" {
		// Derive a per-instance name from the service and this Sharer's
		// address.
		name = fmt.Sprintf("Sharer.%s.%p", s.service, s)
	}
	options := &tasks.Options{Name: name}
	// NOTE(review): assuming the trailing bool requests an immediate first
	// run — confirm against the tasks.Schedule signature.
	s.task = tasks.Schedule(a, s.share, options, pollInterval, true)
}
// Stop cancels the scheduled task, if any.
func (s *Sharer) Stop() {
	if s.task == nil {
		return
	}
	s.task.Stop()
	s.task = nil
}
// NewSharer returns a Sharer for the given service, provider and config.
// It panics when provider is nil or when the configuration fails validation.
func NewSharer(s Service, provider ShareProvider, config interface{}) *Sharer {
	if provider == nil {
		panic(fmt.Errorf("provider can't be nil"))
	}
	if err := validateConfig(s, config); err != nil {
		panic(err)
	}
	sharer := &Sharer{
		config:   config,
		provider: provider,
		service:  s,
	}
	return sharer
}
Fix compilation of gnd.la/social
package social
import (
"fmt"
"time"
"gnd.la/app"
"gnd.la/log"
"gnd.la/tasks"
)
const (
	// pollInterval is how often the scheduled task wakes up to check
	// whether a new item should be shared.
	pollInterval = 5 * time.Minute
)

// Sharer periodically shares items obtained from a ShareProvider on a
// social Service.
type Sharer struct {
	// Name indicates the name of the gnd.la/tasks.Task which will
	// be created when scheduling this Sharer. If empty, a name
	// will be derived from the service and the Sharer instance.
	Name string
	// service is the social service items are shared on.
	service Service
	// interval is the minimum time between two consecutive shares.
	interval time.Duration
	// provider supplies the items to share and records outcomes.
	provider ShareProvider
	// config holds the service configuration validated by NewSharer.
	config interface{}
	// task is the currently scheduled task; nil when not scheduled.
	task *tasks.Task
}
// share performs a single sharing pass: if the last share on the service is
// older than the configured interval, it asks the provider for the next item,
// shares it, and reports the outcome back to the provider.
func (s *Sharer) share(ctx *app.Context) {
	last, err := s.provider.LastShare(ctx, s.service)
	if err != nil {
		log.Errorf("error finding last share time on %s: %s", s.service, err)
		return
	}
	// Only proceed when at least s.interval has elapsed since the last share.
	if last.Before(time.Now().Add(-s.interval)) {
		item, err := s.provider.Item(ctx, s.service)
		if err != nil {
			// BUG FIX: the message previously said "next time for sharing",
			// a copy-paste from the LastShare branch — this call fetches the
			// next item to share.
			log.Errorf("error finding next item to share on %s: %s", s.service, err)
			return
		}
		if item != nil {
			result, err := Share(ctx, s.service, item, s.config)
			if err != nil {
				log.Errorf("error sharing on %s: %s", s.service, err)
			}
			// The provider is notified of every attempt, including failures.
			s.provider.Shared(ctx, s.service, item, result, err)
		}
	}
}
// Schedule (re-)registers the sharing task with the given app, stopping any
// previously scheduled task first. interval is the minimum time between
// shares; the task itself polls every pollInterval.
func (s *Sharer) Schedule(a *app.App, interval time.Duration) {
	if s.task != nil {
		s.task.Stop()
	}
	s.interval = interval
	taskName := s.Name
	if taskName == "" {
		// Derive a per-instance name from the service and this Sharer's
		// address.
		taskName = fmt.Sprintf("Sharer.%s.%p", s.service, s)
	}
	s.task = tasks.Schedule(a, s.share, pollInterval, tasks.Name(taskName), tasks.RunOnListen())
}
// Stop cancels the scheduled task, if any.
func (s *Sharer) Stop() {
	if s.task == nil {
		return
	}
	s.task.Stop()
	s.task = nil
}
// NewSharer returns a Sharer for the given service, provider and config.
// It panics when provider is nil or when the configuration fails validation.
func NewSharer(s Service, provider ShareProvider, config interface{}) *Sharer {
	if provider == nil {
		panic(fmt.Errorf("provider can't be nil"))
	}
	if err := validateConfig(s, config); err != nil {
		panic(err)
	}
	sharer := &Sharer{
		config:   config,
		provider: provider,
		service:  s,
	}
	return sharer
}
|
package metric
import (
"time"
"github.com/cloudfoundry/dropsonde/metrics"
)
// Counter is the name of a counter metric emitted through dropsonde.
type Counter string

// Increment increases the counter by one.
func (c Counter) Increment() {
	metrics.IncrementCounter(string(c))
}

// Add increases the counter by i.
func (c Counter) Add(i uint64) {
	metrics.AddToCounter(string(c), i)
}

// Duration is the name of a value metric reported in nanoseconds.
type Duration string

// Send reports the duration, in nanoseconds, under the metric's name.
func (name Duration) Send(duration time.Duration) {
	metrics.SendValue(string(name), float64(duration), "nanos")
}

// Mebibytes is the name of a value metric reported in MiB.
type Mebibytes string

// Send reports the size, in mebibytes, under the metric's name.
func (name Mebibytes) Send(mebibytes int) {
	metrics.SendValue(string(name), float64(mebibytes), "MiB")
}

// Metric is the name of a generic value metric (unit "Metric").
type Metric string

// Send reports the value under the metric's name.
func (name Metric) Send(value int) {
	metrics.SendValue(string(name), float64(value), "Metric")
}
add Req/s and B/s metrics units
Signed-off-by: Chris Brown <eb0819782e2e967e3d4f4d71eab0c647ed2659cf@pivotal.io>
package metric
import (
"time"
"github.com/cloudfoundry/dropsonde/metrics"
)
// Counter is the name of a counter metric emitted through dropsonde.
type Counter string

// Increment increases the counter by one.
func (c Counter) Increment() {
	metrics.IncrementCounter(string(c))
}

// Add increases the counter by i.
func (c Counter) Add(i uint64) {
	metrics.AddToCounter(string(c), i)
}

// Duration is the name of a value metric reported in nanoseconds.
type Duration string

// Send reports the duration, in nanoseconds, under the metric's name.
func (name Duration) Send(duration time.Duration) {
	metrics.SendValue(string(name), float64(duration), "nanos")
}

// Mebibytes is the name of a value metric reported in MiB.
type Mebibytes string

// Send reports the size, in mebibytes, under the metric's name.
func (name Mebibytes) Send(mebibytes int) {
	metrics.SendValue(string(name), float64(mebibytes), "MiB")
}

// Metric is the name of a generic value metric (unit "Metric").
type Metric string

// Send reports the value under the metric's name.
func (name Metric) Send(value int) {
	metrics.SendValue(string(name), float64(value), "Metric")
}

// Requests is the name of a value metric counting requests (unit "Req").
type Requests string

// Send reports the request count under the metric's name.
func (name Requests) Send(value int) {
	metrics.SendValue(string(name), float64(value), "Req")
}

// BytesPerSecond is the name of a throughput metric (unit "B/s").
type BytesPerSecond string

// Send reports the throughput under the metric's name.
func (name BytesPerSecond) Send(value float64) {
	metrics.SendValue(string(name), value, "B/s")
}

// RequestsPerSecond is the name of a request-rate metric (unit "Req/s").
type RequestsPerSecond string

// Send reports the request rate under the metric's name.
func (name RequestsPerSecond) Send(value float64) {
	metrics.SendValue(string(name), value, "Req/s")
}
|
revert changes to assets/bindata.go
|
/*
Copyright 2015 The ContainerOps Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"time"
)
//DockerV1 is a Docker Registry V1 repository record.
type DockerV1 struct {
	Id          int64      `json:"id" gorm:"primary_key"`
	Namespace   string     `json:"namespace" sql:"not null;type:varchar(255)" gorm:"unique_index:v1_repository"`
	Repository  string     `json:"repository" sql:"not null;type:varchar(255)" gorm:"unique_index:v1_repository"`
	JSON        string     `json:"json" sql:"null;type:text"`
	Manifests   string     `json:"manifests" sql:"null;type:text"`
	Agent       string     `json:"agent" sql:"null;type:text"`
	Description string     `json:"description" sql:"null;type:text"`
	Size        int64      `json:"size" sql:"default:0"`
	Locked      bool       `json:"locked" sql:"default:false"` //When create/update the repository, the locked will be true.
	CreatedAt   time.Time  `json:"created" sql:""`
	UpdatedAt   time.Time  `json:"updated" sql:""`
	DeletedAt   *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_v1".
func (r *DockerV1) TableName() string {
	return "docker_v1"
}

//DockerImageV1 is a Docker Registry V1 image layer record.
type DockerImageV1 struct {
	Id       int64      `json:"id" gorm:"primary_key"`
	ImageId  string     `json:"imageid" sql:"not null;unique;varchar(255)"`
	JSON     string     `json:"json" sql:"null;type:text"`
	Ancestry string     `json:"ancestry" sql:"null;type:text"`
	Checksum string     `json:"checksum" sql:"null;unique;type:varchar(255)"`
	Payload  string     `json:"payload" sql:"null;type:varchar(255)"`
	Path     string     `json:"path" sql:"null;type:text"`
	OSS      string     `json:"oss" sql:"null;type:text"`
	Size     int64      `json:"size" sql:"default:0"`
	Locked   bool       `json:"locked" sql:"default:false"`
	CreatedAt time.Time  `json:"created" sql:""`
	UpdatedAt time.Time  `json:"updated" sql:""`
	DeletedAt *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_image_v1".
func (i *DockerImageV1) TableName() string {
	return "docker_image_v1"
}

//DockerTagV1 maps a tag name to an image within a DockerV1 repository.
type DockerTagV1 struct {
	Id        int64      `json:"id" gorm:"primary_key"`
	DockerV1  int64      `json:"dockerv1" sql:"not null"`
	Tag       string     `json:"tag" sql:"not null;varchar(255)"`
	ImageId   string     `json:"imageid" sql:"not null;varchar(255)"`
	CreatedAt time.Time  `json:"created" sql:""`
	UpdatedAt time.Time  `json:"updated" sql:""`
	DeletedAt *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_tag_v1".
func (t *DockerTagV1) TableName() string {
	return "docker_tag_v1"
}
//Save function save all properties of Docker Registry V1 repository.
// NOTE(review): currently a stub — it always returns nil without persisting
// anything.
func (r *DockerV1) Save(namespace, repository string) error {
	return nil
}

//Put function will create or update repository.
// NOTE(review): currently a stub — it always returns nil without persisting
// anything.
func (r *DockerV1) Put(namespace, repository, json, agent string) error {
	return nil
}
Update the repository PUT function.
/*
Copyright 2015 The ContainerOps Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package models
import (
"time"
)
//DockerV1 is a Docker Registry V1 repository record.
type DockerV1 struct {
	Id          int64      `json:"id" gorm:"primary_key"`
	Namespace   string     `json:"namespace" sql:"not null;type:varchar(255)" gorm:"unique_index:v1_repository"`
	Repository  string     `json:"repository" sql:"not null;type:varchar(255)" gorm:"unique_index:v1_repository"`
	JSON        string     `json:"json" sql:"null;type:text"`
	Manifests   string     `json:"manifests" sql:"null;type:text"`
	Agent       string     `json:"agent" sql:"null;type:text"`
	Description string     `json:"description" sql:"null;type:text"`
	Size        int64      `json:"size" sql:"default:0"`
	Locked      bool       `json:"locked" sql:"default:false"` //When create/update the repository, the locked will be true.
	CreatedAt   time.Time  `json:"created" sql:""`
	UpdatedAt   time.Time  `json:"updated" sql:""`
	DeletedAt   *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_v1".
func (r *DockerV1) TableName() string {
	return "docker_v1"
}

//DockerImageV1 is a Docker Registry V1 image layer record.
type DockerImageV1 struct {
	Id       int64      `json:"id" gorm:"primary_key"`
	ImageId  string     `json:"imageid" sql:"not null;unique;varchar(255)"`
	JSON     string     `json:"json" sql:"null;type:text"`
	Ancestry string     `json:"ancestry" sql:"null;type:text"`
	Checksum string     `json:"checksum" sql:"null;unique;type:varchar(255)"`
	Payload  string     `json:"payload" sql:"null;type:varchar(255)"`
	Path     string     `json:"path" sql:"null;type:text"`
	OSS      string     `json:"oss" sql:"null;type:text"`
	Size     int64      `json:"size" sql:"default:0"`
	Locked   bool       `json:"locked" sql:"default:false"`
	CreatedAt time.Time  `json:"created" sql:""`
	UpdatedAt time.Time  `json:"updated" sql:""`
	DeletedAt *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_image_v1".
func (i *DockerImageV1) TableName() string {
	return "docker_image_v1"
}

//DockerTagV1 maps a tag name to an image within a DockerV1 repository.
type DockerTagV1 struct {
	Id        int64      `json:"id" gorm:"primary_key"`
	DockerV1  int64      `json:"dockerv1" sql:"not null"`
	Tag       string     `json:"tag" sql:"not null;varchar(255)"`
	ImageId   string     `json:"imageid" sql:"not null;varchar(255)"`
	CreatedAt time.Time  `json:"created" sql:""`
	UpdatedAt time.Time  `json:"updated" sql:""`
	DeletedAt *time.Time `json:"deleted" sql:"index"`
}

//TableName in mysql is "docker_tag_v1".
func (t *DockerTagV1) TableName() string {
	return "docker_tag_v1"
}
//Save function save all properties of Docker Registry V1 repository.
// NOTE(review): currently a stub — it always returns nil without persisting
// anything.
func (r *DockerV1) Save(namespace, repository string) error {
	return nil
}
//Put function will create or update repository. The record is created (if
//missing) and its json/agent/locked fields updated within one transaction.
func (r *DockerV1) Put(namespace, repository, json, agent string) error {
	r.Namespace, r.Repository, r.JSON, r.Agent, r.Locked = namespace, repository, json, agent, true
	tx := db.Begin()
	// BUG FIX: the create previously ran on db instead of tx, so a later
	// rollback could not undo it; also pass r directly (r is already a
	// pointer — &r was a **DockerV1).
	if err := tx.Debug().Where("namespace = ? AND repository = ? ", namespace, repository).FirstOrCreate(r).Error; err != nil {
		tx.Rollback()
		return err
	}
	if err := tx.Debug().Model(r).Updates(map[string]interface{}{"json": json, "agent": agent, "locked": true}).Error; err != nil {
		tx.Rollback()
		return err
	}
	// BUG FIX: removed the dead `else if err == nil` branch and duplicate
	// commit; also surface a commit failure instead of ignoring it.
	return tx.Commit().Error
}
|
package routing
import (
"bytes"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/multimutex"
"github.com/lightningnetwork/lnd/routing/chainview"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/ticker"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
)
const (
// DefaultPayAttemptTimeout is the default payment attempt timeout. The
// payment attempt timeout defines the duration after which we stop
// trying more routes for a payment.
DefaultPayAttemptTimeout = time.Duration(time.Second * 60)
// DefaultChannelPruneExpiry is the default duration used to determine
// if a channel should be pruned or not.
DefaultChannelPruneExpiry = time.Duration(time.Hour * 24 * 14)
// defaultStatInterval governs how often the router will log non-empty
// stats related to processing new channels, updates, or node
// announcements.
defaultStatInterval = time.Minute
)
var (
	// ErrRouterShuttingDown is returned if the router is in the process of
	// shutting down.
	//
	// Note: this intentionally uses fmt.Errorf — the file imports
	// github.com/go-errors/errors under the name "errors", so stdlib
	// errors.New is not available here without an alias.
	ErrRouterShuttingDown = fmt.Errorf("router shutting down")
)
// ChannelGraphSource represents the source of information about the topology
// of the lightning network. It's responsible for the addition of nodes, edges,
// applying edge updates, and returning the current block height with which the
// topology is synchronized.
type ChannelGraphSource interface {
	// AddNode is used to add information about a node to the router
	// database. If the node with this pubkey is not present in an existing
	// channel, it will be ignored.
	AddNode(node *channeldb.LightningNode) error

	// AddEdge is used to add an edge/channel to the topology of the
	// router. Once all information about the channel has been gathered,
	// this edge/channel might be used in construction of payment paths.
	AddEdge(edge *channeldb.ChannelEdgeInfo) error

	// AddProof updates the channel edge info with proof which is needed to
	// properly announce the edge to the rest of the network.
	AddProof(chanID lnwire.ShortChannelID, proof *channeldb.ChannelAuthProof) error

	// UpdateEdge is used to update edge information. Without this message
	// the edge is considered as not fully constructed.
	UpdateEdge(policy *channeldb.ChannelEdgePolicy) error

	// IsStaleNode returns true if the graph source has a node announcement
	// for the target node with a more recent timestamp. This method will
	// also return true if we don't have an active channel announcement for
	// the target node.
	IsStaleNode(node route.Vertex, timestamp time.Time) bool

	// IsPublicNode determines whether the given vertex is seen as a public
	// node in the graph from the graph's source node's point of view.
	IsPublicNode(node route.Vertex) (bool, error)

	// IsKnownEdge returns true if the graph source already knows of the
	// passed channel ID either as a live or zombie edge.
	IsKnownEdge(chanID lnwire.ShortChannelID) bool

	// IsStaleEdgePolicy returns true if the graph source has a channel
	// edge for the passed channel ID (and flags) that have a more recent
	// timestamp.
	IsStaleEdgePolicy(chanID lnwire.ShortChannelID, timestamp time.Time,
		flags lnwire.ChanUpdateChanFlags) bool

	// MarkEdgeLive clears an edge from our zombie index, deeming it as
	// live.
	MarkEdgeLive(chanID lnwire.ShortChannelID) error

	// ForAllOutgoingChannels is used to iterate over all channels
	// emanating from the "source" node which is the center of the
	// star-graph.
	ForAllOutgoingChannels(cb func(c *channeldb.ChannelEdgeInfo,
		e *channeldb.ChannelEdgePolicy) error) error

	// CurrentBlockHeight returns the block height from POV of the router
	// subsystem.
	CurrentBlockHeight() (uint32, error)

	// GetChannelByID returns the channel and both edge policies for the
	// given channel id.
	GetChannelByID(chanID lnwire.ShortChannelID) (*channeldb.ChannelEdgeInfo,
		*channeldb.ChannelEdgePolicy, *channeldb.ChannelEdgePolicy, error)

	// FetchLightningNode attempts to look up a target node by its identity
	// public key. channeldb.ErrGraphNodeNotFound is returned if the node
	// doesn't exist within the graph.
	FetchLightningNode(route.Vertex) (*channeldb.LightningNode, error)

	// ForEachNode is used to iterate over every node in the known graph.
	ForEachNode(func(node *channeldb.LightningNode) error) error

	// ForEachChannel is used to iterate over every channel in the known
	// graph.
	ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
		e1, e2 *channeldb.ChannelEdgePolicy) error) error
}
// PaymentAttemptDispatcher is used by the router to send payment attempts onto
// the network, and receive their results.
type PaymentAttemptDispatcher interface {
	// SendHTLC is a function that directs a link-layer switch to
	// forward a fully encoded payment to the first hop in the route
	// denoted by its public key. A non-nil error is to be returned if the
	// payment was unsuccessful.
	SendHTLC(firstHop lnwire.ShortChannelID,
		paymentID uint64,
		htlcAdd *lnwire.UpdateAddHTLC) error

	// GetPaymentResult returns the result of the payment attempt with
	// the given paymentID. The method returns a channel where the payment
	// result will be sent when available, or an error is encountered
	// during forwarding. When a result is received on the channel, the
	// HTLC is guaranteed to no longer be in flight. The switch shutting
	// down is signaled by closing the channel. If the paymentID is
	// unknown, ErrPaymentIDNotFound will be returned.
	GetPaymentResult(paymentID uint64, paymentHash lntypes.Hash,
		deobfuscator htlcswitch.ErrorDecrypter) (
		<-chan *htlcswitch.PaymentResult, error)
}
// PaymentSessionSource is an interface that defines a source for the router to
// retrieve new payment sessions.
type PaymentSessionSource interface {
	// NewPaymentSession creates a new payment session that will produce
	// routes to the given target. An optional set of routing hints can be
	// provided in order to populate additional edges to explore when
	// finding a path to the payment's destination.
	NewPaymentSession(routeHints [][]zpay32.HopHint,
		target route.Vertex) (PaymentSession, error)

	// NewPaymentSessionForRoute creates a new paymentSession instance that
	// is just used for failure reporting to missioncontrol, and will only
	// attempt the given route.
	NewPaymentSessionForRoute(preBuiltRoute *route.Route) PaymentSession

	// NewPaymentSessionEmpty creates a new paymentSession instance that is
	// empty, and will be exhausted immediately. Used for failure reporting
	// to missioncontrol for resumed payments we don't want to make more
	// attempts for.
	NewPaymentSessionEmpty() PaymentSession
}
// MissionController is an interface that exposes failure reporting and
// probability estimation.
type MissionController interface {
	// ReportPaymentFail reports a failed payment to mission control as
	// input for future probability estimates. It returns a bool indicating
	// whether this error is a final error and no further payment attempts
	// need to be made.
	ReportPaymentFail(paymentID uint64, rt *route.Route,
		failureSourceIdx *int, failure lnwire.FailureMessage) (
		*channeldb.FailureReason, error)

	// ReportPaymentSuccess reports a successful payment to mission control as input
	// for future probability estimates.
	ReportPaymentSuccess(paymentID uint64, rt *route.Route) error

	// GetProbability is expected to return the success probability of a
	// payment from fromNode along edge.
	GetProbability(fromNode, toNode route.Vertex,
		amt lnwire.MilliSatoshi) float64
}
// FeeSchema is the set fee configuration for a Lightning Node on the network.
// Using the coefficients described within the schema, the required fee to
// forward outgoing payments can be derived.
type FeeSchema struct {
	// BaseFee is the base amount of milli-satoshis that will be charged
	// for ANY payment forwarded.
	BaseFee lnwire.MilliSatoshi

	// FeeRate is the rate that will be charged for forwarding payments.
	// This value should be interpreted as the numerator for a fraction
	// (fixed point arithmetic) whose denominator is 1 million. As a result
	// the effective fee rate charged per mSAT will be: (amount *
	// FeeRate/1,000,000).
	FeeRate uint32
}
// ChannelPolicy holds the parameters that determine the policy we enforce
// when forwarding payments on a channel. These parameters are communicated
// to the rest of the network in ChannelUpdate messages.
type ChannelPolicy struct {
	// FeeSchema holds the fee configuration for a channel.
	FeeSchema

	// TimeLockDelta is the required HTLC timelock delta to be used
	// when forwarding payments.
	TimeLockDelta uint32

	// MaxHTLC is the maximum HTLC size including fees we are allowed to
	// forward over this channel.
	MaxHTLC lnwire.MilliSatoshi
}
// Config defines the configuration for the ChannelRouter. ALL elements within
// the configuration MUST be non-nil for the ChannelRouter to carry out its
// duties.
type Config struct {
	// Graph is the channel graph that the ChannelRouter will use to gather
	// metrics from and also to carry out path finding queries.
	// TODO(roasbeef): make into an interface
	Graph *channeldb.ChannelGraph

	// Chain is the router's source to the most up-to-date blockchain data.
	// All incoming advertised channels will be checked against the chain
	// to ensure that the channels advertised are still open.
	Chain lnwallet.BlockChainIO

	// ChainView is an instance of a FilteredChainView which is used to
	// watch the sub-set of the UTXO set (the set of active channels) that
	// we need in order to properly maintain the channel graph.
	ChainView chainview.FilteredChainView

	// Payer is an instance of a PaymentAttemptDispatcher and is used by
	// the router to send payment attempts onto the network, and receive
	// their results.
	Payer PaymentAttemptDispatcher

	// Control keeps track of the status of ongoing payments, ensuring we
	// can properly resume them across restarts.
	Control ControlTower

	// MissionControl is a shared memory of sorts that executions of
	// payment path finding use in order to remember which vertexes/edges
	// were pruned from prior attempts. During SendPayment execution,
	// errors sent by nodes are mapped into a vertex or edge to be pruned.
	// Each run will then take into account this set of pruned
	// vertexes/edges to reduce route failure and pass on graph information
	// gained to the next execution.
	MissionControl MissionController

	// SessionSource defines a source for the router to retrieve new payment
	// sessions.
	SessionSource PaymentSessionSource

	// ChannelPruneExpiry is the duration used to determine if a channel
	// should be pruned or not. If the delta between now and when the
	// channel was last updated is greater than ChannelPruneExpiry, then
	// the channel is marked as a zombie channel eligible for pruning.
	ChannelPruneExpiry time.Duration

	// GraphPruneInterval is used as an interval to determine how often we
	// should examine the channel graph to garbage collect zombie channels.
	GraphPruneInterval time.Duration

	// QueryBandwidth is a method that allows the router to query the lower
	// link layer to determine the up to date available bandwidth at a
	// prospective link to be traversed. If the link isn't available, then
	// a value of zero should be returned. Otherwise, the current up to
	// date knowledge of the available bandwidth of the link should be
	// returned.
	QueryBandwidth func(edge *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi

	// NextPaymentID is a method that guarantees to return a new, unique ID
	// each time it is called. This is used by the router to generate a
	// unique payment ID for each payment it attempts to send, such that
	// the switch can properly handle the HTLC.
	NextPaymentID func() (uint64, error)

	// AssumeChannelValid toggles whether or not the router will check for
	// spentness of channel outpoints. For neutrino, this saves long rescans
	// from blocking initial usage of the daemon.
	AssumeChannelValid bool

	// PathFindingConfig defines global path finding parameters.
	PathFindingConfig PathFindingConfig
}
// EdgeLocator is a struct used to identify a specific edge.
type EdgeLocator struct {
	// ChannelID is the channel of this edge.
	ChannelID uint64

	// Direction takes the value of 0 or 1 and is identical in definition
	// to the channel direction flag. A value of 0 means the direction from
	// the lower node pubkey to the higher.
	Direction uint8
}

// String returns a human readable "<channelID>:<direction>" representation of
// the edge locator.
func (e *EdgeLocator) String() string {
	return fmt.Sprintf("%d:%d", e.ChannelID, e.Direction)
}
// ChannelRouter is the layer 3 router within the Lightning stack. Below the
// ChannelRouter is the HtlcSwitch, and below that is the Bitcoin blockchain
// itself. The primary role of the ChannelRouter is to respond to queries for
// potential routes that can support a payment amount, and also general graph
// reachability questions. The router will prune the channel graph
// automatically as new blocks are discovered which spend certain known funding
// outpoints, thereby closing their respective channels.
type ChannelRouter struct {
	ntfnClientCounter uint64 // To be used atomically.

	started uint32 // To be used atomically.
	stopped uint32 // To be used atomically.

	bestHeight uint32 // To be used atomically.

	// cfg is a copy of the configuration struct that the ChannelRouter was
	// initialized with.
	cfg *Config

	// selfNode is the center of the star-graph centered around the
	// ChannelRouter. The ChannelRouter uses this node as a starting point
	// when doing any path finding.
	selfNode *channeldb.LightningNode

	// newBlocks is a channel in which new blocks connected to the end of
	// the main chain are sent over, and blocks updated after a call to
	// UpdateFilter.
	newBlocks <-chan *chainview.FilteredBlock

	// staleBlocks is a channel in which blocks disconnected from the end
	// of our currently known best chain are sent over.
	staleBlocks <-chan *chainview.FilteredBlock

	// networkUpdates is a channel that carries new topology updates
	// messages from outside the ChannelRouter to be processed by the
	// networkHandler.
	networkUpdates chan *routingMsg

	// topologyClients maps a client's unique notification ID to a
	// topologyClient client that contains its notification dispatch
	// channel.
	topologyClients map[uint64]*topologyClient

	// ntfnClientUpdates is a channel that's used to send new updates to
	// topology notification clients to the ChannelRouter. Updates either
	// add a new notification client, or cancel notifications for an
	// existing client.
	ntfnClientUpdates chan *topologyClientUpdate

	// channelEdgeMtx is a mutex we use to make sure we process only one
	// ChannelEdgePolicy at a time for a given channelID, to ensure
	// consistency between the various database accesses.
	channelEdgeMtx *multimutex.Mutex

	// statTicker is a resumable ticker that logs the router's progress as
	// it discovers channels or receives updates.
	statTicker ticker.Ticker

	// stats tracks newly processed channels, updates, and node
	// announcements over a window of defaultStatInterval.
	stats *routerStats

	sync.RWMutex

	quit chan struct{}
	wg   sync.WaitGroup
}
// A compile time check to ensure ChannelRouter implements the
// ChannelGraphSource interface.
var _ ChannelGraphSource = (*ChannelRouter)(nil)
// New creates a new instance of the ChannelRouter with the specified
// configuration parameters. As part of initialization, if the router detects
// that the channel graph isn't fully in sync with the latest UTXO (since the
// channel graph is a subset of the UTXO set) set, then the router will proceed
// to fully sync to the latest state of the UTXO set.
func New(cfg Config) (*ChannelRouter, error) {
	// The router needs a source node as the center of its star-graph, so
	// bail out early if the graph can't provide one.
	selfNode, err := cfg.Graph.SourceNode()
	if err != nil {
		return nil, err
	}

	return &ChannelRouter{
		cfg:               &cfg,
		selfNode:          selfNode,
		networkUpdates:    make(chan *routingMsg),
		topologyClients:   make(map[uint64]*topologyClient),
		ntfnClientUpdates: make(chan *topologyClientUpdate),
		channelEdgeMtx:    multimutex.NewMutex(),
		statTicker:        ticker.New(defaultStatInterval),
		stats:             new(routerStats),
		quit:              make(chan struct{}),
	}, nil
}
// Start launches all the goroutines the ChannelRouter requires to carry out
// its duties. If the router has already been started, then this method is a
// noop.
func (r *ChannelRouter) Start() error {
	if !atomic.CompareAndSwapUint32(&r.started, 0, 1) {
		return nil
	}

	log.Tracef("Channel Router starting")

	bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}

	// If the graph has never been pruned, or hasn't fully been created yet,
	// then we don't treat this as an explicit error.
	if _, _, err := r.cfg.Graph.PruneTip(); err != nil {
		switch {
		case err == channeldb.ErrGraphNeverPruned:
			fallthrough
		case err == channeldb.ErrGraphNotFound:
			// If the graph has never been pruned, then we'll set
			// the prune height to the current best height of the
			// chain backend.
			_, err = r.cfg.Graph.PruneGraph(
				nil, bestHash, uint32(bestHeight),
			)
			if err != nil {
				return err
			}
		default:
			return err
		}
	}

	// If AssumeChannelValid is present, then we won't rely on pruning
	// channels from the graph based on their spentness, but whether they
	// are considered zombies or not.
	if r.cfg.AssumeChannelValid {
		if err := r.pruneZombieChans(); err != nil {
			return err
		}
	} else {
		// Otherwise, we'll use our filtered chain view to prune
		// channels as soon as they are detected as spent on-chain.
		if err := r.cfg.ChainView.Start(); err != nil {
			return err
		}

		// Once the instance is active, we'll fetch the channel we'll
		// receive notifications over.
		r.newBlocks = r.cfg.ChainView.FilteredBlocks()
		r.staleBlocks = r.cfg.ChainView.DisconnectedBlocks()

		// Before we perform our manual block pruning, we'll construct
		// and apply a fresh chain filter to the active
		// FilteredChainView instance. We do this before, as otherwise
		// we may miss on-chain events as the filter hasn't properly
		// been applied.
		channelView, err := r.cfg.Graph.ChannelView()
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return err
		}

		log.Infof("Filtering chain using %v channels active",
			len(channelView))

		if len(channelView) != 0 {
			err = r.cfg.ChainView.UpdateFilter(
				channelView, uint32(bestHeight),
			)
			if err != nil {
				return err
			}
		}

		// Before we begin normal operation of the router, we first need
		// to synchronize the channel graph to the latest state of the
		// UTXO set.
		if err := r.syncGraphWithChain(); err != nil {
			return err
		}

		// Finally, before we proceed, we'll prune any unconnected nodes
		// from the graph in order to ensure we maintain a tight graph
		// of "useful" nodes.
		err = r.cfg.Graph.PruneGraphNodes()
		if err != nil && err != channeldb.ErrGraphNodesNotFound {
			return err
		}
	}

	// If any payments are still in flight, we resume, to make sure their
	// results are properly handled.
	payments, err := r.cfg.Control.FetchInFlightPayments()
	if err != nil {
		return err
	}

	for _, payment := range payments {
		log.Infof("Resuming payment with hash %v", payment.Info.PaymentHash)
		r.wg.Add(1)
		go func(payment *channeldb.InFlightPayment) {
			defer r.wg.Done()

			// We create a dummy, empty payment session such that
			// we won't make another payment attempt when the
			// result for the in-flight attempt is received.
			//
			// PayAttemptTime doesn't need to be set, as there is
			// only a single attempt.
			paySession := r.cfg.SessionSource.NewPaymentSessionEmpty()

			lPayment := &LightningPayment{
				PaymentHash: payment.Info.PaymentHash,
			}

			// NOTE: we declare a goroutine-local err here. The
			// previous code assigned to the err variable captured
			// from the enclosing scope, which is a data race when
			// multiple payments are resumed concurrently.
			_, _, err := r.sendPayment(payment.Attempt, lPayment, paySession)
			if err != nil {
				log.Errorf("Resuming payment with hash %v "+
					"failed: %v.", payment.Info.PaymentHash, err)
				return
			}

			log.Infof("Resumed payment with hash %v completed.",
				payment.Info.PaymentHash)
		}(payment)
	}

	r.wg.Add(1)
	go r.networkHandler()

	return nil
}
// Stop signals the ChannelRouter to gracefully halt all routines. This method
// will *block* until all goroutines have exited. If the channel router has
// already stopped then this method will return immediately.
func (r *ChannelRouter) Stop() error {
	if !atomic.CompareAndSwapUint32(&r.stopped, 0, 1) {
		return nil
	}

	log.Tracef("Channel Router shutting down")

	// Our filtered chain view could've only been started if
	// AssumeChannelValid isn't present.
	if !r.cfg.AssumeChannelValid {
		if err := r.cfg.ChainView.Stop(); err != nil {
			return err
		}
	}

	// Closing quit signals every goroutine spawned by the router to exit;
	// the WaitGroup then blocks until they have all done so.
	close(r.quit)
	r.wg.Wait()

	return nil
}
// syncGraphWithChain attempts to synchronize the current channel graph with
// the latest UTXO set state. This process involves pruning from the channel
// graph any channels which have been closed by spending their funding output
// since we've been down.
func (r *ChannelRouter) syncGraphWithChain() error {
	// First, we'll need to check to see if we're already in sync with the
	// latest state of the UTXO set.
	bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}
	r.bestHeight = uint32(bestHeight)

	pruneHash, pruneHeight, err := r.cfg.Graph.PruneTip()
	if err != nil {
		switch {
		// If the graph has never been pruned, or hasn't fully been
		// created yet, then we don't treat this as an explicit error.
		case err == channeldb.ErrGraphNeverPruned:
		case err == channeldb.ErrGraphNotFound:
		default:
			return err
		}
	}

	log.Infof("Prune tip for Channel Graph: height=%v, hash=%v", pruneHeight,
		pruneHash)

	switch {

	// If the graph has never been pruned, then we can exit early as this
	// entails it's being created for the first time and hasn't seen any
	// block or created channels.
	case pruneHeight == 0 || pruneHash == nil:
		return nil

	// If the block hashes and heights match exactly, then we don't need to
	// prune the channel graph as we're already fully in sync.
	case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
		return nil
	}

	// If the main chain blockhash at prune height is different from the
	// prune hash, this might indicate the database is on a stale branch.
	mainBlockHash, err := r.cfg.Chain.GetBlockHash(int64(pruneHeight))
	if err != nil {
		return err
	}

	// While we are on a stale branch of the chain, walk backwards to find
	// the first common block.
	for !pruneHash.IsEqual(mainBlockHash) {
		log.Infof("channel graph is stale. Disconnecting block %v "+
			"(hash=%v)", pruneHeight, pruneHash)

		// Prune the graph for every channel that was opened at height
		// >= pruneHeight.
		_, err := r.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
		if err != nil {
			return err
		}

		pruneHash, pruneHeight, err = r.cfg.Graph.PruneTip()
		if err != nil {
			switch {
			// If at this point the graph has never been pruned, we
			// can exit as this entails we are back to the point
			// where it hasn't seen any block or created channels,
			// alas there's nothing left to prune.
			case err == channeldb.ErrGraphNeverPruned:
				return nil
			case err == channeldb.ErrGraphNotFound:
				return nil
			default:
				return err
			}
		}
		mainBlockHash, err = r.cfg.Chain.GetBlockHash(int64(pruneHeight))
		if err != nil {
			return err
		}
	}

	log.Infof("Syncing channel graph from height=%v (hash=%v) to height=%v "+
		"(hash=%v)", pruneHeight, pruneHash, bestHeight, bestHash)

	// If we're not yet caught up, then we'll walk forward in the chain
	// pruning the channel graph with each new block that hasn't yet been
	// consumed by the channel graph.
	var spentOutputs []*wire.OutPoint
	for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ {
		// Break out of the rescan early if a shutdown has been
		// requested, otherwise long rescans will block the daemon from
		// shutting down promptly.
		select {
		case <-r.quit:
			return ErrRouterShuttingDown
		default:
		}

		// Using the next height, request a manual block pruning from
		// the chainview for the particular block hash.
		nextHash, err := r.cfg.Chain.GetBlockHash(int64(nextHeight))
		if err != nil {
			return err
		}
		filterBlock, err := r.cfg.ChainView.FilterBlock(nextHash)
		if err != nil {
			return err
		}

		// We're only interested in all prior outputs that have been
		// spent in the block, so collate all the referenced previous
		// outpoints within each tx and input.
		for _, tx := range filterBlock.Transactions {
			for _, txIn := range tx.TxIn {
				spentOutputs = append(spentOutputs,
					&txIn.PreviousOutPoint)
			}
		}
	}

	// With the spent outputs gathered, attempt to prune the channel graph,
	// also passing in the best hash+height so the prune tip can be updated.
	closedChans, err := r.cfg.Graph.PruneGraph(
		spentOutputs, bestHash, uint32(bestHeight),
	)
	if err != nil {
		return err
	}

	log.Infof("Graph pruning complete: %v channels were closed since "+
		"height %v", len(closedChans), pruneHeight)
	return nil
}
// pruneZombieChans is a method that will be called periodically to prune out
// any "zombie" channels. We consider channels zombies if *both* edges haven't
// been updated since our zombie horizon. If AssumeChannelValid is present,
// we'll also consider channels zombies if *both* edges are disabled. This
// usually signals that a channel has been closed on-chain. We do this
// periodically to keep a healthy, lively routing table.
func (r *ChannelRouter) pruneZombieChans() error {
	chansToPrune := make(map[uint64]struct{})
	chanExpiry := r.cfg.ChannelPruneExpiry

	log.Infof("Examining channel graph for zombie channels")

	// A helper method to detect if the channel belongs to this node
	isSelfChannelEdge := func(info *channeldb.ChannelEdgeInfo) bool {
		return info.NodeKey1Bytes == r.selfNode.PubKeyBytes ||
			info.NodeKey2Bytes == r.selfNode.PubKeyBytes
	}

	// First, we'll collect all the channels which are eligible for garbage
	// collection due to being zombies.
	filterPruneChans := func(info *channeldb.ChannelEdgeInfo,
		e1, e2 *channeldb.ChannelEdgePolicy) error {

		// Exit early in case this channel is already marked to be pruned
		if _, markedToPrune := chansToPrune[info.ChannelID]; markedToPrune {
			return nil
		}

		// We'll ensure that we don't attempt to prune our *own*
		// channels from the graph, as in any case this should be
		// re-advertised by the sub-system above us.
		if isSelfChannelEdge(info) {
			return nil
		}

		// If *both* edges haven't been updated for a period of
		// chanExpiry, then we'll mark the channel itself as eligible
		// for graph pruning. A nil policy means that edge's zombie
		// flag stays false, so a channel with a missing edge is never
		// pruned here.
		var e1Zombie, e2Zombie bool
		if e1 != nil {
			e1Zombie = time.Since(e1.LastUpdate) >= chanExpiry
			if e1Zombie {
				log.Tracef("Edge #1 of ChannelID(%v) last "+
					"update: %v", info.ChannelID,
					e1.LastUpdate)
			}
		}
		if e2 != nil {
			e2Zombie = time.Since(e2.LastUpdate) >= chanExpiry
			if e2Zombie {
				log.Tracef("Edge #2 of ChannelID(%v) last "+
					"update: %v", info.ChannelID,
					e2.LastUpdate)
			}
		}

		// If the channel is not considered zombie, we can move on to
		// the next.
		if !e1Zombie || !e2Zombie {
			return nil
		}

		log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
			info.ChannelID)

		// TODO(roasbeef): add ability to delete single directional edge
		chansToPrune[info.ChannelID] = struct{}{}

		return nil
	}

	// If AssumeChannelValid is present we'll look at the disabled bit for both
	// edges. If they're both disabled, then we can interpret this as the
	// channel being closed and can prune it from our graph.
	if r.cfg.AssumeChannelValid {
		disabledChanIDs, err := r.cfg.Graph.DisabledChannelIDs()
		if err != nil {
			return fmt.Errorf("unable to get disabled channels ids "+
				"chans: %v", err)
		}

		disabledEdges, err := r.cfg.Graph.FetchChanInfos(disabledChanIDs)
		if err != nil {
			return fmt.Errorf("unable to fetch disabled channels edges "+
				"chans: %v", err)
		}

		// Ensuring we won't prune our own channel from the graph.
		for _, disabledEdge := range disabledEdges {
			if !isSelfChannelEdge(disabledEdge.Info) {
				chansToPrune[disabledEdge.Info.ChannelID] = struct{}{}
			}
		}
	}

	// Examine all edges whose last update falls within [epoch, now-expiry];
	// anything updated more recently cannot be a zombie by definition.
	startTime := time.Unix(0, 0)
	endTime := time.Now().Add(-1 * chanExpiry)
	oldEdges, err := r.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
	if err != nil {
		return fmt.Errorf("unable to fetch expired channel updates "+
			"chans: %v", err)
	}

	for _, u := range oldEdges {
		// The callback's error return is always nil (see above), so it
		// is safe to discard here.
		filterPruneChans(u.Info, u.Policy1, u.Policy2)
	}

	log.Infof("Pruning %v zombie channels", len(chansToPrune))

	// With the set of zombie-like channels obtained, we'll do another pass
	// to delete them from the channel graph.
	toPrune := make([]uint64, 0, len(chansToPrune))
	for chanID := range chansToPrune {
		toPrune = append(toPrune, chanID)
		log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
	}
	if err := r.cfg.Graph.DeleteChannelEdges(toPrune...); err != nil {
		return fmt.Errorf("unable to delete zombie channels: %v", err)
	}

	// With the channels pruned, we'll also attempt to prune any nodes that
	// were a part of them.
	err = r.cfg.Graph.PruneGraphNodes()
	if err != nil && err != channeldb.ErrGraphNodesNotFound {
		return fmt.Errorf("unable to prune graph nodes: %v", err)
	}

	return nil
}
// networkHandler is the primary goroutine for the ChannelRouter. The roles of
// this goroutine include answering queries related to the state of the
// network, pruning the graph on new block notification, applying network
// updates, and registering new topology clients.
//
// NOTE: This MUST be run as a goroutine.
func (r *ChannelRouter) networkHandler() {
	defer r.wg.Done()

	graphPruneTicker := time.NewTicker(r.cfg.GraphPruneInterval)
	defer graphPruneTicker.Stop()

	r.statTicker.Resume()
	defer r.statTicker.Stop()

	r.stats.Reset()

	// We'll use this validation barrier to ensure that we process all jobs
	// in the proper order during parallel validation.
	validationBarrier := NewValidationBarrier(runtime.NumCPU()*4, r.quit)

	for {
		select {
		// A new fully validated network update has just arrived. As a
		// result we'll modify the channel graph accordingly depending
		// on the exact type of the message.
		case update := <-r.networkUpdates:
			// We'll set up any dependants, and wait until a free
			// slot for this job opens up, this allows us to not
			// have thousands of goroutines active.
			validationBarrier.InitJobDependencies(update.msg)

			r.wg.Add(1)
			go func() {
				defer r.wg.Done()
				defer validationBarrier.CompleteJob()

				// If this message has an existing dependency,
				// then we'll wait until that has been fully
				// validated before we proceed.
				err := validationBarrier.WaitForDependants(
					update.msg,
				)
				if err != nil {
					if err != ErrVBarrierShuttingDown {
						log.Warnf("unexpected error "+
							"during validation "+
							"barrier shutdown: %v",
							err)
					}
					return
				}

				// Process the routing update to determine if
				// this is either a new update from our PoV or
				// an update to a prior vertex/edge we
				// previously accepted.
				err = r.processUpdate(update.msg)
				update.err <- err

				// If this message had any dependencies, then
				// we can now signal them to continue.
				validationBarrier.SignalDependants(update.msg)
				if err != nil {
					return
				}

				// Send off a new notification for the newly
				// accepted update.
				topChange := &TopologyChange{}
				err = addToTopologyChange(
					r.cfg.Graph, topChange, update.msg,
				)
				if err != nil {
					log.Errorf("unable to update topology "+
						"change notification: %v", err)
					return
				}

				if !topChange.isEmpty() {
					r.notifyTopologyChange(topChange)
				}
			}()

			// TODO(roasbeef): remove all unconnected vertexes
			// after N blocks pass with no corresponding
			// announcements.

		case chainUpdate, ok := <-r.staleBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}

			// Since this block is stale, we update our best height
			// to the previous block.
			blockHeight := uint32(chainUpdate.Height)
			atomic.StoreUint32(&r.bestHeight, blockHeight-1)

			// Update the channel graph to reflect that this block
			// was disconnected.
			_, err := r.cfg.Graph.DisconnectBlockAtHeight(blockHeight)
			if err != nil {
				log.Errorf("unable to prune graph with stale "+
					"block: %v", err)
				continue
			}

			// TODO(halseth): notify client about the reorg?

		// A new block has arrived, so we can prune the channel graph
		// of any channels which were closed in the block.
		case chainUpdate, ok := <-r.newBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}

			// We'll ensure that any new blocks received attach
			// directly to the end of our main chain. If not, then
			// we've somehow missed some blocks. We don't process
			// this block as otherwise, we may miss on-chain
			// events.
			currentHeight := atomic.LoadUint32(&r.bestHeight)
			if chainUpdate.Height != currentHeight+1 {
				log.Errorf("out of order block: expecting "+
					"height=%v, got height=%v", currentHeight+1,
					chainUpdate.Height)
				continue
			}

			// Once a new block arrives, we update our running
			// track of the height of the chain tip.
			blockHeight := uint32(chainUpdate.Height)
			atomic.StoreUint32(&r.bestHeight, blockHeight)
			log.Infof("Pruning channel graph using block %v (height=%v)",
				chainUpdate.Hash, blockHeight)

			// We're only interested in all prior outputs that have
			// been spent in the block, so collate all the
			// referenced previous outpoints within each tx and
			// input.
			var spentOutputs []*wire.OutPoint
			for _, tx := range chainUpdate.Transactions {
				for _, txIn := range tx.TxIn {
					spentOutputs = append(spentOutputs,
						&txIn.PreviousOutPoint)
				}
			}

			// With the spent outputs gathered, attempt to prune
			// the channel graph, also passing in the hash+height
			// of the block being pruned so the prune tip can be
			// updated.
			chansClosed, err := r.cfg.Graph.PruneGraph(spentOutputs,
				&chainUpdate.Hash, chainUpdate.Height)
			if err != nil {
				log.Errorf("unable to prune routing table: %v", err)
				continue
			}

			log.Infof("Block %v (height=%v) closed %v channels",
				chainUpdate.Hash, blockHeight, len(chansClosed))

			if len(chansClosed) == 0 {
				continue
			}

			// Notify all currently registered clients of the newly
			// closed channels.
			closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
			r.notifyTopologyChange(&TopologyChange{
				ClosedChannels: closeSummaries,
			})

		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-r.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID

			if ntfnUpdate.cancel {
				r.RLock()
				client, ok := r.topologyClients[ntfnUpdate.clientID]
				r.RUnlock()
				if ok {
					r.Lock()
					delete(r.topologyClients, clientID)
					r.Unlock()

					// The client's exit channel is closed
					// outside the lock so we never block
					// other clients while its goroutines
					// wind down.
					close(client.exit)
					client.wg.Wait()

					close(client.ntfnChan)
				}

				continue
			}

			r.Lock()
			r.topologyClients[ntfnUpdate.clientID] = &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			}
			r.Unlock()

		// The graph prune ticker has ticked, so we'll examine the
		// state of the known graph to filter out any zombie channels
		// for pruning.
		case <-graphPruneTicker.C:
			if err := r.pruneZombieChans(); err != nil {
				log.Errorf("Unable to prune zombies: %v", err)
			}

		// Log any stats if we've processed a non-empty number of
		// channels, updates, or nodes. We'll only pause the ticker if
		// the last window contained no updates to avoid resuming and
		// pausing while consecutive windows contain new info.
		case <-r.statTicker.Ticks():
			if !r.stats.Empty() {
				log.Infof(r.stats.String())
			} else {
				r.statTicker.Pause()
			}
			r.stats.Reset()

		// The router has been signalled to exit, so we exit our main
		// loop so the wait group can be decremented.
		case <-r.quit:
			return
		}
	}
}
// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
// the database for the passed node with a timestamp newer than the passed
// timestamp. ErrIgnored will be returned if we already have the node, and
// ErrOutdated will be returned if we have a timestamp that's after the new
// timestamp.
func (r *ChannelRouter) assertNodeAnnFreshness(node route.Vertex,
	msgTimestamp time.Time) error {

	// Look the node up in the graph. Nodes without any known channel are
	// rejected outright: accepting free-floating node announcements would
	// open a trivial DoS vector. For known nodes we only accept strictly
	// newer data so that stale announcements can never clobber fresh ones.
	lastUpdate, exists, err := r.cfg.Graph.HasLightningNode(node)
	switch {
	case err != nil:
		return errors.Errorf("unable to query for the "+
			"existence of node: %v", err)

	case !exists:
		return newErrf(ErrIgnored, "Ignoring node announcement"+
			" for node not found in channel graph (%x)",
			node[:])

	case !lastUpdate.Before(msgTimestamp):
		// The stored timestamp is not older than the incoming one, so
		// the message carries nothing new.
		return newErrf(ErrOutdated, "Ignoring outdated "+
			"announcement for %x", node[:])
	}

	return nil
}
// processUpdate processes a newly authenticated channel/edge, node, or
// channel/edge update network update. If the update didn't affect the
// internal state of the router due to either being out of date, invalid, or
// redundant, then an error is returned.
func (r *ChannelRouter) processUpdate(msg interface{}) error {
	switch msg := msg.(type) {

	// A node announcement: persist it only if it is strictly fresher than
	// what the graph already holds.
	case *channeldb.LightningNode:
		// Before we add the node to the database, we'll check to see
		// if the announcement is "fresh" or not. If it isn't, then
		// we'll return an error.
		err := r.assertNodeAnnFreshness(msg.PubKeyBytes, msg.LastUpdate)
		if err != nil {
			return err
		}

		if err := r.cfg.Graph.AddLightningNode(msg); err != nil {
			return errors.Errorf("unable to add node %v to the "+
				"graph: %v", msg.PubKeyBytes, err)
		}

		log.Tracef("Updated vertex data for node=%x", msg.PubKeyBytes)
		r.stats.incNumNodeUpdates()

	// A new channel announcement: validate it against the chain (unless
	// AssumeChannelValid is set) before adding the edge to the graph.
	case *channeldb.ChannelEdgeInfo:
		// Prior to processing the announcement we first check if we
		// already know of this channel, if so, then we can exit early.
		_, _, exists, isZombie, err := r.cfg.Graph.HasChannelEdge(
			msg.ChannelID,
		)
		// ErrGraphNoEdgesFound merely means the edge bucket is empty,
		// which is not a failure here.
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return errors.Errorf("unable to check for edge "+
				"existence: %v", err)
		}
		if isZombie {
			return newErrf(ErrIgnored, "ignoring msg for zombie "+
				"chan_id=%v", msg.ChannelID)
		}
		if exists {
			return newErrf(ErrIgnored, "ignoring msg for known "+
				"chan_id=%v", msg.ChannelID)
		}

		// If AssumeChannelValid is present, then we are unable to
		// perform any of the expensive checks below, so we'll
		// short-circuit our path straight to adding the edge to our
		// graph.
		if r.cfg.AssumeChannelValid {
			if err := r.cfg.Graph.AddChannelEdge(msg); err != nil {
				return fmt.Errorf("unable to add edge: %v", err)
			}
			log.Tracef("New channel discovered! Link "+
				"connects %x and %x with ChannelID(%v)",
				msg.NodeKey1Bytes, msg.NodeKey2Bytes,
				msg.ChannelID)
			r.stats.incNumEdgesDiscovered()

			break
		}

		// Before we can add the channel to the channel graph, we need
		// to obtain the full funding outpoint that's encoded within
		// the channel ID.
		channelID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
		fundingPoint, _, err := r.fetchChanPoint(&channelID)
		if err != nil {
			return errors.Errorf("unable to fetch chan point for "+
				"chan_id=%v: %v", msg.ChannelID, err)
		}

		// Recreate witness output to be sure that declared in channel
		// edge bitcoin keys and channel value corresponds to the
		// reality.
		witnessScript, err := input.GenMultiSigScript(
			msg.BitcoinKey1Bytes[:], msg.BitcoinKey2Bytes[:],
		)
		if err != nil {
			return err
		}
		fundingPkScript, err := input.WitnessScriptHash(witnessScript)
		if err != nil {
			return err
		}

		// Now that we have the funding outpoint of the channel, ensure
		// that it hasn't yet been spent. If so, then this channel has
		// been closed so we'll ignore it.
		chanUtxo, err := r.cfg.Chain.GetUtxo(
			fundingPoint, fundingPkScript, channelID.BlockHeight,
			r.quit,
		)
		if err != nil {
			return fmt.Errorf("unable to fetch utxo "+
				"for chan_id=%v, chan_point=%v: %v",
				msg.ChannelID, fundingPoint, err)
		}

		// By checking the equality of witness pkscripts we checks that
		// funding witness script is multisignature lock which contains
		// both local and remote public keys which was declared in
		// channel edge and also that the announced channel value is
		// right.
		if !bytes.Equal(fundingPkScript, chanUtxo.PkScript) {
			return errors.Errorf("pkScript mismatch: expected %x, "+
				"got %x", fundingPkScript, chanUtxo.PkScript)
		}

		// TODO(roasbeef): this is a hack, needs to be removed
		// after commitment fees are dynamic.
		msg.Capacity = btcutil.Amount(chanUtxo.Value)
		msg.ChannelPoint = *fundingPoint
		if err := r.cfg.Graph.AddChannelEdge(msg); err != nil {
			return errors.Errorf("unable to add edge: %v", err)
		}

		log.Tracef("New channel discovered! Link "+
			"connects %x and %x with ChannelPoint(%v): "+
			"chan_id=%v, capacity=%v",
			msg.NodeKey1Bytes, msg.NodeKey2Bytes,
			fundingPoint, msg.ChannelID, msg.Capacity)
		r.stats.incNumEdgesDiscovered()

		// As a new edge has been added to the channel graph, we'll
		// update the current UTXO filter within our active
		// FilteredChainView so we are notified if/when this channel is
		// closed.
		filterUpdate := []channeldb.EdgePoint{
			{
				FundingPkScript: fundingPkScript,
				OutPoint:        *fundingPoint,
			},
		}
		err = r.cfg.ChainView.UpdateFilter(
			filterUpdate, atomic.LoadUint32(&r.bestHeight),
		)
		if err != nil {
			return errors.Errorf("unable to update chain "+
				"view: %v", err)
		}

	// An update to the forwarding policy for one direction of a known
	// channel edge.
	case *channeldb.ChannelEdgePolicy:
		// We make sure to hold the mutex for this channel ID,
		// such that no other goroutine is concurrently doing
		// database accesses for the same channel ID.
		r.channelEdgeMtx.Lock(msg.ChannelID)
		defer r.channelEdgeMtx.Unlock(msg.ChannelID)

		edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
			r.cfg.Graph.HasChannelEdge(msg.ChannelID)
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return errors.Errorf("unable to check for edge "+
				"existence: %v", err)
		}

		// If the channel is marked as a zombie in our database, and
		// we consider this a stale update, then we should not apply the
		// policy.
		isStaleUpdate := time.Since(msg.LastUpdate) > r.cfg.ChannelPruneExpiry
		if isZombie && isStaleUpdate {
			return newErrf(ErrIgnored, "ignoring stale update "+
				"(flags=%v|%v) for zombie chan_id=%v",
				msg.MessageFlags, msg.ChannelFlags,
				msg.ChannelID)
		}

		// If the channel doesn't exist in our database, we cannot
		// apply the updated policy.
		if !exists {
			return newErrf(ErrIgnored, "ignoring update "+
				"(flags=%v|%v) for unknown chan_id=%v",
				msg.MessageFlags, msg.ChannelFlags,
				msg.ChannelID)
		}

		// As edges are directional edge node has a unique policy for
		// the direction of the edge they control. Therefore we first
		// check if we already have the most up to date information for
		// that edge. If this message has a timestamp not strictly
		// newer than what we already know of we can exit early.
		switch {

		// A flag set of 0 indicates this is an announcement for the
		// "first" node in the channel.
		case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:

			// Ignore outdated message.
			if !edge1Timestamp.Before(msg.LastUpdate) {
				return newErrf(ErrOutdated, "Ignoring "+
					"outdated update (flags=%v|%v) for "+
					"known chan_id=%v", msg.MessageFlags,
					msg.ChannelFlags, msg.ChannelID)
			}

		// Similarly, a flag set of 1 indicates this is an announcement
		// for the "second" node in the channel.
		case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:

			// Ignore outdated message.
			if !edge2Timestamp.Before(msg.LastUpdate) {
				return newErrf(ErrOutdated, "Ignoring "+
					"outdated update (flags=%v|%v) for "+
					"known chan_id=%v", msg.MessageFlags,
					msg.ChannelFlags, msg.ChannelID)
			}
		}

		// Now that we know this isn't a stale update, we'll apply the
		// new edge policy to the proper directional edge within the
		// channel graph.
		if err = r.cfg.Graph.UpdateEdgePolicy(msg); err != nil {
			err := errors.Errorf("unable to add channel: %v", err)
			log.Error(err)
			return err
		}

		log.Tracef("New channel update applied: %v",
			newLogClosure(func() string { return spew.Sdump(msg) }))
		r.stats.incNumChannelUpdates()

	default:
		return errors.Errorf("wrong routing update message type")
	}

	// A mutation was applied, so resume stat logging.
	r.statTicker.Resume()

	return nil
}
// fetchChanPoint retrieves the original outpoint which is encoded within the
// channelID. This method also returns the txout of the target funding
// transaction.
//
// TODO(roasbeef): replace with call to GetBlockTransaction? (would allow to
// later use getblocktxn)
func (r *ChannelRouter) fetchChanPoint(
	chanID *lnwire.ShortChannelID) (*wire.OutPoint, *wire.TxOut, error) {

	// First fetch the block hash by the block number encoded, then use
	// that hash to fetch the block itself.
	blockNum := int64(chanID.BlockHeight)
	blockHash, err := r.cfg.Chain.GetBlockHash(blockNum)
	if err != nil {
		return nil, nil, err
	}
	fundingBlock, err := r.cfg.Chain.GetBlock(blockHash)
	if err != nil {
		return nil, nil, err
	}

	// As a sanity check, ensure that the advertised transaction index is
	// within the bounds of the total number of transactions within a
	// block. We compare with >= numTxns rather than > numTxns-1 so that a
	// block with zero transactions can't cause an unsigned underflow.
	numTxns := uint32(len(fundingBlock.Transactions))
	if chanID.TxIndex >= numTxns {
		return nil, nil, fmt.Errorf("tx_index=#%v is out of range "+
			"(max_index=%v), network_chan_id=%v", chanID.TxIndex,
			int64(numTxns)-1, spew.Sdump(chanID))
	}

	// Finally once we have the block itself, we seek to the targeted
	// transaction index to obtain the funding output and txout. The
	// output index is also bounds-checked, as a bogus announcement could
	// otherwise trigger an out-of-range panic below.
	fundingTx := fundingBlock.Transactions[chanID.TxIndex]
	if int(chanID.TxPosition) >= len(fundingTx.TxOut) {
		return nil, nil, fmt.Errorf("tx_position=%v is out of range "+
			"(num_outputs=%v), network_chan_id=%v",
			chanID.TxPosition, len(fundingTx.TxOut),
			spew.Sdump(chanID))
	}

	outPoint := &wire.OutPoint{
		Hash:  fundingTx.TxHash(),
		Index: uint32(chanID.TxPosition),
	}
	txOut := fundingTx.TxOut[chanID.TxPosition]

	return outPoint, txOut, nil
}
// routingMsg couples a routing-related network topology update with the
// channel on which the result of processing it is delivered back to the
// caller.
type routingMsg struct {
	// msg is the topology update itself: a *channeldb.LightningNode,
	// *channeldb.ChannelEdgeInfo, or *channeldb.ChannelEdgePolicy.
	msg interface{}

	// err receives the outcome of processing msg; a nil send means the
	// update was applied successfully.
	err chan error
}
// FindRoute attempts to query the ChannelRouter for the optimum path to a
// particular target destination to which it is able to send `amt` after
// factoring in channel capacities and cumulative fees along the route.
func (r *ChannelRouter) FindRoute(source, target route.Vertex,
	amt lnwire.MilliSatoshi, restrictions *RestrictParams,
	destTlvRecords []tlv.Record,
	finalExpiry ...uint16) (*route.Route, error) {

	// Use the default final CLTV delta unless the caller supplied an
	// explicit one.
	var finalCLTVDelta uint16
	if len(finalExpiry) == 0 {
		finalCLTVDelta = zpay32.DefaultFinalCLTVDelta
	} else {
		finalCLTVDelta = finalExpiry[0]
	}

	log.Debugf("Searching for path to %x, sending %v", target, amt)

	// We can short circuit the routing by opportunistically checking to
	// see if the target vertex event exists in the current graph.
	if _, exists, err := r.cfg.Graph.HasLightningNode(target); err != nil {
		return nil, err
	} else if !exists {
		log.Debugf("Target %x is not in known graph", target)
		return nil, newErrf(ErrTargetNotInNetwork, "target not found")
	}

	// We'll attempt to obtain a set of bandwidth hints that can help us
	// eliminate certain routes early on in the path finding process.
	bandwidthHints, err := generateBandwidthHints(
		r.selfNode, r.cfg.QueryBandwidth,
	)
	if err != nil {
		return nil, err
	}

	// Now that we know the destination is reachable within the graph, we'll
	// execute our path finding algorithm.
	path, err := findPath(
		&graphParams{
			graph:          r.cfg.Graph,
			bandwidthHints: bandwidthHints,
		},
		restrictions, &r.cfg.PathFindingConfig,
		source, target, amt,
	)
	if err != nil {
		return nil, err
	}

	// We'll fetch the current block height so we can properly calculate the
	// required HTLC time locks within the route.
	_, currentHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return nil, err
	}

	// Create the route with absolute time lock values. The local is named
	// rt so it doesn't shadow the imported route package.
	rt, err := newRoute(
		amt, source, path, uint32(currentHeight), finalCLTVDelta,
		destTlvRecords,
	)
	if err != nil {
		return nil, err
	}

	// Log the obtained route lazily behind a trace-level closure. This is
	// done synchronously: the previous version spawned a goroutine solely
	// for this log line, which could interleave output and outlive
	// shutdown.
	log.Tracef("Obtained path to send %v to %x: %v",
		amt, target, newLogClosure(func() string {
			return spew.Sdump(rt)
		}),
	)

	return rt, nil
}
// generateNewSessionKey generates a new ephemeral private key to be used for a
// payment attempt. A fresh key per attempt ensures the sphinx packet built
// from it is unique, so it cannot be flagged as a replay.
//
// TODO(roasbeef): add more sources of randomness?
func generateNewSessionKey() (*btcec.PrivateKey, error) {
	// Generate a new random session key to ensure that we don't trigger
	// any replay.
	return btcec.NewPrivateKey(btcec.S256())
}
// generateSphinxPacket generates then encodes a sphinx packet which encodes
// the onion route specified by the passed layer 3 route. The blob returned
// from this function can immediately be included within an HTLC add packet to
// be sent to the first hop within the route. The returned Circuit pairs the
// session key with the hop public keys so the eventual failure/settle can be
// decrypted.
func generateSphinxPacket(rt *route.Route, paymentHash []byte,
	sessionKey *btcec.PrivateKey) ([]byte, *sphinx.Circuit, error) {

	// As a sanity check, we'll ensure that the set of hops has been
	// properly filled in, otherwise, we won't actually be able to
	// construct a route.
	if len(rt.Hops) == 0 {
		return nil, nil, route.ErrNoRouteHopsProvided
	}

	// Now that we know we have an actual route, we'll map the route into a
	// sphinx payment path which includes per-hop payloads for each hop
	// that give each node within the route the necessary information
	// (fees, CLTV value, etc) to properly forward the payment.
	sphinxPath, err := rt.ToSphinxPath()
	if err != nil {
		return nil, nil, err
	}

	// The closure below is only evaluated at trace level; the hop copies
	// have their curve pointers nilled so spew doesn't dump the full
	// secp256k1 curve parameters for every hop.
	log.Tracef("Constructed per-hop payloads for payment_hash=%x: %v",
		paymentHash[:], newLogClosure(func() string {
			path := make([]sphinx.OnionHop, sphinxPath.TrueRouteLength())
			for i := range path {
				hopCopy := sphinxPath[i]
				hopCopy.NodePub.Curve = nil
				path[i] = hopCopy
			}
			return spew.Sdump(path)
		}),
	)

	// Next generate the onion routing packet which allows us to perform
	// privacy preserving source routing across the network.
	sphinxPacket, err := sphinx.NewOnionPacket(
		sphinxPath, sessionKey, paymentHash,
	)
	if err != nil {
		return nil, nil, err
	}

	// Finally, encode Sphinx packet using its wire representation to be
	// included within the HTLC add packet.
	var onionBlob bytes.Buffer
	if err := sphinxPacket.Encode(&onionBlob); err != nil {
		return nil, nil, err
	}

	log.Tracef("Generated sphinx packet: %v",
		newLogClosure(func() string {
			// We make a copy of the ephemeral key and unset the
			// internal curve here in order to keep the logs from
			// getting noisy.
			key := *sphinxPacket.EphemeralKey
			key.Curve = nil
			packetCopy := *sphinxPacket
			packetCopy.EphemeralKey = &key
			return spew.Sdump(packetCopy)
		}),
	)

	return onionBlob.Bytes(), &sphinx.Circuit{
		SessionKey:  sessionKey,
		PaymentPath: sphinxPath.NodeKeys(),
	}, nil
}
// LightningPayment describes a payment to be sent through the network to the
// final destination.
type LightningPayment struct {
	// Target is the node in which the payment should be routed towards.
	Target route.Vertex

	// Amount is the value of the payment to send through the network in
	// milli-satoshis.
	Amount lnwire.MilliSatoshi

	// FeeLimit is the maximum fee in millisatoshis that the payment should
	// accept when sending it through the network. The payment will fail
	// if there isn't a route with lower fees than this limit.
	FeeLimit lnwire.MilliSatoshi

	// CltvLimit is the maximum time lock that is allowed for attempts to
	// complete this payment. A nil value means no limit is applied.
	CltvLimit *uint32

	// PaymentHash is the r-hash value to use within the HTLC extended to
	// the first hop.
	PaymentHash [32]byte

	// FinalCLTVDelta is the CLTV expiry delta to use for the _final_ hop
	// in the route. This means that the final hop will have a CLTV delta
	// of at least: currentHeight + FinalCLTVDelta.
	FinalCLTVDelta uint16

	// PayAttemptTimeout is a timeout value that we'll use to determine
	// when we should abandon the payment attempt after consecutive
	// payment failures. This prevents us from attempting to send a payment
	// indefinitely. A zero value means the payment will never time out.
	//
	// TODO(halseth): make wallclock time to allow resume after startup.
	PayAttemptTimeout time.Duration

	// RouteHints represents the different routing hints that can be used to
	// assist a payment in reaching its destination successfully. These
	// hints will act as intermediate hops along the route.
	//
	// NOTE: This is optional unless required by the payment. When providing
	// multiple routes, ensure the hop hints within each route are chained
	// together and sorted in forward order in order to reach the
	// destination successfully.
	RouteHints [][]zpay32.HopHint

	// OutgoingChannelID is the channel that needs to be taken to the first
	// hop. If nil, any channel may be used.
	OutgoingChannelID *uint64

	// PaymentRequest is an optional payment request that this payment is
	// attempting to complete.
	PaymentRequest []byte

	// FinalDestRecords are TLV records that are to be sent to the final
	// hop in the new onion payload format. If the destination does not
	// understand this new onion payload format, then the payment will
	// fail.
	FinalDestRecords []tlv.Record
}
// SendPayment attempts to send a payment as described within the passed
// LightningPayment. This function is blocking and will return either: when the
// payment is successful, or all candidates routes have been attempted and
// resulted in a failed payment. If the payment succeeds, then a non-nil Route
// will be returned which describes the path the successful payment traversed
// within the network to reach the destination. Additionally, the payment
// preimage will also be returned.
func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte,
	*route.Route, error) {

	// Set up the payment session and register the payment with the
	// control tower before dispatching.
	session, err := r.preparePayment(payment)
	if err != nil {
		return [32]byte{}, nil, err
	}

	// A nil attempt is passed since this is a brand new payment with no
	// HTLC handed to the switch yet.
	return r.sendPayment(nil, payment, session)
}
// SendPaymentAsync is the non-blocking version of SendPayment. The payment
// result needs to be retrieved via the control tower.
func (r *ChannelRouter) SendPaymentAsync(payment *LightningPayment) error {
	// Set up the payment session and register the payment with the
	// control tower before dispatching.
	session, err := r.preparePayment(payment)
	if err != nil {
		return err
	}

	// Dispatch the payment in the background, tracked by the router's
	// wait group so shutdown can drain it. A nil attempt is passed since
	// this is a brand new payment.
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()

		if _, _, err := r.sendPayment(nil, payment, session); err != nil {
			log.Errorf("Payment with hash %x failed: %v",
				payment.PaymentHash, err)
		}
	}()

	return nil
}
// preparePayment creates the payment session and registers the payment with the
// control tower.
func (r *ChannelRouter) preparePayment(payment *LightningPayment) (
	PaymentSession, error) {

	// Create a fresh payment session that will report routing failures
	// back to mission control.
	session, err := r.cfg.SessionSource.NewPaymentSession(
		payment.RouteHints, payment.Target,
	)
	if err != nil {
		return nil, err
	}

	// Register the payment hash with the ControlTower, which rejects it
	// if an identical payment is already in-flight.
	//
	// TODO(roasbeef): store records as part of creation info?
	creationInfo := &channeldb.PaymentCreationInfo{
		PaymentHash:    payment.PaymentHash,
		Value:          payment.Amount,
		CreationDate:   time.Now(),
		PaymentRequest: payment.PaymentRequest,
	}
	if err := r.cfg.Control.InitPayment(payment.PaymentHash, creationInfo); err != nil {
		return nil, err
	}

	return session, nil
}
// SendToRoute attempts to send a payment with the given hash through the
// provided route. This function is blocking and will return the obtained
// preimage if the payment is successful or the full error in case of a failure.
func (r *ChannelRouter) SendToRoute(hash lntypes.Hash, route *route.Route) (
	lntypes.Preimage, error) {

	// Create a payment session for just this route.
	paySession := r.cfg.SessionSource.NewPaymentSessionForRoute(route)

	// Calculate amount paid to receiver.
	amt := route.TotalAmount - route.TotalFees()

	// Record this payment hash with the ControlTower, ensuring it is not
	// already in-flight.
	info := &channeldb.PaymentCreationInfo{
		PaymentHash:    hash,
		Value:          amt,
		CreationDate:   time.Now(),
		PaymentRequest: nil,
	}

	err := r.cfg.Control.InitPayment(hash, info)
	if err != nil {
		// Return the zero preimage via the named type for consistency
		// with the other error paths below (previously a bare
		// [32]byte{} literal).
		return lntypes.Preimage{}, err
	}

	// Create a (mostly) dummy payment, as the created payment session is
	// not going to do path finding.
	// TODO(halseth): sendPayment doesn't really need LightningPayment, make
	// it take just needed fields instead.
	//
	// PayAttemptTime doesn't need to be set, as there is only a single
	// attempt.
	payment := &LightningPayment{
		PaymentHash: hash,
	}

	// Since this is the first time this payment is being made, we pass nil
	// for the existing attempt.
	preimage, _, err := r.sendPayment(nil, payment, paySession)
	if err != nil {
		// SendToRoute should return a structured error. In case the
		// provided route fails, payment lifecycle will return a
		// noRouteError with the structured error embedded.
		if noRouteError, ok := err.(errNoRoute); ok {
			if noRouteError.lastError == nil {
				return lntypes.Preimage{},
					errors.New("failure message missing")
			}
			return lntypes.Preimage{}, noRouteError.lastError
		}

		return lntypes.Preimage{}, err
	}

	return preimage, nil
}
// sendPayment attempts to send a payment as described within the passed
// LightningPayment. This function is blocking and will return either: when the
// payment is successful, or all candidates routes have been attempted and
// resulted in a failed payment. If the payment succeeds, then a non-nil Route
// will be returned which describes the path the successful payment traversed
// within the network to reach the destination. Additionally, the payment
// preimage will also be returned.
//
// The existing attempt argument should be set to nil if this is a payment that
// hasn't had any payment attempt sent to the switch yet. If it has had an
// attempt already, it should be passed such that the result can be retrieved.
//
// This method relies on the ControlTower's internal payment state machine to
// carry out its execution. After restarts it is safe, and assumed, that the
// router will call this method for every payment still in-flight according to
// the ControlTower.
func (r *ChannelRouter) sendPayment(
	existingAttempt *channeldb.PaymentAttemptInfo,
	payment *LightningPayment, paySession PaymentSession) (
	[32]byte, *route.Route, error) {

	// The trace-level closure below spews a copy of the payment with the
	// hop-hint curve pointers nilled, so the log doesn't include the full
	// secp256k1 curve parameters for every hint.
	log.Tracef("Dispatching route for lightning payment: %v",
		newLogClosure(func() string {
			// Make a copy of the payment with a nilled Curve
			// before spewing.
			var routeHints [][]zpay32.HopHint
			for _, routeHint := range payment.RouteHints {
				var hopHints []zpay32.HopHint
				for _, hopHint := range routeHint {
					h := hopHint.Copy()
					h.NodeID.Curve = nil
					hopHints = append(hopHints, h)
				}
				routeHints = append(routeHints, hopHints)
			}
			p := *payment
			p.RouteHints = routeHints
			return spew.Sdump(p)
		}),
	)

	// We'll also fetch the current block height so we can properly
	// calculate the required HTLC time locks within the route.
	_, currentHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return [32]byte{}, nil, err
	}

	// Now set up a paymentLifecycle struct with these params, such that we
	// can resume the payment from the current state.
	p := &paymentLifecycle{
		router:         r,
		payment:        payment,
		paySession:     paySession,
		currentHeight:  currentHeight,
		finalCLTVDelta: uint16(payment.FinalCLTVDelta),
		attempt:        existingAttempt,
		circuit:        nil,
		lastError:      nil,
	}

	// If a timeout is specified, create a timeout channel. If no timeout is
	// specified, the channel is left nil and will never abort the payment
	// loop.
	if payment.PayAttemptTimeout != 0 {
		p.timeoutChan = time.After(payment.PayAttemptTimeout)
	}

	return p.resumePayment()
}
// tryApplyChannelUpdate tries to apply a channel update present in the failure
// message if any.
func (r *ChannelRouter) tryApplyChannelUpdate(rt *route.Route,
	errorSourceIdx int, failure lnwire.FailureMessage) error {

	// Index zero means we generated the error ourselves; applying our own
	// channel updates makes no sense.
	if errorSourceIdx == 0 {
		log.Errorf("Channel update of ourselves received")
		return nil
	}

	// Pull the channel update out of the failure, if one is embedded.
	chanUpdate := r.extractChannelUpdate(failure)
	if chanUpdate == nil {
		return nil
	}

	// Parse pubkey to allow validation of the channel update. This should
	// always succeed, otherwise there is something wrong in our
	// implementation. Therefore return an error.
	reporterVertex := rt.Hops[errorSourceIdx-1].PubKeyBytes
	reporterKey, err := btcec.ParsePubKey(
		reporterVertex[:], btcec.S256(),
	)
	if err != nil {
		log.Errorf("Cannot parse pubkey: idx=%v, pubkey=%v",
			errorSourceIdx, reporterVertex)

		return err
	}

	// Apply the update; an invalid update is only logged, not fatal.
	if !r.applyChannelUpdate(chanUpdate, reporterKey) {
		log.Debugf("Invalid channel update received: node=%x",
			reporterVertex)
	}

	return nil
}
// processSendError analyzes the error for the payment attempt received from the
// switch and updates mission control and/or channel policies. Depending on the
// error type, this error is either the final outcome of the payment or we need
// to continue with an alternative route. This is indicated by the boolean
// return value.
//
// A nil return means the payment may be retried along another route; a
// non-nil *FailureReason terminates the payment.
func (r *ChannelRouter) processSendError(paymentID uint64, rt *route.Route,
	sendErr error) *channeldb.FailureReason {

	internalErrorReason := channeldb.FailureReasonError

	// reportFail forwards the failure to mission control; any error while
	// reporting is itself treated as a terminal internal failure.
	reportFail := func(srcIdx *int,
		msg lnwire.FailureMessage) *channeldb.FailureReason {

		// Report outcome to mission control.
		reason, err := r.cfg.MissionControl.ReportPaymentFail(
			paymentID, rt, srcIdx, msg,
		)
		if err != nil {
			log.Errorf("Error reporting payment result to mc: %v",
				err)

			return &internalErrorReason
		}

		return reason
	}

	// An unreadable failure has no attributable source, so both the index
	// and the message are reported as nil.
	if sendErr == htlcswitch.ErrUnreadableFailureMessage {
		log.Tracef("Unreadable failure when sending htlc")

		return reportFail(nil, nil)
	}

	// If an internal, non-forwarding error occurred, we can stop
	// trying.
	fErr, ok := sendErr.(*htlcswitch.ForwardingError)
	if !ok {
		return &internalErrorReason
	}

	failureMessage := fErr.FailureMessage
	failureSourceIdx := fErr.FailureSourceIdx

	// Apply channel update if the error contains one. For unknown
	// failures, failureMessage is nil.
	if failureMessage != nil {
		err := r.tryApplyChannelUpdate(
			rt, failureSourceIdx, failureMessage,
		)
		if err != nil {
			return &internalErrorReason
		}
	}

	log.Tracef("Node=%v reported failure when sending htlc",
		failureSourceIdx)

	return reportFail(&failureSourceIdx, failureMessage)
}
// extractChannelUpdate examines the error and extracts the channel update.
// nil is returned for failure types that carry no update.
func (r *ChannelRouter) extractChannelUpdate(
	failure lnwire.FailureMessage) *lnwire.ChannelUpdate {

	switch onionErr := failure.(type) {
	case *lnwire.FailExpiryTooSoon:
		return &onionErr.Update
	case *lnwire.FailAmountBelowMinimum:
		return &onionErr.Update
	case *lnwire.FailFeeInsufficient:
		return &onionErr.Update
	case *lnwire.FailIncorrectCltvExpiry:
		return &onionErr.Update
	case *lnwire.FailChannelDisabled:
		return &onionErr.Update
	case *lnwire.FailTemporaryChannelFailure:
		// Unlike the cases above, this failure type holds the update
		// as a pointer already.
		return onionErr.Update
	}

	return nil
}
// applyChannelUpdate validates a channel update and if valid, applies it to the
// database. It returns a bool indicating whether the update was successful.
func (r *ChannelRouter) applyChannelUpdate(msg *lnwire.ChannelUpdate,
	pubKey *btcec.PublicKey) bool {

	// Fetch the channel so its capacity can be used when validating the
	// update's signature and htlc limits.
	ch, _, _, err := r.GetChannelByID(msg.ShortChannelID)
	if err != nil {
		log.Errorf("Unable to retrieve channel by id: %v", err)
		return false
	}

	if err := ValidateChannelUpdateAnn(pubKey, ch.Capacity, msg); err != nil {
		log.Errorf("Unable to validate channel update: %v", err)
		return false
	}

	// Map the wire message into the database policy representation and
	// hand it to the router's update pipeline.
	err = r.UpdateEdge(&channeldb.ChannelEdgePolicy{
		SigBytes:                  msg.Signature.ToSignatureBytes(),
		ChannelID:                 msg.ShortChannelID.ToUint64(),
		LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
		MessageFlags:              msg.MessageFlags,
		ChannelFlags:              msg.ChannelFlags,
		TimeLockDelta:             msg.TimeLockDelta,
		MinHTLC:                   msg.HtlcMinimumMsat,
		MaxHTLC:                   msg.HtlcMaximumMsat,
		FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
		FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
	})
	// Ignored/outdated results are expected for redundant gossip and do
	// not count as a failed application.
	if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
		log.Errorf("Unable to apply channel update: %v", err)
		return false
	}

	return true
}
// AddNode is used to add information about a node to the router database. If
// the node with this pubkey is not present in an existing channel, it will
// be ignored.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddNode(node *channeldb.LightningNode) error {
	update := &routingMsg{
		msg: node,
		err: make(chan error, 1),
	}

	// Hand the update to the network handler, bailing out if the router
	// is shutting down.
	select {
	case r.networkUpdates <- update:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the handler to report the processing outcome.
	select {
	case err := <-update.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// AddEdge is used to add edge/channel to the topology of the router, after all
// information about channel will be gathered this edge/channel might be used
// in construction of payment path.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddEdge(edge *channeldb.ChannelEdgeInfo) error {
	update := &routingMsg{
		msg: edge,
		err: make(chan error, 1),
	}

	// Hand the update to the network handler, bailing out if the router
	// is shutting down.
	select {
	case r.networkUpdates <- update:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the handler to report the processing outcome.
	select {
	case err := <-update.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// UpdateEdge is used to update edge information, without this message edge
// considered as not fully constructed.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) UpdateEdge(update *channeldb.ChannelEdgePolicy) error {
	policyUpdate := &routingMsg{
		msg: update,
		err: make(chan error, 1),
	}

	// Hand the update to the network handler, bailing out if the router
	// is shutting down.
	select {
	case r.networkUpdates <- policyUpdate:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the handler to report the processing outcome.
	select {
	case err := <-policyUpdate.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// CurrentBlockHeight returns the block height from POV of the router subsystem.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) CurrentBlockHeight() (uint32, error) {
	// The chain backend reports the height as an int32; callers only
	// inspect the value when err is nil.
	_, bestHeight, err := r.cfg.Chain.GetBestBlock()
	return uint32(bestHeight), err
}
// GetChannelByID return the channel by the channel id.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) GetChannelByID(chanID lnwire.ShortChannelID) (
	*channeldb.ChannelEdgeInfo,
	*channeldb.ChannelEdgePolicy,
	*channeldb.ChannelEdgePolicy, error) {

	// Delegate directly to the graph, converting the short channel ID to
	// its integer form first.
	edgeInfo, policy1, policy2, err := r.cfg.Graph.FetchChannelEdgesByID(
		chanID.ToUint64(),
	)
	return edgeInfo, policy1, policy2, err
}
// FetchLightningNode attempts to look up a target node by its identity public
// key. channeldb.ErrGraphNodeNotFound is returned if the node doesn't exist
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) FetchLightningNode(node route.Vertex) (*channeldb.LightningNode, error) {
	// The vertex is a serialized pubkey; decode it before hitting the
	// graph, surfacing a parse failure explicitly.
	pub, err := btcec.ParsePubKey(node[:], btcec.S256())
	if err != nil {
		return nil, fmt.Errorf("unable to parse raw public key: %v", err)
	}

	return r.cfg.Graph.FetchLightningNode(pub)
}
// ForEachNode is used to iterate over every node in router topology.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) error {
	// Adapt the caller's callback to the graph's transaction-aware
	// signature; the transaction handle itself is not exposed.
	visit := func(_ *bbolt.Tx, n *channeldb.LightningNode) error {
		return cb(n)
	}

	return r.cfg.Graph.ForEachNode(nil, visit)
}
// ForAllOutgoingChannels is used to iterate over all outgoing channels owned by
// the router.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForAllOutgoingChannels(cb func(*channeldb.ChannelEdgeInfo,
	*channeldb.ChannelEdgePolicy) error) error {

	return r.selfNode.ForEachChannel(nil, func(_ *bbolt.Tx,
		chanInfo *channeldb.ChannelEdgeInfo,
		outPolicy, _ *channeldb.ChannelEdgePolicy) error {

		// A channel lacking our own outgoing policy is only half
		// constructed; surface that rather than invoking the callback.
		if outPolicy == nil {
			return fmt.Errorf("Channel from self node has no policy")
		}

		return cb(chanInfo, outPolicy)
	})
}
// ForEachChannel is used to iterate over every known edge (channel) within our
// view of the channel graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForEachChannel(cb func(chanInfo *channeldb.ChannelEdgeInfo,
	e1, e2 *channeldb.ChannelEdgePolicy) error) error {

	// Iteration is handled entirely by the underlying graph database.
	return r.cfg.Graph.ForEachChannel(cb)
}
// AddProof updates the channel edge info with proof which is needed to
// properly announce the edge to the rest of the network.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddProof(chanID lnwire.ShortChannelID,
	proof *channeldb.ChannelAuthProof) error {

	// Look up the edge the proof belongs to.
	edge, _, _, err := r.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
	if err != nil {
		return err
	}

	// Attach the proof and persist the updated edge info.
	edge.AuthProof = proof

	return r.cfg.Graph.UpdateChannelEdge(edge)
}
// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsStaleNode(node route.Vertex, timestamp time.Time) bool {
	// The announcement is stale exactly when the freshness assertion
	// fails.
	err := r.assertNodeAnnFreshness(node, timestamp)
	return err != nil
}
// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsPublicNode(node route.Vertex) (bool, error) {
	// The determination is made entirely by the graph database.
	return r.cfg.Graph.IsPublicNode(node)
}
// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
	// Both a live edge and a zombie entry count as "known".
	_, _, exists, isZombie, _ := r.cfg.Graph.HasChannelEdge(chanID.ToUint64())
	if exists {
		return true
	}
	return isZombie
}
// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that have a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
	timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

	edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
		r.cfg.Graph.HasChannelEdge(chanID.ToUint64())
	if err != nil {
		return false
	}

	// Zombie edges need some extra scrutiny before the new policy can be
	// considered fresh.
	if isZombie {
		// Under AssumeChannelValid, channels are also pruned when
		// both of their edges are disabled, so a policy that keeps
		// the edge disabled remains stale.
		if r.cfg.AssumeChannelValid {
			stillDisabled := flags&lnwire.ChanUpdateDisabled ==
				lnwire.ChanUpdateDisabled
			if stillDisabled {
				return true
			}
		}

		// Otherwise, fall back to the usual ChannelPruneExpiry check.
		return time.Since(timestamp) > r.cfg.ChannelPruneExpiry
	}

	// An edge we don't know about at all is fresh by definition (thus not
	// stale).
	if !exists {
		return false
	}

	// Edges are directional: each node controls a unique policy for its
	// own direction of the edge. Compare the timestamp we already have
	// for the announced direction against the new one; if ours is at
	// least as recent, the new policy is stale.
	if flags&lnwire.ChanUpdateDirection == 0 {
		// A direction bit of 0 is an announcement for the "first"
		// node in the channel.
		return !edge1Timestamp.Before(timestamp)
	}
	if flags&lnwire.ChanUpdateDirection == 1 {
		// A direction bit of 1 is an announcement for the "second"
		// node in the channel.
		return !edge2Timestamp.Before(timestamp)
	}

	return false
}
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
	// Clearing the zombie index entry is handled by the graph database.
	return r.cfg.Graph.MarkEdgeLive(chanID.ToUint64())
}
// generateBandwidthHints is a helper function that's utilized the main
// findPath function in order to obtain hints from the lower layer w.r.t to the
// available bandwidth of edges on the network. Currently, we'll only obtain
// bandwidth hints for the edges we directly have open ourselves. Obtaining
// these hints allows us to reduce the number of extraneous attempts as we can
// skip channels that are inactive, or just don't have enough bandwidth to
// carry the payment.
func generateBandwidthHints(sourceNode *channeldb.LightningNode,
	queryBandwidth func(*channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi) (map[uint64]lnwire.MilliSatoshi, error) {

	// Gather the set of channels emanating from the source node.
	var outgoing []*channeldb.ChannelEdgeInfo
	collect := func(tx *bbolt.Tx, edgeInfo *channeldb.ChannelEdgeInfo,
		_, _ *channeldb.ChannelEdgePolicy) error {

		outgoing = append(outgoing, edgeInfo)
		return nil
	}
	if err := sourceNode.ForEachChannel(nil, collect); err != nil {
		return nil, err
	}

	// With all outbound edges in hand, query the lower switch layer for
	// the most up to date bandwidth value of each.
	hints := make(map[uint64]lnwire.MilliSatoshi, len(outgoing))
	for _, edge := range outgoing {
		hints[edge.ChannelID] = queryBandwidth(edge)
	}

	return hints, nil
}
// runningAmounts keeps running amounts while the route is traversed. As the
// route is walked backwards (see prependChannel), hop fees are folded into
// both fields.
type runningAmounts struct {
	// amt is the intended amount to send via the route.
	amt lnwire.MilliSatoshi

	// max is the running maximum that the route can carry.
	max lnwire.MilliSatoshi
}
// prependChannel returns a new set of running amounts that would result from
// prepending the given channel to the route. If canIncreaseAmt is set, the
// amount may be increased if it is too small to satisfy the channel's minimum
// htlc amount.
func (r *runningAmounts) prependChannel(policy *channeldb.ChannelEdgePolicy,
	capacity btcutil.Amount, localChan bool, canIncreaseAmt bool) (
	runningAmounts, error) {

	// The channel's effective maximum is its advertised max_htlc when
	// present, otherwise its total capacity.
	channelMax := lnwire.NewMSatFromSatoshis(capacity)
	if policy.MessageFlags.HasMaxHtlc() {
		channelMax = policy.MaxHTLC
	}

	amt := r.amt

	// Fixed-amount mode: the amount must satisfy the channel's
	// constraints as is, and only fees may be added on top.
	if !canIncreaseAmt {
		if amt < policy.MinHTLC || amt > channelMax {
			return runningAmounts{}, fmt.Errorf("channel htlc "+
				"constraints [%v - %v] violated with amt %v",
				policy.MinHTLC, channelMax, amt)
		}

		// Fees only apply for channels that aren't our own.
		if !localChan {
			amt += policy.ComputeFee(amt)
		}

		return runningAmounts{
			amt: amt,
		}, nil
	}

	// Minimum-amount mode: raise the running amount to this channel's
	// minimum htlc when needed.
	if policy.MinHTLC > amt {
		amt = policy.MinHTLC
	}

	// Likewise lower the running maximum so that incompatible channels
	// can be detected.
	max := r.max
	if channelMax < max {
		max = channelMax
	}

	// If the minimum amount now exceeds the maximum amount (enforced
	// further down stream), the channel policies are incompatible.
	//
	// With pubkey addressing it is possible that a different channel
	// downstream should have been selected, but we don't backtrack to
	// fix that. It would complicate path finding while we expect this
	// situation to be rare. The spec recommends keeping all policies
	// towards a peer identical; if that is the case, there isn't a
	// better channel that we should have selected.
	if amt > max {
		return runningAmounts{},
			fmt.Errorf("incompatible channel policies: %v "+
				"exceeds %v", amt, max)
	}

	// Add this hop's fees to both running amounts, skipping our own
	// channels as source node fees do not need to be paid.
	if !localChan {
		amt += policy.ComputeFee(amt)
		max += policy.ComputeFee(max)
	}

	return runningAmounts{amt: amt, max: max}, nil
}
// ErrNoChannel is returned when a route cannot be built because there are no
// channels that satisfy all requirements.
type ErrNoChannel struct {
	// position is the index of the hop (within the requested hop list)
	// for which no usable outgoing channel could be found.
	position int

	// fromNode is the node that was expected to be on the sending side of
	// the missing channel.
	fromNode route.Vertex
}
// Error returns a human readable string describing the error.
func (e ErrNoChannel) Error() string {
	// Render as "... node <position> (<fromNode>)".
	msg := fmt.Sprintf("no matching outgoing channel available for "+
		"node %v (%v)", e.position, e.fromNode)
	return msg
}
// BuildRoute returns a fully specified route based on a list of pubkeys. If
// amount is nil, the minimum routable amount is used. To force a specific
// outgoing channel, use the outgoingChan parameter.
func (r *ChannelRouter) BuildRoute(amt *lnwire.MilliSatoshi,
	hops []route.Vertex, outgoingChan *uint64,
	finalCltvDelta int32) (*route.Route, error) {

	log.Tracef("BuildRoute called: hopsCount=%v, amt=%v",
		len(hops), amt)

	// If no amount is specified, we need to build a route for the minimum
	// amount that this route can carry.
	useMinAmt := amt == nil

	// We'll attempt to obtain a set of bandwidth hints that helps us select
	// the best outgoing channel to use in case no outgoing channel is set.
	bandwidthHints, err := generateBandwidthHints(
		r.selfNode, r.cfg.QueryBandwidth,
	)
	if err != nil {
		return nil, err
	}

	// Allocate a list that will contain the selected channels for this
	// route.
	edges := make([]*channeldb.ChannelEdgePolicy, len(hops))

	// Keep a running amount and the maximum for this route.
	amts := runningAmounts{
		max: lnwire.MilliSatoshi(^uint64(0)),
	}
	if useMinAmt {
		// For minimum amount routes, aim to deliver at least 1 msat to
		// the destination. There are nodes in the wild that have a
		// min_htlc channel policy of zero, which could lead to a zero
		// amount payment being made.
		amts.amt = 1
	} else {
		// If an amount is specified, we need to build a route that
		// delivers exactly this amount to the final destination.
		amts.amt = *amt
	}

	// Traverse hops backwards to accumulate fees in the running amounts.
	source := r.selfNode.PubKeyBytes
	for i := len(hops) - 1; i >= 0; i-- {
		toNode := hops[i]

		// The expected sending node of this hop's channel is either
		// ourselves (for the first hop) or the previous hop in the
		// list.
		var fromNode route.Vertex
		if i == 0 {
			fromNode = source
		} else {
			fromNode = hops[i-1]
		}
		localChan := i == 0

		// Iterate over candidate channels to select the channel
		// to use for the final route.
		var (
			bestEdge      *channeldb.ChannelEdgePolicy
			bestAmts      *runningAmounts
			bestBandwidth lnwire.MilliSatoshi
		)
		cb := func(tx *bbolt.Tx,
			edgeInfo *channeldb.ChannelEdgeInfo,
			_, inEdge *channeldb.ChannelEdgePolicy) error {

			chanID := edgeInfo.ChannelID

			// Apply the outgoing channel restriction if one is
			// active; it only constrains our own (first) hop.
			if localChan && outgoingChan != nil &&
				chanID != *outgoingChan {
				return nil
			}

			// No unknown policy channels.
			if inEdge == nil {
				return nil
			}

			// Before we can process the edge, we'll need to
			// fetch the node on the _other_ end of this
			// channel as we may later need to iterate over
			// the incoming edges of this node if we explore
			// it further.
			chanFromNode, err := edgeInfo.FetchOtherNode(
				tx, toNode[:],
			)
			if err != nil {
				return err
			}

			// Continue searching if this channel doesn't
			// connect with the previous hop.
			if chanFromNode.PubKeyBytes != fromNode {
				return nil
			}

			// Validate whether this channel's policy is satisfied
			// and obtain the new running amounts if this channel
			// was to be selected.
			newAmts, err := amts.prependChannel(
				inEdge, edgeInfo.Capacity, localChan,
				useMinAmt,
			)
			if err != nil {
				log.Tracef("Skipping chan %v: %v",
					inEdge.ChannelID, err)
				return nil
			}

			// If we already have a best edge, check whether this
			// edge is better.
			bandwidth := bandwidthHints[chanID]
			if bestEdge != nil {
				if localChan {
					// For local channels, better is defined
					// as having more bandwidth. We try to
					// maximize the chance that the returned
					// route succeeds.
					if bandwidth < bestBandwidth {
						return nil
					}
				} else {
					// For other channels, better is defined
					// as lower fees for the amount to send.
					// Normally all channels between two
					// nodes should have the same policy,
					// but in case not we minimize our cost
					// here. Regular path finding would do
					// the same.
					if newAmts.amt > bestAmts.amt {
						return nil
					}
				}
			}

			// If we get here, the current edge is better. Replace
			// the best.
			bestEdge = inEdge
			bestAmts = &newAmts
			bestBandwidth = bandwidth

			return nil
		}
		err := r.cfg.Graph.ForEachNodeChannel(nil, toNode[:], cb)
		if err != nil {
			return nil, err
		}

		// There is no matching channel. Stop building the route here.
		if bestEdge == nil {
			return nil, ErrNoChannel{
				fromNode: fromNode,
				position: i,
			}
		}
		log.Tracef("Select channel %v at position %v", bestEdge.ChannelID, i)

		edges[i] = bestEdge
		amts = *bestAmts
	}

	// Fetch the current block height, needed to express the route's CLTV
	// values in absolute heights.
	_, height, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return nil, err
	}

	var receiverAmt lnwire.MilliSatoshi
	if useMinAmt {
		// We've calculated the minimum amount for the htlc that the
		// source node hands out. The newRoute call below expects the
		// amount that must reach the receiver after subtraction of fees
		// along the way. Iterate over all edges to calculate the
		// receiver amount.
		receiverAmt = amts.amt
		for _, edge := range edges[1:] {
			receiverAmt -= edge.ComputeFeeFromIncoming(receiverAmt)
		}
	} else {
		// Deliver the specified amount to the receiver.
		receiverAmt = *amt
	}

	// Build and return the final route.
	return newRoute(
		receiverAmt, source, edges, uint32(height),
		uint16(finalCltvDelta), nil,
	)
}
routing: update 3rd-party channel verification to use new chanvalidate package
In the process of moving to use the new package, we no longer need to
fetch the outpoint directly, and instead only need to pass the funding
transaction into the new verification logic.
package routing
import (
"bytes"
"fmt"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwallet/chanvalidate"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/multimutex"
"github.com/lightningnetwork/lnd/routing/chainview"
"github.com/lightningnetwork/lnd/routing/route"
"github.com/lightningnetwork/lnd/ticker"
"github.com/lightningnetwork/lnd/tlv"
"github.com/lightningnetwork/lnd/zpay32"
)
const (
// DefaultPayAttemptTimeout is the default payment attempt timeout. The
// payment attempt timeout defines the duration after which we stop
// trying more routes for a payment.
DefaultPayAttemptTimeout = time.Duration(time.Second * 60)
// DefaultChannelPruneExpiry is the default duration used to determine
// if a channel should be pruned or not.
DefaultChannelPruneExpiry = time.Duration(time.Hour * 24 * 14)
// defaultStatInterval governs how often the router will log non-empty
// stats related to processing new channels, updates, or node
// announcements.
defaultStatInterval = time.Minute
)
var (
	// ErrRouterShuttingDown is returned if the router is in the process of
	// shutting down.
	//
	// fmt.Errorf is used here (rather than errors.New) because this file
	// imports github.com/go-errors/errors under the name "errors".
	ErrRouterShuttingDown = fmt.Errorf("router shutting down")
)
// ChannelGraphSource represents the source of information about the topology
// of the lightning network. It's responsible for the addition of nodes, edges,
// applying edge updates, and returning the current block height with which the
// topology is synchronized.
type ChannelGraphSource interface {
	// AddNode is used to add information about a node to the router
	// database. If the node with this pubkey is not present in an existing
	// channel, it will be ignored.
	AddNode(node *channeldb.LightningNode) error

	// AddEdge is used to add edge/channel to the topology of the router;
	// after all information about the channel has been gathered this
	// edge/channel might be used in construction of payment path.
	AddEdge(edge *channeldb.ChannelEdgeInfo) error

	// AddProof updates the channel edge info with proof which is needed to
	// properly announce the edge to the rest of the network.
	AddProof(chanID lnwire.ShortChannelID, proof *channeldb.ChannelAuthProof) error

	// UpdateEdge is used to update edge information; without this message
	// the edge is considered as not fully constructed.
	UpdateEdge(policy *channeldb.ChannelEdgePolicy) error

	// IsStaleNode returns true if the graph source has a node announcement
	// for the target node with a more recent timestamp. This method will
	// also return true if we don't have an active channel announcement for
	// the target node.
	IsStaleNode(node route.Vertex, timestamp time.Time) bool

	// IsPublicNode determines whether the given vertex is seen as a public
	// node in the graph from the graph's source node's point of view.
	IsPublicNode(node route.Vertex) (bool, error)

	// IsKnownEdge returns true if the graph source already knows of the
	// passed channel ID either as a live or zombie edge.
	IsKnownEdge(chanID lnwire.ShortChannelID) bool

	// IsStaleEdgePolicy returns true if the graph source has a channel
	// edge for the passed channel ID (and flags) that have a more recent
	// timestamp.
	IsStaleEdgePolicy(chanID lnwire.ShortChannelID, timestamp time.Time,
		flags lnwire.ChanUpdateChanFlags) bool

	// MarkEdgeLive clears an edge from our zombie index, deeming it as
	// live.
	MarkEdgeLive(chanID lnwire.ShortChannelID) error

	// ForAllOutgoingChannels is used to iterate over all channels
	// emanating from the "source" node which is the center of the
	// star-graph.
	ForAllOutgoingChannels(cb func(c *channeldb.ChannelEdgeInfo,
		e *channeldb.ChannelEdgePolicy) error) error

	// CurrentBlockHeight returns the block height from POV of the router
	// subsystem.
	CurrentBlockHeight() (uint32, error)

	// GetChannelByID return the channel by the channel id.
	GetChannelByID(chanID lnwire.ShortChannelID) (*channeldb.ChannelEdgeInfo,
		*channeldb.ChannelEdgePolicy, *channeldb.ChannelEdgePolicy, error)

	// FetchLightningNode attempts to look up a target node by its identity
	// public key. channeldb.ErrGraphNodeNotFound is returned if the node
	// doesn't exist within the graph.
	FetchLightningNode(route.Vertex) (*channeldb.LightningNode, error)

	// ForEachNode is used to iterate over every node in the known graph.
	ForEachNode(func(node *channeldb.LightningNode) error) error

	// ForEachChannel is used to iterate over every channel in the known
	// graph.
	ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
		e1, e2 *channeldb.ChannelEdgePolicy) error) error
}
// PaymentAttemptDispatcher is used by the router to send payment attempts onto
// the network, and receive their results.
type PaymentAttemptDispatcher interface {
	// SendHTLC is a function that directs a link-layer switch to
	// forward a fully encoded payment to the first hop in the route
	// denoted by its public key. A non-nil error is to be returned if the
	// payment was unsuccessful.
	SendHTLC(firstHop lnwire.ShortChannelID,
		paymentID uint64,
		htlcAdd *lnwire.UpdateAddHTLC) error

	// GetPaymentResult returns the result of the payment attempt with
	// the given paymentID. The method returns a channel where the payment
	// result will be sent when available, or an error is encountered
	// during forwarding. When a result is received on the channel, the
	// HTLC is guaranteed to no longer be in flight. The switch shutting
	// down is signaled by closing the channel. If the paymentID is
	// unknown, ErrPaymentIDNotFound will be returned.
	GetPaymentResult(paymentID uint64, paymentHash lntypes.Hash,
		deobfuscator htlcswitch.ErrorDecrypter) (
		<-chan *htlcswitch.PaymentResult, error)
}
// PaymentSessionSource is an interface that defines a source for the router to
// retrieve new payment sessions.
type PaymentSessionSource interface {
	// NewPaymentSession creates a new payment session that will produce
	// routes to the given target. An optional set of routing hints can be
	// provided in order to populate additional edges to explore when
	// finding a path to the payment's destination.
	NewPaymentSession(routeHints [][]zpay32.HopHint,
		target route.Vertex) (PaymentSession, error)

	// NewPaymentSessionForRoute creates a new paymentSession instance that
	// is just used for failure reporting to missioncontrol, and will only
	// attempt the given route.
	NewPaymentSessionForRoute(preBuiltRoute *route.Route) PaymentSession

	// NewPaymentSessionEmpty creates a new paymentSession instance that is
	// empty, and will be exhausted immediately. Used for failure reporting
	// to missioncontrol for resumed payments we don't want to make more
	// attempts for.
	NewPaymentSessionEmpty() PaymentSession
}
// MissionController is an interface that exposes failure reporting and
// probability estimation.
type MissionController interface {
	// ReportPaymentFail reports a failed payment to mission control as
	// input for future probability estimates. It returns a bool indicating
	// whether this error is a final error and no further payment attempts
	// need to be made.
	ReportPaymentFail(paymentID uint64, rt *route.Route,
		failureSourceIdx *int, failure lnwire.FailureMessage) (
		*channeldb.FailureReason, error)

	// ReportPaymentSuccess reports a successful payment to mission control
	// as input for future probability estimates.
	ReportPaymentSuccess(paymentID uint64, rt *route.Route) error

	// GetProbability is expected to return the success probability of a
	// payment from fromNode along edge.
	GetProbability(fromNode, toNode route.Vertex,
		amt lnwire.MilliSatoshi) float64
}
// FeeSchema is the set fee configuration for a Lightning Node on the network.
// Using the coefficients described within the schema, the required fee to
// forward outgoing payments can be derived.
type FeeSchema struct {
	// BaseFee is the base amount of milli-satoshis that will be charged
	// for ANY payment forwarded.
	BaseFee lnwire.MilliSatoshi

	// FeeRate is the rate that will be charged for forwarding payments.
	// This value should be interpreted as the numerator for a fraction
	// (fixed point arithmetic) whose denominator is 1 million. As a result
	// the effective fee rate charged per mSAT will be: (amount *
	// FeeRate/1,000,000).
	FeeRate uint32
}
// ChannelPolicy holds the parameters that determine the policy we enforce
// when forwarding payments on a channel. These parameters are communicated
// to the rest of the network in ChannelUpdate messages.
type ChannelPolicy struct {
	// FeeSchema holds the fee configuration for a channel.
	FeeSchema

	// TimeLockDelta is the required HTLC timelock delta to be used
	// when forwarding payments.
	TimeLockDelta uint32

	// MaxHTLC is the maximum HTLC size including fees we are allowed to
	// forward over this channel.
	MaxHTLC lnwire.MilliSatoshi
}
// Config defines the configuration for the ChannelRouter. ALL elements within
// the configuration MUST be non-nil for the ChannelRouter to carry out its
// duties.
type Config struct {
	// Graph is the channel graph that the ChannelRouter will use to gather
	// metrics from and also to carry out path finding queries.
	// TODO(roasbeef): make into an interface
	Graph *channeldb.ChannelGraph

	// Chain is the router's source to the most up-to-date blockchain data.
	// All incoming advertised channels will be checked against the chain
	// to ensure that the channels advertised are still open.
	Chain lnwallet.BlockChainIO

	// ChainView is an instance of a FilteredChainView which is used to
	// watch the sub-set of the UTXO set (the set of active channels) that
	// we need in order to properly maintain the channel graph.
	ChainView chainview.FilteredChainView

	// Payer is an instance of a PaymentAttemptDispatcher and is used by
	// the router to send payment attempts onto the network, and receive
	// their results.
	Payer PaymentAttemptDispatcher

	// Control keeps track of the status of ongoing payments, ensuring we
	// can properly resume them across restarts.
	Control ControlTower

	// MissionControl is a shared memory of sorts that executions of
	// payment path finding use in order to remember which vertexes/edges
	// were pruned from prior attempts. During SendPayment execution,
	// errors sent by nodes are mapped into a vertex or edge to be pruned.
	// Each run will then take into account this set of pruned
	// vertexes/edges to reduce route failure and pass on graph information
	// gained to the next execution.
	MissionControl MissionController

	// SessionSource defines a source for the router to retrieve new payment
	// sessions.
	SessionSource PaymentSessionSource

	// ChannelPruneExpiry is the duration used to determine if a channel
	// should be pruned or not. If the delta between now and when the
	// channel was last updated is greater than ChannelPruneExpiry, then
	// the channel is marked as a zombie channel eligible for pruning.
	ChannelPruneExpiry time.Duration

	// GraphPruneInterval is used as an interval to determine how often we
	// should examine the channel graph to garbage collect zombie channels.
	GraphPruneInterval time.Duration

	// QueryBandwidth is a method that allows the router to query the lower
	// link layer to determine the up to date available bandwidth at a
	// prospective link to be traversed. If the link isn't available, then
	// a value of zero should be returned. Otherwise, the current up to
	// date knowledge of the available bandwidth of the link should be
	// returned.
	QueryBandwidth func(edge *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi

	// NextPaymentID is a method that guarantees to return a new, unique ID
	// each time it is called. This is used by the router to generate a
	// unique payment ID for each payment it attempts to send, such that
	// the switch can properly handle the HTLC.
	NextPaymentID func() (uint64, error)

	// AssumeChannelValid toggles whether or not the router will check for
	// spentness of channel outpoints. For neutrino, this saves long rescans
	// from blocking initial usage of the daemon.
	AssumeChannelValid bool

	// PathFindingConfig defines global path finding parameters.
	PathFindingConfig PathFindingConfig
}
// EdgeLocator is a struct used to identify a specific edge.
type EdgeLocator struct {
	// ChannelID is the channel of this edge.
	ChannelID uint64

	// Direction takes the value of 0 or 1 and is identical in definition to
	// the channel direction flag. A value of 0 means the direction from the
	// lower node pubkey to the higher.
	Direction uint8
}
// String returns a human readable version of the edgeLocator values.
func (e *EdgeLocator) String() string {
	// Rendered as "<channel id>:<direction>".
	s := fmt.Sprintf("%v:%v", e.ChannelID, e.Direction)
	return s
}
// ChannelRouter is the layer 3 router within the Lightning stack. Below the
// ChannelRouter is the HtlcSwitch, and below that is the Bitcoin blockchain
// itself. The primary role of the ChannelRouter is to respond to queries for
// potential routes that can support a payment amount, and also general graph
// reachability questions. The router will prune the channel graph
// automatically as new blocks are discovered which spend certain known funding
// outpoints, thereby closing their respective channels.
type ChannelRouter struct {
	ntfnClientCounter uint64 // To be used atomically.

	started uint32 // To be used atomically.
	stopped uint32 // To be used atomically.

	bestHeight uint32 // To be used atomically.

	// cfg is a copy of the configuration struct that the ChannelRouter was
	// initialized with.
	cfg *Config

	// selfNode is the center of the star-graph centered around the
	// ChannelRouter. The ChannelRouter uses this node as a starting point
	// when doing any path finding.
	selfNode *channeldb.LightningNode

	// newBlocks is a channel in which new blocks connected to the end of
	// the main chain are sent over, and blocks updated after a call to
	// UpdateFilter.
	newBlocks <-chan *chainview.FilteredBlock

	// staleBlocks is a channel in which blocks disconnected from the end
	// of our currently known best chain are sent over.
	staleBlocks <-chan *chainview.FilteredBlock

	// networkUpdates is a channel that carries new topology updates
	// messages from outside the ChannelRouter to be processed by the
	// networkHandler.
	networkUpdates chan *routingMsg

	// topologyClients maps a client's unique notification ID to a
	// topologyClient client that contains its notification dispatch
	// channel.
	topologyClients map[uint64]*topologyClient

	// ntfnClientUpdates is a channel that's used to send new updates to
	// topology notification clients to the ChannelRouter. Updates either
	// add a new notification client, or cancel notifications for an
	// existing client.
	ntfnClientUpdates chan *topologyClientUpdate

	// channelEdgeMtx is a mutex we use to make sure we process only one
	// ChannelEdgePolicy at a time for a given channelID, to ensure
	// consistency between the various database accesses.
	channelEdgeMtx *multimutex.Mutex

	// statTicker is a resumable ticker that logs the router's progress as
	// it discovers channels or receives updates.
	statTicker ticker.Ticker

	// stats tracks newly processed channels, updates, and node
	// announcements over a window of defaultStatInterval.
	stats *routerStats

	sync.RWMutex

	quit chan struct{}
	wg   sync.WaitGroup
}
// A compile time check to ensure ChannelRouter implements the
// ChannelGraphSource interface.
var _ ChannelGraphSource = (*ChannelRouter)(nil)
// New creates a new instance of the ChannelRouter with the specified
// configuration parameters. As part of initialization, if the router detects
// that the channel graph isn't fully in sync with the latest UTXO (since the
// channel graph is a subset of the UTXO set) set, then the router will proceed
// to fully sync to the latest state of the UTXO set.
func New(cfg Config) (*ChannelRouter, error) {
	// The source node anchors the star-graph used as the starting point
	// for path finding.
	selfNode, err := cfg.Graph.SourceNode()
	if err != nil {
		return nil, err
	}

	router := &ChannelRouter{
		cfg:               &cfg,
		networkUpdates:    make(chan *routingMsg),
		topologyClients:   make(map[uint64]*topologyClient),
		ntfnClientUpdates: make(chan *topologyClientUpdate),
		channelEdgeMtx:    multimutex.NewMutex(),
		selfNode:          selfNode,
		statTicker:        ticker.New(defaultStatInterval),
		stats:             new(routerStats),
		quit:              make(chan struct{}),
	}

	return router, nil
}
// Start launches all the goroutines the ChannelRouter requires to carry out
// its duties. If the router has already been started, then this method is a
// noop.
func (r *ChannelRouter) Start() error {
	if !atomic.CompareAndSwapUint32(&r.started, 0, 1) {
		return nil
	}

	log.Tracef("Channel Router starting")

	bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}

	// If the graph has never been pruned, or hasn't fully been created yet,
	// then we don't treat this as an explicit error.
	if _, _, err := r.cfg.Graph.PruneTip(); err != nil {
		switch {
		case err == channeldb.ErrGraphNeverPruned:
			fallthrough
		case err == channeldb.ErrGraphNotFound:
			// If the graph has never been pruned, then we'll set
			// the prune height to the current best height of the
			// chain backend.
			_, err = r.cfg.Graph.PruneGraph(
				nil, bestHash, uint32(bestHeight),
			)
			if err != nil {
				return err
			}
		default:
			return err
		}
	}

	// If AssumeChannelValid is present, then we won't rely on pruning
	// channels from the graph based on their spentness, but whether they
	// are considered zombies or not.
	if r.cfg.AssumeChannelValid {
		if err := r.pruneZombieChans(); err != nil {
			return err
		}
	} else {
		// Otherwise, we'll use our filtered chain view to prune
		// channels as soon as they are detected as spent on-chain.
		if err := r.cfg.ChainView.Start(); err != nil {
			return err
		}

		// Once the instance is active, we'll fetch the channel we'll
		// receive notifications over.
		r.newBlocks = r.cfg.ChainView.FilteredBlocks()
		r.staleBlocks = r.cfg.ChainView.DisconnectedBlocks()

		// Before we perform our manual block pruning, we'll construct
		// and apply a fresh chain filter to the active
		// FilteredChainView instance. We do this before, as otherwise
		// we may miss on-chain events as the filter hasn't properly
		// been applied.
		channelView, err := r.cfg.Graph.ChannelView()
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return err
		}

		log.Infof("Filtering chain using %v channels active",
			len(channelView))

		if len(channelView) != 0 {
			err = r.cfg.ChainView.UpdateFilter(
				channelView, uint32(bestHeight),
			)
			if err != nil {
				return err
			}
		}

		// Before we begin normal operation of the router, we first need
		// to synchronize the channel graph to the latest state of the
		// UTXO set.
		if err := r.syncGraphWithChain(); err != nil {
			return err
		}

		// Finally, before we proceed, we'll prune any unconnected nodes
		// from the graph in order to ensure we maintain a tight graph
		// of "useful" nodes.
		err = r.cfg.Graph.PruneGraphNodes()
		if err != nil && err != channeldb.ErrGraphNodesNotFound {
			return err
		}
	}

	// If any payments are still in flight, we resume, to make sure their
	// results are properly handled.
	payments, err := r.cfg.Control.FetchInFlightPayments()
	if err != nil {
		return err
	}

	for _, payment := range payments {
		log.Infof("Resuming payment with hash %v", payment.Info.PaymentHash)
		r.wg.Add(1)
		go func(payment *channeldb.InFlightPayment) {
			defer r.wg.Done()

			// We create a dummy, empty payment session such that
			// we won't make another payment attempt when the
			// result for the in-flight attempt is received.
			//
			// PayAttemptTime doesn't need to be set, as there is
			// only a single attempt.
			paySession := r.cfg.SessionSource.NewPaymentSessionEmpty()

			lPayment := &LightningPayment{
				PaymentHash: payment.Info.PaymentHash,
			}

			// Declare err locally with := here: the previous code
			// assigned to the enclosing scope's err from every
			// resumption goroutine concurrently, which is a data
			// race.
			_, _, err := r.sendPayment(payment.Attempt, lPayment, paySession)
			if err != nil {
				log.Errorf("Resuming payment with hash %v "+
					"failed: %v.", payment.Info.PaymentHash, err)
				return
			}

			log.Infof("Resumed payment with hash %v completed.",
				payment.Info.PaymentHash)
		}(payment)
	}

	r.wg.Add(1)
	go r.networkHandler()

	return nil
}
// Stop signals the ChannelRouter to gracefully halt all routines. This method
// will *block* until all goroutines have exited. If the channel router has
// already stopped then this method will return immediately.
func (r *ChannelRouter) Stop() error {
	alreadyStopped := !atomic.CompareAndSwapUint32(&r.stopped, 0, 1)
	if alreadyStopped {
		return nil
	}

	log.Tracef("Channel Router shutting down")

	// The filtered chain view is only ever started when AssumeChannelValid
	// isn't set, so it only needs to be torn down in that case.
	if !r.cfg.AssumeChannelValid {
		err := r.cfg.ChainView.Stop()
		if err != nil {
			return err
		}
	}

	// Signal every long-lived goroutine to exit, then wait for all of
	// them to finish before returning.
	close(r.quit)
	r.wg.Wait()

	return nil
}
// syncGraphWithChain attempts to synchronize the current channel graph with
// the latest UTXO set state. This process involves pruning from the channel
// graph any channels which have been closed by spending their funding output
// since we've been down. It first rolls the graph's prune tip back to the
// main chain if we were left on a stale branch, then replays every block
// between the prune tip and the current chain tip through the chain view.
func (r *ChannelRouter) syncGraphWithChain() error {
	// First, we'll need to check to see if we're already in sync with the
	// latest state of the UTXO set.
	bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return err
	}
	// NOTE(review): bestHeight is written directly here while
	// networkHandler accesses it via atomic load/store — presumably safe
	// because this runs from Start before networkHandler is launched;
	// confirm no other goroutine is live at this point.
	r.bestHeight = uint32(bestHeight)
	pruneHash, pruneHeight, err := r.cfg.Graph.PruneTip()
	if err != nil {
		switch {
		// If the graph has never been pruned, or hasn't fully been
		// created yet, then we don't treat this as an explicit error.
		case err == channeldb.ErrGraphNeverPruned:
		case err == channeldb.ErrGraphNotFound:
		default:
			return err
		}
	}
	log.Infof("Prune tip for Channel Graph: height=%v, hash=%v", pruneHeight,
		pruneHash)
	switch {
	// If the graph has never been pruned, then we can exit early as this
	// entails it's being created for the first time and hasn't seen any
	// block or created channels.
	case pruneHeight == 0 || pruneHash == nil:
		return nil
	// If the block hashes and heights match exactly, then we don't need to
	// prune the channel graph as we're already fully in sync.
	case bestHash.IsEqual(pruneHash) && uint32(bestHeight) == pruneHeight:
		return nil
	}
	// If the main chain blockhash at prune height is different from the
	// prune hash, this might indicate the database is on a stale branch.
	mainBlockHash, err := r.cfg.Chain.GetBlockHash(int64(pruneHeight))
	if err != nil {
		return err
	}
	// While we are on a stale branch of the chain, walk backwards to find
	// first common block.
	for !pruneHash.IsEqual(mainBlockHash) {
		log.Infof("channel graph is stale. Disconnecting block %v "+
			"(hash=%v)", pruneHeight, pruneHash)
		// Prune the graph for every channel that was opened at height
		// >= pruneHeight.
		_, err := r.cfg.Graph.DisconnectBlockAtHeight(pruneHeight)
		if err != nil {
			return err
		}
		// Re-read the prune tip that DisconnectBlockAtHeight left us
		// at, then fetch the main-chain hash at that height for the
		// next loop comparison.
		pruneHash, pruneHeight, err = r.cfg.Graph.PruneTip()
		if err != nil {
			switch {
			// If at this point the graph has never been pruned, we
			// can exit as this entails we are back to the point
			// where it hasn't seen any block or created channels,
			// alas there's nothing left to prune.
			case err == channeldb.ErrGraphNeverPruned:
				return nil
			case err == channeldb.ErrGraphNotFound:
				return nil
			default:
				return err
			}
		}
		mainBlockHash, err = r.cfg.Chain.GetBlockHash(int64(pruneHeight))
		if err != nil {
			return err
		}
	}
	log.Infof("Syncing channel graph from height=%v (hash=%v) to height=%v "+
		"(hash=%v)", pruneHeight, pruneHash, bestHeight, bestHash)
	// If we're not yet caught up, then we'll walk forward in the chain
	// pruning the channel graph with each new block that hasn't yet been
	// consumed by the channel graph.
	var spentOutputs []*wire.OutPoint
	for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ {
		// Break out of the rescan early if a shutdown has been
		// requested, otherwise long rescans will block the daemon from
		// shutting down promptly.
		select {
		case <-r.quit:
			return ErrRouterShuttingDown
		default:
		}
		// Using the next height, request a manual block pruning from
		// the chainview for the particular block hash.
		nextHash, err := r.cfg.Chain.GetBlockHash(int64(nextHeight))
		if err != nil {
			return err
		}
		filterBlock, err := r.cfg.ChainView.FilterBlock(nextHash)
		if err != nil {
			return err
		}
		// We're only interested in all prior outputs that have been
		// spent in the block, so collate all the referenced previous
		// outpoints within each tx and input.
		for _, tx := range filterBlock.Transactions {
			for _, txIn := range tx.TxIn {
				spentOutputs = append(spentOutputs,
					&txIn.PreviousOutPoint)
			}
		}
	}
	// With the spent outputs gathered, attempt to prune the channel graph,
	// also passing in the best hash+height so the prune tip can be updated.
	closedChans, err := r.cfg.Graph.PruneGraph(
		spentOutputs, bestHash, uint32(bestHeight),
	)
	if err != nil {
		return err
	}
	log.Infof("Graph pruning complete: %v channels were closed since "+
		"height %v", len(closedChans), pruneHeight)
	return nil
}
// pruneZombieChans is a method that will be called periodically to prune out
// any "zombie" channels. We consider channels zombies if *both* edges haven't
// been updated since our zombie horizon. If AssumeChannelValid is present,
// we'll also consider channels zombies if *both* edges are disabled. This
// usually signals that a channel has been closed on-chain. We do this
// periodically to keep a healthy, lively routing table.
func (r *ChannelRouter) pruneZombieChans() error {
	chansToPrune := make(map[uint64]struct{})
	chanExpiry := r.cfg.ChannelPruneExpiry

	log.Infof("Examining channel graph for zombie channels")

	// A helper method to detect if the channel belongs to this node
	isSelfChannelEdge := func(info *channeldb.ChannelEdgeInfo) bool {
		return info.NodeKey1Bytes == r.selfNode.PubKeyBytes ||
			info.NodeKey2Bytes == r.selfNode.PubKeyBytes
	}

	// First, we'll collect all the channels which are eligible for garbage
	// collection due to being zombies.
	filterPruneChans := func(info *channeldb.ChannelEdgeInfo,
		e1, e2 *channeldb.ChannelEdgePolicy) error {
		// Exit early in case this channel is already marked to be pruned
		if _, markedToPrune := chansToPrune[info.ChannelID]; markedToPrune {
			return nil
		}
		// We'll ensure that we don't attempt to prune our *own*
		// channels from the graph, as in any case this should be
		// re-advertised by the sub-system above us.
		if isSelfChannelEdge(info) {
			return nil
		}
		// If *both* edges haven't been updated for a period of
		// chanExpiry, then we'll mark the channel itself as eligible
		// for graph pruning.
		var e1Zombie, e2Zombie bool
		if e1 != nil {
			e1Zombie = time.Since(e1.LastUpdate) >= chanExpiry
			if e1Zombie {
				log.Tracef("Edge #1 of ChannelID(%v) last "+
					"update: %v", info.ChannelID,
					e1.LastUpdate)
			}
		}
		if e2 != nil {
			e2Zombie = time.Since(e2.LastUpdate) >= chanExpiry
			if e2Zombie {
				log.Tracef("Edge #2 of ChannelID(%v) last "+
					"update: %v", info.ChannelID,
					e2.LastUpdate)
			}
		}
		// If the channel is not considered zombie, we can move on to
		// the next.
		if !e1Zombie || !e2Zombie {
			return nil
		}

		log.Debugf("ChannelID(%v) is a zombie, collecting to prune",
			info.ChannelID)

		// TODO(roasbeef): add ability to delete single directional edge
		chansToPrune[info.ChannelID] = struct{}{}

		return nil
	}

	// If AssumeChannelValid is present we'll look at the disabled bit for both
	// edges. If they're both disabled, then we can interpret this as the
	// channel being closed and can prune it from our graph.
	if r.cfg.AssumeChannelValid {
		disabledChanIDs, err := r.cfg.Graph.DisabledChannelIDs()
		if err != nil {
			return fmt.Errorf("unable to get disabled channels ids "+
				"chans: %v", err)
		}

		disabledEdges, err := r.cfg.Graph.FetchChanInfos(disabledChanIDs)
		if err != nil {
			return fmt.Errorf("unable to fetch disabled channels edges "+
				"chans: %v", err)
		}

		// Ensuring we won't prune our own channel from the graph.
		for _, disabledEdge := range disabledEdges {
			if !isSelfChannelEdge(disabledEdge.Info) {
				chansToPrune[disabledEdge.Info.ChannelID] = struct{}{}
			}
		}
	}

	// Examine all edges whose last update falls within the zombie horizon
	// (from the beginning of time up to now minus chanExpiry).
	startTime := time.Unix(0, 0)
	endTime := time.Now().Add(-1 * chanExpiry)
	oldEdges, err := r.cfg.Graph.ChanUpdatesInHorizon(startTime, endTime)
	if err != nil {
		return fmt.Errorf("unable to fetch expired channel updates "+
			"chans: %v", err)
	}

	for _, u := range oldEdges {
		// Previously the returned error was silently dropped; surface
		// it so a filtering failure can't be missed.
		if err := filterPruneChans(u.Info, u.Policy1, u.Policy2); err != nil {
			return fmt.Errorf("unable to filter channels to "+
				"prune: %v", err)
		}
	}

	log.Infof("Pruning %v zombie channels", len(chansToPrune))

	// With the set of zombie-like channels obtained, we'll do another pass
	// to delete them from the channel graph.
	toPrune := make([]uint64, 0, len(chansToPrune))
	for chanID := range chansToPrune {
		toPrune = append(toPrune, chanID)
		log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
	}
	if err := r.cfg.Graph.DeleteChannelEdges(toPrune...); err != nil {
		return fmt.Errorf("unable to delete zombie channels: %v", err)
	}

	// With the channels pruned, we'll also attempt to prune any nodes that
	// were a part of them.
	err = r.cfg.Graph.PruneGraphNodes()
	if err != nil && err != channeldb.ErrGraphNodesNotFound {
		return fmt.Errorf("unable to prune graph nodes: %v", err)
	}

	return nil
}
// networkHandler is the primary goroutine for the ChannelRouter. The roles of
// this goroutine include answering queries related to the state of the
// network, pruning the graph on new block notification, applying network
// updates, and registering new topology clients.
//
// NOTE: This MUST be run as a goroutine.
func (r *ChannelRouter) networkHandler() {
	defer r.wg.Done()
	graphPruneTicker := time.NewTicker(r.cfg.GraphPruneInterval)
	defer graphPruneTicker.Stop()
	r.statTicker.Resume()
	defer r.statTicker.Stop()
	r.stats.Reset()
	// We'll use this validation barrier to ensure that we process all jobs
	// in the proper order during parallel validation.
	validationBarrier := NewValidationBarrier(runtime.NumCPU()*4, r.quit)
	for {
		select {
		// A new fully validated network update has just arrived. As a
		// result we'll modify the channel graph accordingly depending
		// on the exact type of the message.
		case update := <-r.networkUpdates:
			// We'll set up any dependants, and wait until a free
			// slot for this job opens up, this allow us to not
			// have thousands of goroutines active.
			validationBarrier.InitJobDependencies(update.msg)
			r.wg.Add(1)
			// update is declared per-iteration above, so the
			// closure below captures its own copy — no loop
			// variable sharing issue.
			go func() {
				defer r.wg.Done()
				defer validationBarrier.CompleteJob()
				// If this message has an existing dependency,
				// then we'll wait until that has been fully
				// validated before we proceed.
				err := validationBarrier.WaitForDependants(
					update.msg,
				)
				if err != nil {
					if err != ErrVBarrierShuttingDown {
						log.Warnf("unexpected error "+
							"during validation "+
							"barrier shutdown: %v",
							err)
					}
					return
				}
				// Process the routing update to determine if
				// this is either a new update from our PoV or
				// an update to a prior vertex/edge we
				// previously accepted.
				err = r.processUpdate(update.msg)
				// Deliver the result to the sender blocked on
				// update.err before signalling dependants.
				update.err <- err
				// If this message had any dependencies, then
				// we can now signal them to continue.
				validationBarrier.SignalDependants(update.msg)
				if err != nil {
					return
				}
				// Send off a new notification for the newly
				// accepted update.
				topChange := &TopologyChange{}
				err = addToTopologyChange(
					r.cfg.Graph, topChange, update.msg,
				)
				if err != nil {
					log.Errorf("unable to update topology "+
						"change notification: %v", err)
					return
				}
				if !topChange.isEmpty() {
					r.notifyTopologyChange(topChange)
				}
			}()
		// TODO(roasbeef): remove all unconnected vertexes
		// after N blocks pass with no corresponding
		// announcements.
		case chainUpdate, ok := <-r.staleBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}
			// Since this block is stale, we update our best height
			// to the previous block.
			blockHeight := uint32(chainUpdate.Height)
			atomic.StoreUint32(&r.bestHeight, blockHeight-1)
			// Update the channel graph to reflect that this block
			// was disconnected.
			_, err := r.cfg.Graph.DisconnectBlockAtHeight(blockHeight)
			if err != nil {
				log.Errorf("unable to prune graph with stale "+
					"block: %v", err)
				continue
			}
			// TODO(halseth): notify client about the reorg?
		// A new block has arrived, so we can prune the channel graph
		// of any channels which were closed in the block.
		case chainUpdate, ok := <-r.newBlocks:
			// If the channel has been closed, then this indicates
			// the daemon is shutting down, so we exit ourselves.
			if !ok {
				return
			}
			// We'll ensure that any new blocks received attach
			// directly to the end of our main chain. If not, then
			// we've somehow missed some blocks. We don't process
			// this block as otherwise, we may miss on-chain
			// events.
			currentHeight := atomic.LoadUint32(&r.bestHeight)
			if chainUpdate.Height != currentHeight+1 {
				log.Errorf("out of order block: expecting "+
					"height=%v, got height=%v", currentHeight+1,
					chainUpdate.Height)
				continue
			}
			// Once a new block arrives, we update our running
			// track of the height of the chain tip.
			blockHeight := uint32(chainUpdate.Height)
			atomic.StoreUint32(&r.bestHeight, blockHeight)
			log.Infof("Pruning channel graph using block %v (height=%v)",
				chainUpdate.Hash, blockHeight)
			// We're only interested in all prior outputs that have
			// been spent in the block, so collate all the
			// referenced previous outpoints within each tx and
			// input.
			var spentOutputs []*wire.OutPoint
			for _, tx := range chainUpdate.Transactions {
				for _, txIn := range tx.TxIn {
					spentOutputs = append(spentOutputs,
						&txIn.PreviousOutPoint)
				}
			}
			// With the spent outputs gathered, attempt to prune
			// the channel graph, also passing in the hash+height
			// of the block being pruned so the prune tip can be
			// updated.
			chansClosed, err := r.cfg.Graph.PruneGraph(spentOutputs,
				&chainUpdate.Hash, chainUpdate.Height)
			if err != nil {
				log.Errorf("unable to prune routing table: %v", err)
				continue
			}
			log.Infof("Block %v (height=%v) closed %v channels",
				chainUpdate.Hash, blockHeight, len(chansClosed))
			if len(chansClosed) == 0 {
				continue
			}
			// Notify all currently registered clients of the newly
			// closed channels.
			closeSummaries := createCloseSummaries(blockHeight, chansClosed...)
			r.notifyTopologyChange(&TopologyChange{
				ClosedChannels: closeSummaries,
			})
		// A new notification client update has arrived. We're either
		// gaining a new client, or cancelling notifications for an
		// existing client.
		case ntfnUpdate := <-r.ntfnClientUpdates:
			clientID := ntfnUpdate.clientID
			if ntfnUpdate.cancel {
				// NOTE(review): the client is looked up under
				// RLock and deleted under a separate Lock;
				// two concurrent cancels for the same ID could
				// both observe ok==true between the two locks.
				// Presumably each client cancels at most once —
				// confirm with the callers.
				r.RLock()
				client, ok := r.topologyClients[ntfnUpdate.clientID]
				r.RUnlock()
				if ok {
					r.Lock()
					delete(r.topologyClients, clientID)
					r.Unlock()
					close(client.exit)
					client.wg.Wait()
					close(client.ntfnChan)
				}
				continue
			}
			r.Lock()
			r.topologyClients[ntfnUpdate.clientID] = &topologyClient{
				ntfnChan: ntfnUpdate.ntfnChan,
				exit:     make(chan struct{}),
			}
			r.Unlock()
		// The graph prune ticker has ticked, so we'll examine the
		// state of the known graph to filter out any zombie channels
		// for pruning.
		case <-graphPruneTicker.C:
			if err := r.pruneZombieChans(); err != nil {
				log.Errorf("Unable to prune zombies: %v", err)
			}
		// Log any stats if we've processed a non-empty number of
		// channels, updates, or nodes. We'll only pause the ticker if
		// the last window contained no updates to avoid resuming and
		// pausing while consecutive windows contain new info.
		case <-r.statTicker.Ticks():
			if !r.stats.Empty() {
				log.Infof(r.stats.String())
			} else {
				r.statTicker.Pause()
			}
			r.stats.Reset()
		// The router has been signalled to exit, to we exit our main
		// loop so the wait group can be decremented.
		case <-r.quit:
			return
		}
	}
}
// assertNodeAnnFreshness returns a non-nil error if we have an announcement in
// the database for the passed node with a timestamp newer than the passed
// timestamp. ErrIgnored will be returned if we already have the node, and
// ErrOutdated will be returned if we have a timestamp that's after the new
// timestamp.
func (r *ChannelRouter) assertNodeAnnFreshness(node route.Vertex,
	msgTimestamp time.Time) error {

	// Look up whether any channel in the graph references this node and,
	// if so, the timestamp of the announcement we currently hold for it.
	lastUpdate, exists, err := r.cfg.Graph.HasLightningNode(node)
	if err != nil {
		return errors.Errorf("unable to query for the "+
			"existence of node: %v", err)
	}

	switch {
	// Announcements for nodes with no known channels are dropped
	// outright: accepting them would open a node-announcement DoS vector.
	case !exists:
		return newErrf(ErrIgnored, "Ignoring node announcement"+
			" for node not found in channel graph (%x)",
			node[:])

	// The node is known, so only a strictly newer timestamp may replace
	// the data we already have; anything else would roll back state.
	case !lastUpdate.Before(msgTimestamp):
		return newErrf(ErrOutdated, "Ignoring outdated "+
			"announcement for %x", node[:])
	}

	return nil
}
// processUpdate applies a single authenticated network update — a node
// announcement, a new channel edge, or a channel policy update — to the
// channel graph. If the update didn't affect our internal state due to being
// out of date, invalid, or redundant, a non-nil error (typically ErrIgnored
// or ErrOutdated) is returned.
func (r *ChannelRouter) processUpdate(msg interface{}) error {
	switch msg := msg.(type) {
	case *channeldb.LightningNode:
		// Before we add the node to the database, we'll check to see
		// if the announcement is "fresh" or not. If it isn't, then
		// we'll return an error.
		err := r.assertNodeAnnFreshness(msg.PubKeyBytes, msg.LastUpdate)
		if err != nil {
			return err
		}
		if err := r.cfg.Graph.AddLightningNode(msg); err != nil {
			return errors.Errorf("unable to add node %v to the "+
				"graph: %v", msg.PubKeyBytes, err)
		}
		log.Tracef("Updated vertex data for node=%x", msg.PubKeyBytes)
		r.stats.incNumNodeUpdates()
	case *channeldb.ChannelEdgeInfo:
		// Prior to processing the announcement we first check if we
		// already know of this channel, if so, then we can exit early.
		_, _, exists, isZombie, err := r.cfg.Graph.HasChannelEdge(
			msg.ChannelID,
		)
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return errors.Errorf("unable to check for edge "+
				"existence: %v", err)
		}
		if isZombie {
			return newErrf(ErrIgnored, "ignoring msg for zombie "+
				"chan_id=%v", msg.ChannelID)
		}
		if exists {
			return newErrf(ErrIgnored, "ignoring msg for known "+
				"chan_id=%v", msg.ChannelID)
		}
		// If AssumeChannelValid is present, then we are unable to
		// perform any of the expensive checks below, so we'll
		// short-circuit our path straight to adding the edge to our
		// graph.
		if r.cfg.AssumeChannelValid {
			if err := r.cfg.Graph.AddChannelEdge(msg); err != nil {
				return fmt.Errorf("unable to add edge: %v", err)
			}
			log.Tracef("New channel discovered! Link "+
				"connects %x and %x with ChannelID(%v)",
				msg.NodeKey1Bytes, msg.NodeKey2Bytes,
				msg.ChannelID)
			r.stats.incNumEdgesDiscovered()
			break
		}
		// Before we can add the channel to the channel graph, we need
		// to obtain the full funding outpoint that's encoded within
		// the channel ID.
		channelID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
		fundingTx, err := r.fetchFundingTx(&channelID)
		if err != nil {
			return errors.Errorf("unable to fetch funding tx for "+
				"chan_id=%v: %v", msg.ChannelID, err)
		}
		// Recreate witness output to be sure that declared in channel
		// edge bitcoin keys and channel value corresponds to the
		// reality.
		witnessScript, err := input.GenMultiSigScript(
			msg.BitcoinKey1Bytes[:], msg.BitcoinKey2Bytes[:],
		)
		if err != nil {
			return err
		}
		pkScript, err := input.WitnessScriptHash(witnessScript)
		if err != nil {
			return err
		}
		// Next we'll validate that this channel is actually well
		// formed. If this check fails, then this channel either
		// doesn't exist, or isn't the one that was meant to be created
		// according to the passed channel proofs.
		fundingPoint, err := chanvalidate.Validate(&chanvalidate.Context{
			Locator: &chanvalidate.ShortChanIDChanLocator{
				ID: channelID,
			},
			MultiSigPkScript: pkScript,
			FundingTx:        fundingTx,
		})
		if err != nil {
			return err
		}
		// Now that we have the funding outpoint of the channel, ensure
		// that it hasn't yet been spent. If so, then this channel has
		// been closed so we'll ignore it.
		//
		// NOTE(review): this recomputes WitnessScriptHash on the same
		// witnessScript as pkScript above, so fundingPkScript should
		// equal pkScript — the duplicate call looks redundant; confirm
		// and consider reusing pkScript.
		fundingPkScript, err := input.WitnessScriptHash(witnessScript)
		if err != nil {
			return err
		}
		chanUtxo, err := r.cfg.Chain.GetUtxo(
			fundingPoint, fundingPkScript, channelID.BlockHeight,
			r.quit,
		)
		if err != nil {
			return fmt.Errorf("unable to fetch utxo "+
				"for chan_id=%v, chan_point=%v: %v",
				msg.ChannelID, fundingPoint, err)
		}
		// TODO(roasbeef): this is a hack, needs to be removed
		// after commitment fees are dynamic.
		msg.Capacity = btcutil.Amount(chanUtxo.Value)
		msg.ChannelPoint = *fundingPoint
		if err := r.cfg.Graph.AddChannelEdge(msg); err != nil {
			return errors.Errorf("unable to add edge: %v", err)
		}
		log.Tracef("New channel discovered! Link "+
			"connects %x and %x with ChannelPoint(%v): "+
			"chan_id=%v, capacity=%v",
			msg.NodeKey1Bytes, msg.NodeKey2Bytes,
			fundingPoint, msg.ChannelID, msg.Capacity)
		r.stats.incNumEdgesDiscovered()
		// As a new edge has been added to the channel graph, we'll
		// update the current UTXO filter within our active
		// FilteredChainView so we are notified if/when this channel is
		// closed.
		filterUpdate := []channeldb.EdgePoint{
			{
				FundingPkScript: fundingPkScript,
				OutPoint:        *fundingPoint,
			},
		}
		err = r.cfg.ChainView.UpdateFilter(
			filterUpdate, atomic.LoadUint32(&r.bestHeight),
		)
		if err != nil {
			return errors.Errorf("unable to update chain "+
				"view: %v", err)
		}
	case *channeldb.ChannelEdgePolicy:
		// We make sure to hold the mutex for this channel ID,
		// such that no other goroutine is concurrently doing
		// database accesses for the same channel ID.
		r.channelEdgeMtx.Lock(msg.ChannelID)
		defer r.channelEdgeMtx.Unlock(msg.ChannelID)
		edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
			r.cfg.Graph.HasChannelEdge(msg.ChannelID)
		if err != nil && err != channeldb.ErrGraphNoEdgesFound {
			return errors.Errorf("unable to check for edge "+
				"existence: %v", err)
		}
		// If the channel is marked as a zombie in our database, and
		// we consider this a stale update, then we should not apply the
		// policy.
		isStaleUpdate := time.Since(msg.LastUpdate) > r.cfg.ChannelPruneExpiry
		if isZombie && isStaleUpdate {
			return newErrf(ErrIgnored, "ignoring stale update "+
				"(flags=%v|%v) for zombie chan_id=%v",
				msg.MessageFlags, msg.ChannelFlags,
				msg.ChannelID)
		}
		// If the channel doesn't exist in our database, we cannot
		// apply the updated policy.
		if !exists {
			return newErrf(ErrIgnored, "ignoring update "+
				"(flags=%v|%v) for unknown chan_id=%v",
				msg.MessageFlags, msg.ChannelFlags,
				msg.ChannelID)
		}
		// As edges are directional edge node has a unique policy for
		// the direction of the edge they control. Therefore we first
		// check if we already have the most up to date information for
		// that edge. If this message has a timestamp not strictly
		// newer than what we already know of we can exit early.
		switch {
		// A flag set of 0 indicates this is an announcement for the
		// "first" node in the channel.
		case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
			// Ignore outdated message.
			if !edge1Timestamp.Before(msg.LastUpdate) {
				return newErrf(ErrOutdated, "Ignoring "+
					"outdated update (flags=%v|%v) for "+
					"known chan_id=%v", msg.MessageFlags,
					msg.ChannelFlags, msg.ChannelID)
			}
		// Similarly, a flag set of 1 indicates this is an announcement
		// for the "second" node in the channel.
		case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
			// Ignore outdated message.
			if !edge2Timestamp.Before(msg.LastUpdate) {
				return newErrf(ErrOutdated, "Ignoring "+
					"outdated update (flags=%v|%v) for "+
					"known chan_id=%v", msg.MessageFlags,
					msg.ChannelFlags, msg.ChannelID)
			}
		}
		// Now that we know this isn't a stale update, we'll apply the
		// new edge policy to the proper directional edge within the
		// channel graph.
		if err = r.cfg.Graph.UpdateEdgePolicy(msg); err != nil {
			err := errors.Errorf("unable to add channel: %v", err)
			log.Error(err)
			return err
		}
		log.Tracef("New channel update applied: %v",
			newLogClosure(func() string { return spew.Sdump(msg) }))
		r.stats.incNumChannelUpdates()
	default:
		return errors.Errorf("wrong routing update message type")
	}
	r.statTicker.Resume()
	return nil
}
// fetchFundingTx returns the funding transaction identified by the passed
// short channel ID.
//
// TODO(roasbeef): replace with call to GetBlockTransaction? (would allow to
// later use getblocktxn)
func (r *ChannelRouter) fetchFundingTx(
	chanID *lnwire.ShortChannelID) (*wire.MsgTx, error) {

	// First fetch the block hash by the block number encoded, then use
	// that hash to fetch the block itself.
	blockNum := int64(chanID.BlockHeight)
	blockHash, err := r.cfg.Chain.GetBlockHash(blockNum)
	if err != nil {
		return nil, err
	}

	fundingBlock, err := r.cfg.Chain.GetBlock(blockHash)
	if err != nil {
		return nil, err
	}

	// As a sanity check, ensure that the advertised transaction index is
	// within the bounds of the total number of transactions within a
	// block. We compare with >= rather than > numTxns-1: numTxns is an
	// unsigned value, so for a block with zero transactions the old form
	// would underflow and the out-of-range index would slip through to an
	// index-out-of-range panic below.
	numTxns := uint32(len(fundingBlock.Transactions))
	if chanID.TxIndex >= numTxns {
		return nil, fmt.Errorf("tx_index=#%v is out of range "+
			"(max_index=%v), network_chan_id=%v", chanID.TxIndex,
			numTxns-1, chanID)
	}

	return fundingBlock.Transactions[chanID.TxIndex], nil
}
// routingMsg couples a routing-related topology update to the error channel
// used to deliver the result of processing it back to the sender.
type routingMsg struct {
	// msg is the raw topology update (node announcement, channel edge
	// info, or channel edge policy) to be applied to the graph.
	msg interface{}
	// err receives the (possibly nil) outcome of processing msg.
	err chan error
}
// FindRoute attempts to query the ChannelRouter for the optimum path to a
// particular target destination to which it is able to send `amt` after
// factoring in channel capacities and cumulative fees along the route.
//
// If finalExpiry is omitted, the zpay32 default final CLTV delta is used.
// ErrTargetNotInNetwork is returned when the target vertex is unknown to the
// channel graph.
func (r *ChannelRouter) FindRoute(source, target route.Vertex,
	amt lnwire.MilliSatoshi, restrictions *RestrictParams,
	destTlvRecords []tlv.Record,
	finalExpiry ...uint16) (*route.Route, error) {

	var finalCLTVDelta uint16
	if len(finalExpiry) == 0 {
		finalCLTVDelta = zpay32.DefaultFinalCLTVDelta
	} else {
		finalCLTVDelta = finalExpiry[0]
	}

	log.Debugf("Searching for path to %x, sending %v", target, amt)

	// We can short circuit the routing by opportunistically checking to
	// see if the target vertex event exists in the current graph.
	if _, exists, err := r.cfg.Graph.HasLightningNode(target); err != nil {
		return nil, err
	} else if !exists {
		log.Debugf("Target %x is not in known graph", target)
		return nil, newErrf(ErrTargetNotInNetwork, "target not found")
	}

	// We'll attempt to obtain a set of bandwidth hints that can help us
	// eliminate certain routes early on in the path finding process.
	bandwidthHints, err := generateBandwidthHints(
		r.selfNode, r.cfg.QueryBandwidth,
	)
	if err != nil {
		return nil, err
	}

	// Now that we know the destination is reachable within the graph, we'll
	// execute our path finding algorithm.
	path, err := findPath(
		&graphParams{
			graph:          r.cfg.Graph,
			bandwidthHints: bandwidthHints,
		},
		restrictions, &r.cfg.PathFindingConfig,
		source, target, amt,
	)
	if err != nil {
		return nil, err
	}

	// We'll fetch the current block height so we can properly calculate the
	// required HTLC time locks within the route.
	_, currentHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return nil, err
	}

	// Create the route with absolute time lock values. The local is named
	// rt rather than route to avoid shadowing the imported route package
	// used throughout this function's signature.
	rt, err := newRoute(
		amt, source, path, uint32(currentHeight), finalCLTVDelta,
		destTlvRecords,
	)
	if err != nil {
		return nil, err
	}

	// Logged from a goroutine since spew.Sdump of a full route is
	// expensive and only needed at trace level.
	go log.Tracef("Obtained path to send %v to %x: %v",
		amt, target, newLogClosure(func() string {
			return spew.Sdump(rt)
		}),
	)

	return rt, nil
}
// generateNewSessionKey generates a new ephemeral private key to be used for a
// payment attempt. A fresh key per attempt ensures the resulting onion packet
// can never be replayed against an earlier attempt.
func generateNewSessionKey() (*btcec.PrivateKey, error) {
	// Generate a new random session key to ensure that we don't trigger
	// any replay.
	//
	// TODO(roasbeef): add more sources of randomness?
	return btcec.NewPrivateKey(btcec.S256())
}
// generateSphinxPacket generates then encodes a sphinx packet which encodes
// the onion route specified by the passed layer 3 route. The blob returned
// from this function can immediately be included within an HTLC add packet to
// be sent to the first hop within the route. The returned Circuit pairs the
// session key with the hop public keys so the eventual error reply can be
// decrypted.
func generateSphinxPacket(rt *route.Route, paymentHash []byte,
	sessionKey *btcec.PrivateKey) ([]byte, *sphinx.Circuit, error) {
	// As a sanity check, we'll ensure that the set of hops has been
	// properly filled in, otherwise, we won't actually be able to
	// construct a route.
	if len(rt.Hops) == 0 {
		return nil, nil, route.ErrNoRouteHopsProvided
	}
	// Now that we know we have an actual route, we'll map the route into a
	// sphinx payment path which includes per-hop payloads for each hop
	// that give each node within the route the necessary information
	// (fees, CLTV value, etc) to properly forward the payment.
	sphinxPath, err := rt.ToSphinxPath()
	if err != nil {
		return nil, nil, err
	}
	// The NodePub curve fields are nilled out in the copies below purely
	// to keep the spew output readable; the originals are untouched.
	log.Tracef("Constructed per-hop payloads for payment_hash=%x: %v",
		paymentHash[:], newLogClosure(func() string {
			path := make([]sphinx.OnionHop, sphinxPath.TrueRouteLength())
			for i := range path {
				hopCopy := sphinxPath[i]
				hopCopy.NodePub.Curve = nil
				path[i] = hopCopy
			}
			return spew.Sdump(path)
		}),
	)
	// Next generate the onion routing packet which allows us to perform
	// privacy preserving source routing across the network.
	sphinxPacket, err := sphinx.NewOnionPacket(
		sphinxPath, sessionKey, paymentHash,
	)
	if err != nil {
		return nil, nil, err
	}
	// Finally, encode Sphinx packet using its wire representation to be
	// included within the HTLC add packet.
	var onionBlob bytes.Buffer
	if err := sphinxPacket.Encode(&onionBlob); err != nil {
		return nil, nil, err
	}
	log.Tracef("Generated sphinx packet: %v",
		newLogClosure(func() string {
			// We make a copy of the ephemeral key and unset the
			// internal curve here in order to keep the logs from
			// getting noisy.
			key := *sphinxPacket.EphemeralKey
			key.Curve = nil
			packetCopy := *sphinxPacket
			packetCopy.EphemeralKey = &key
			return spew.Sdump(packetCopy)
		}),
	)
	return onionBlob.Bytes(), &sphinx.Circuit{
		SessionKey:  sessionKey,
		PaymentPath: sphinxPath.NodeKeys(),
	}, nil
}
// LightningPayment describes a payment to be sent through the network to the
// final destination.
type LightningPayment struct {
	// Target is the node in which the payment should be routed towards.
	Target route.Vertex
	// Amount is the value of the payment to send through the network in
	// milli-satoshis.
	Amount lnwire.MilliSatoshi
	// FeeLimit is the maximum fee in millisatoshis that the payment should
	// accept when sending it through the network. The payment will fail
	// if there isn't a route with lower fees than this limit.
	FeeLimit lnwire.MilliSatoshi
	// CltvLimit is the maximum time lock that is allowed for attempts to
	// complete this payment.
	CltvLimit *uint32
	// PaymentHash is the r-hash value to use within the HTLC extended to
	// the first hop.
	PaymentHash [32]byte
	// FinalCLTVDelta is the CLTV expiry delta to use for the _final_ hop
	// in the route. This means that the final hop will have a CLTV delta
	// of at least: currentHeight + FinalCLTVDelta.
	FinalCLTVDelta uint16
	// PayAttemptTimeout is a timeout value that we'll use to determine
	// when we should abandon the payment attempt after consecutive
	// payment failure. This prevents us from attempting to send a payment
	// indefinitely. A zero value means the payment will never time out.
	//
	// TODO(halseth): make wallclock time to allow resume after startup.
	PayAttemptTimeout time.Duration
	// RouteHints represents the different routing hints that can be used to
	// assist a payment in reaching its destination successfully. These
	// hints will act as intermediate hops along the route.
	//
	// NOTE: This is optional unless required by the payment. When providing
	// multiple routes, ensure the hop hints within each route are chained
	// together and sorted in forward order in order to reach the
	// destination successfully.
	RouteHints [][]zpay32.HopHint
	// OutgoingChannelID is the channel that needs to be taken to the first
	// hop. If nil, any channel may be used.
	OutgoingChannelID *uint64
	// PaymentRequest is an optional payment request that this payment is
	// attempting to complete.
	PaymentRequest []byte
	// FinalDestRecords are TLV records that are to be sent to the final
	// hop in the new onion payload format. If the destination does not
	// understand this new onion payload format, then the payment will
	// fail.
	FinalDestRecords []tlv.Record
}
// SendPayment attempts to dispatch the payment described by the passed
// LightningPayment. The call blocks until the payment either succeeds, or
// every candidate route has been attempted and failed. On success, the
// route that the successful payment traversed is returned together with
// the payment preimage.
func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte,
	*route.Route, error) {

	session, err := r.preparePayment(payment)
	if err != nil {
		return [32]byte{}, nil, err
	}

	// Nothing has been handed to the switch for this payment yet, so the
	// existing attempt is nil.
	return r.sendPayment(nil, payment, session)
}
// SendPaymentAsync is the non-blocking version of SendPayment. The payment
// result needs to be retrieved via the control tower.
func (r *ChannelRouter) SendPaymentAsync(payment *LightningPayment) error {
	session, err := r.preparePayment(payment)
	if err != nil {
		return err
	}

	// Dispatch the attempt in the background. As this is the first time
	// this payment is being made, nil is passed for the existing attempt.
	r.wg.Add(1)
	go func() {
		defer r.wg.Done()

		if _, _, err := r.sendPayment(nil, payment, session); err != nil {
			log.Errorf("Payment with hash %x failed: %v",
				payment.PaymentHash, err)
		}
	}()

	return nil
}
// preparePayment builds a fresh payment session for the given payment and
// registers the payment with the control tower so that duplicate in-flight
// payments are rejected.
func (r *ChannelRouter) preparePayment(payment *LightningPayment) (
	PaymentSession, error) {

	// A new payment session reports our routing failures back to mission
	// control before any HTLC routing attempt is started.
	session, err := r.cfg.SessionSource.NewPaymentSession(
		payment.RouteHints, payment.Target,
	)
	if err != nil {
		return nil, err
	}

	// Registering the hash with the ControlTower ensures it is not
	// already in-flight.
	//
	// TODO(roasbeef): store records as part of creation info?
	creationInfo := &channeldb.PaymentCreationInfo{
		PaymentHash:    payment.PaymentHash,
		Value:          payment.Amount,
		CreationDate:   time.Now(),
		PaymentRequest: payment.PaymentRequest,
	}
	if err := r.cfg.Control.InitPayment(payment.PaymentHash, creationInfo); err != nil {
		return nil, err
	}

	return session, nil
}
// SendToRoute attempts to send a payment with the given hash through the
// provided route. This function is blocking and will return the obtained
// preimage if the payment is successful or the full error in case of a
// failure.
//
// The route parameter is named rt so that it does not shadow the imported
// route package within the body.
func (r *ChannelRouter) SendToRoute(hash lntypes.Hash, rt *route.Route) (
	lntypes.Preimage, error) {

	// Create a payment session for just this route.
	paySession := r.cfg.SessionSource.NewPaymentSessionForRoute(rt)

	// Calculate amount paid to receiver.
	amt := rt.TotalAmount - rt.TotalFees()

	// Record this payment hash with the ControlTower, ensuring it is not
	// already in-flight.
	info := &channeldb.PaymentCreationInfo{
		PaymentHash:    hash,
		Value:          amt,
		CreationDate:   time.Now(),
		PaymentRequest: nil,
	}

	err := r.cfg.Control.InitPayment(hash, info)
	if err != nil {
		// Return the named zero value for consistency with the
		// declared lntypes.Preimage return type (previously a raw
		// [32]byte{} literal).
		return lntypes.Preimage{}, err
	}

	// Create a (mostly) dummy payment, as the created payment session is
	// not going to do path finding.
	// TODO(halseth): sendPayment doesn't really need LightningPayment, make
	// it take just needed fields instead.
	//
	// PayAttemptTime doesn't need to be set, as there is only a single
	// attempt.
	payment := &LightningPayment{
		PaymentHash: hash,
	}

	// Since this is the first time this payment is being made, we pass nil
	// for the existing attempt.
	preimage, _, err := r.sendPayment(nil, payment, paySession)
	if err != nil {
		// SendToRoute should return a structured error. In case the
		// provided route fails, payment lifecycle will return a
		// noRouteError with the structured error embedded.
		if noRouteError, ok := err.(errNoRoute); ok {
			if noRouteError.lastError == nil {
				return lntypes.Preimage{},
					errors.New("failure message missing")
			}

			return lntypes.Preimage{}, noRouteError.lastError
		}

		return lntypes.Preimage{}, err
	}

	return preimage, nil
}
// sendPayment attempts to send a payment as described within the passed
// LightningPayment. This function is blocking and will return either: when the
// payment is successful, or all candidates routes have been attempted and
// resulted in a failed payment. If the payment succeeds, then a non-nil Route
// will be returned which describes the path the successful payment traversed
// within the network to reach the destination. Additionally, the payment
// preimage will also be returned.
//
// The existing attempt argument should be set to nil if this is a payment that
// hasn't had any payment attempt sent to the switch yet. If it has had an
// attempt already, it should be passed such that the result can be retrieved.
//
// This method relies on the ControlTower's internal payment state machine to
// carry out its execution. After restarts it is safe, and assumed, that the
// router will call this method for every payment still in-flight according to
// the ControlTower.
func (r *ChannelRouter) sendPayment(
	existingAttempt *channeldb.PaymentAttemptInfo,
	payment *LightningPayment, paySession PaymentSession) (
	[32]byte, *route.Route, error) {

	log.Tracef("Dispatching route for lightning payment: %v",
		newLogClosure(func() string {
			// Make a copy of the payment with a nilled Curve
			// before spewing.
			var routeHints [][]zpay32.HopHint
			for _, routeHint := range payment.RouteHints {
				var hopHints []zpay32.HopHint
				for _, hopHint := range routeHint {
					h := hopHint.Copy()
					h.NodeID.Curve = nil
					hopHints = append(hopHints, h)
				}
				routeHints = append(routeHints, hopHints)
			}
			p := *payment
			p.RouteHints = routeHints
			return spew.Sdump(p)
		}),
	)

	// We'll also fetch the current block height so we can properly
	// calculate the required HTLC time locks within the route.
	_, currentHeight, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return [32]byte{}, nil, err
	}

	// Now set up a paymentLifecycle struct with these params, such that we
	// can resume the payment from the current state.
	p := &paymentLifecycle{
		router:        r,
		payment:       payment,
		paySession:    paySession,
		currentHeight: currentHeight,
		// FinalCLTVDelta is already a uint16; the previous explicit
		// conversion was redundant.
		finalCLTVDelta: payment.FinalCLTVDelta,
		attempt:        existingAttempt,
		circuit:        nil,
		lastError:      nil,
	}

	// If a timeout is specified, create a timeout channel. If no timeout is
	// specified, the channel is left nil and will never abort the payment
	// loop.
	if payment.PayAttemptTimeout != 0 {
		p.timeoutChan = time.After(payment.PayAttemptTimeout)
	}

	return p.resumePayment()
}
// tryApplyChannelUpdate tries to apply a channel update present in the failure
// message if any.
func (r *ChannelRouter) tryApplyChannelUpdate(rt *route.Route,
	errorSourceIdx int, failure lnwire.FailureMessage) error {

	// Updates that claim to originate from ourselves are meaningless.
	if errorSourceIdx == 0 {
		log.Errorf("Channel update of ourselves received")
		return nil
	}

	// Nothing to do when the failure carries no channel update.
	update := r.extractChannelUpdate(failure)
	if update == nil {
		return nil
	}

	// Recover the public key of the node that reported the failure so the
	// update can be validated. Parsing should always succeed; a failure
	// here indicates an internal inconsistency, so the error is surfaced.
	reporter := rt.Hops[errorSourceIdx-1].PubKeyBytes
	reporterKey, err := btcec.ParsePubKey(
		reporter[:], btcec.S256(),
	)
	if err != nil {
		log.Errorf("Cannot parse pubkey: idx=%v, pubkey=%v",
			errorSourceIdx, reporter)

		return err
	}

	// Apply the update; an invalid update is only logged, not treated as
	// an error.
	if !r.applyChannelUpdate(update, reporterKey) {
		log.Debugf("Invalid channel update received: node=%x",
			reporter)
	}

	return nil
}
// processSendError analyzes the error for the payment attempt received from the
// switch and updates mission control and/or channel policies. Depending on the
// error type, this error is either the final outcome of the payment or we need
// to continue with an alternative route. This is indicated by the boolean
// return value.
func (r *ChannelRouter) processSendError(paymentID uint64, rt *route.Route,
	sendErr error) *channeldb.FailureReason {

	internalErrorReason := channeldb.FailureReasonError

	// reportFail hands the outcome of this attempt to mission control and
	// returns the terminal failure reason, if any.
	reportFail := func(srcIdx *int,
		msg lnwire.FailureMessage) *channeldb.FailureReason {

		reason, err := r.cfg.MissionControl.ReportPaymentFail(
			paymentID, rt, srcIdx, msg,
		)
		if err != nil {
			log.Errorf("Error reporting payment result to mc: %v",
				err)

			return &internalErrorReason
		}

		return reason
	}

	// A failure that could not be decrypted cannot be attributed to any
	// particular hop.
	if sendErr == htlcswitch.ErrUnreadableFailureMessage {
		log.Tracef("Unreadable failure when sending htlc")

		return reportFail(nil, nil)
	}

	// An internal, non-forwarding error means there is no point in
	// retrying: stop here.
	fErr, ok := sendErr.(*htlcswitch.ForwardingError)
	if !ok {
		return &internalErrorReason
	}

	failureMsg := fErr.FailureMessage
	failureIdx := fErr.FailureSourceIdx

	// Known failures may carry a channel update; apply it when present.
	// For unknown failures, failureMsg is nil.
	if failureMsg != nil {
		if err := r.tryApplyChannelUpdate(rt, failureIdx, failureMsg); err != nil {
			return &internalErrorReason
		}
	}

	log.Tracef("Node=%v reported failure when sending htlc",
		failureIdx)

	return reportFail(&failureIdx, failureMsg)
}
// extractChannelUpdate examines the error and extracts the channel update.
// nil is returned for failure types that do not embed an update.
func (r *ChannelRouter) extractChannelUpdate(
	failure lnwire.FailureMessage) *lnwire.ChannelUpdate {

	switch onionErr := failure.(type) {
	case *lnwire.FailExpiryTooSoon:
		return &onionErr.Update
	case *lnwire.FailAmountBelowMinimum:
		return &onionErr.Update
	case *lnwire.FailFeeInsufficient:
		return &onionErr.Update
	case *lnwire.FailIncorrectCltvExpiry:
		return &onionErr.Update
	case *lnwire.FailChannelDisabled:
		return &onionErr.Update

	// FailTemporaryChannelFailure already holds a pointer, which may
	// itself be nil.
	case *lnwire.FailTemporaryChannelFailure:
		return onionErr.Update
	}

	return nil
}
// applyChannelUpdate validates a channel update and if valid, applies it to the
// database. It returns a bool indicating whether the updates was successful.
func (r *ChannelRouter) applyChannelUpdate(msg *lnwire.ChannelUpdate,
	pubKey *btcec.PublicKey) bool {

	edgeInfo, _, _, err := r.GetChannelByID(msg.ShortChannelID)
	if err != nil {
		log.Errorf("Unable to retrieve channel by id: %v", err)
		return false
	}

	err = ValidateChannelUpdateAnn(pubKey, edgeInfo.Capacity, msg)
	if err != nil {
		log.Errorf("Unable to validate channel update: %v", err)
		return false
	}

	// Translate the wire update into a database policy and persist it.
	policy := &channeldb.ChannelEdgePolicy{
		SigBytes:                  msg.Signature.ToSignatureBytes(),
		ChannelID:                 msg.ShortChannelID.ToUint64(),
		LastUpdate:                time.Unix(int64(msg.Timestamp), 0),
		MessageFlags:              msg.MessageFlags,
		ChannelFlags:              msg.ChannelFlags,
		TimeLockDelta:             msg.TimeLockDelta,
		MinHTLC:                   msg.HtlcMinimumMsat,
		MaxHTLC:                   msg.HtlcMaximumMsat,
		FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
		FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
	}

	// Ignored/outdated results are expected and not treated as failures.
	err = r.UpdateEdge(policy)
	if err != nil && !IsError(err, ErrIgnored, ErrOutdated) {
		log.Errorf("Unable to apply channel update: %v", err)
		return false
	}

	return true
}
// AddNode is used to add information about a node to the router database. If
// the node with this pubkey is not present in an existing channel, it will
// be ignored.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddNode(node *channeldb.LightningNode) error {
	update := &routingMsg{
		msg: node,
		err: make(chan error, 1),
	}

	// Hand the update to the network goroutine, bailing out if the router
	// is shutting down.
	select {
	case r.networkUpdates <- update:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the update to be processed.
	select {
	case err := <-update.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// AddEdge is used to add edge/channel to the topology of the router, after all
// information about channel will be gathered this edge/channel might be used
// in construction of payment path.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddEdge(edge *channeldb.ChannelEdgeInfo) error {
	update := &routingMsg{
		msg: edge,
		err: make(chan error, 1),
	}

	// Hand the update to the network goroutine, bailing out if the router
	// is shutting down.
	select {
	case r.networkUpdates <- update:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the update to be processed.
	select {
	case err := <-update.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// UpdateEdge is used to update edge information, without this message edge
// considered as not fully constructed.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) UpdateEdge(update *channeldb.ChannelEdgePolicy) error {
	msg := &routingMsg{
		msg: update,
		err: make(chan error, 1),
	}

	// Hand the policy update to the network goroutine, bailing out if the
	// router is shutting down.
	select {
	case r.networkUpdates <- msg:
	case <-r.quit:
		return ErrRouterShuttingDown
	}

	// Wait for the update to be processed.
	select {
	case err := <-msg.err:
		return err
	case <-r.quit:
		return ErrRouterShuttingDown
	}
}
// CurrentBlockHeight returns the block height from POV of the router subsystem.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) CurrentBlockHeight() (uint32, error) {
	// The error, if any, is passed through alongside the (possibly zero)
	// height, mirroring the chain backend's contract.
	_, tipHeight, err := r.cfg.Chain.GetBestBlock()
	return uint32(tipHeight), err
}
// GetChannelByID return the channel by the channel id.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) GetChannelByID(chanID lnwire.ShortChannelID) (
	*channeldb.ChannelEdgeInfo,
	*channeldb.ChannelEdgePolicy,
	*channeldb.ChannelEdgePolicy, error) {

	// Delegate to the graph, keyed by the raw uint64 form of the short
	// channel ID.
	info, policy1, policy2, err := r.cfg.Graph.FetchChannelEdgesByID(
		chanID.ToUint64(),
	)
	return info, policy1, policy2, err
}
// FetchLightningNode attempts to look up a target node by its identity public
// key. channeldb.ErrGraphNodeNotFound is returned if the node doesn't exist
// within the graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) FetchLightningNode(node route.Vertex) (*channeldb.LightningNode, error) {
	// The compressed vertex bytes must first be parsed into a full public
	// key before the graph can be queried.
	pub, err := btcec.ParsePubKey(node[:], btcec.S256())
	if err != nil {
		return nil, fmt.Errorf("unable to parse raw public key: %v", err)
	}

	return r.cfg.Graph.FetchLightningNode(pub)
}
// ForEachNode is used to iterate over every node in router topology.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) error {
	// The graph traversal supplies a transaction we don't need; only the
	// node itself is forwarded to the caller's callback.
	return r.cfg.Graph.ForEachNode(nil, func(_ *bbolt.Tx, node *channeldb.LightningNode) error {
		return cb(node)
	})
}
// ForAllOutgoingChannels is used to iterate over all outgoing channels owned by
// the router.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForAllOutgoingChannels(cb func(*channeldb.ChannelEdgeInfo,
	*channeldb.ChannelEdgePolicy) error) error {

	return r.selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, c *channeldb.ChannelEdgeInfo,
		e, _ *channeldb.ChannelEdgePolicy) error {

		// A nil outgoing policy means the edge is only half-known;
		// surface this rather than passing nil to the callback.
		if e == nil {
			// Lowercased per Go error-string convention.
			return fmt.Errorf("channel from self node has no policy")
		}

		return cb(c, e)
	})
}
// ForEachChannel is used to iterate over every known edge (channel) within our
// view of the channel graph.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForEachChannel(cb func(chanInfo *channeldb.ChannelEdgeInfo,
	e1, e2 *channeldb.ChannelEdgePolicy) error) error {

	// The graph exposes exactly the traversal needed; hand the callback
	// straight through.
	return r.cfg.Graph.ForEachChannel(cb)
}
// AddProof updates the channel edge info with proof which is needed to
// properly announce the edge to the rest of the network.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) AddProof(chanID lnwire.ShortChannelID,
	proof *channeldb.ChannelAuthProof) error {

	// Fetch the current edge info, attach the proof, and write it back.
	edge, _, _, err := r.cfg.Graph.FetchChannelEdgesByID(chanID.ToUint64())
	if err != nil {
		return err
	}

	edge.AuthProof = proof

	return r.cfg.Graph.UpdateChannelEdge(edge)
}
// IsStaleNode returns true if the graph source has a node announcement for the
// target node with a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsStaleNode(node route.Vertex, timestamp time.Time) bool {
	// A failing freshness assertion means this announcement is stale.
	err := r.assertNodeAnnFreshness(node, timestamp)
	return err != nil
}
// IsPublicNode determines whether the given vertex is seen as a public node in
// the graph from the graph's source node's point of view.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsPublicNode(node route.Vertex) (bool, error) {
	// Defer entirely to the graph's notion of "public".
	return r.cfg.Graph.IsPublicNode(node)
}
// IsKnownEdge returns true if the graph source already knows of the passed
// channel ID either as a live or zombie edge.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsKnownEdge(chanID lnwire.ShortChannelID) bool {
	// Both live and zombie edges count as "known".
	_, _, live, zombie, _ := r.cfg.Graph.HasChannelEdge(chanID.ToUint64())
	return live || zombie
}
// IsStaleEdgePolicy returns true if the graph source has a channel edge for
// the passed channel ID (and flags) that have a more recent timestamp.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) IsStaleEdgePolicy(chanID lnwire.ShortChannelID,
	timestamp time.Time, flags lnwire.ChanUpdateChanFlags) bool {

	edge1Timestamp, edge2Timestamp, exists, isZombie, err :=
		r.cfg.Graph.HasChannelEdge(chanID.ToUint64())
	// On a lookup error we conservatively treat the policy as fresh.
	if err != nil {
		return false
	}

	// If we know of the edge as a zombie, then we'll make some additional
	// checks to determine if the new policy is fresh.
	if isZombie {
		// When running with AssumeChannelValid, we also prune channels
		// if both of their edges are disabled. We'll mark the new
		// policy as stale if it remains disabled.
		if r.cfg.AssumeChannelValid {
			isDisabled := flags&lnwire.ChanUpdateDisabled ==
				lnwire.ChanUpdateDisabled
			if isDisabled {
				return true
			}
		}

		// Otherwise, we'll fall back to our usual ChannelPruneExpiry.
		return time.Since(timestamp) > r.cfg.ChannelPruneExpiry
	}

	// If we don't know of the edge, then it means it's fresh (thus not
	// stale).
	if !exists {
		return false
	}

	// As edges are directional, each node has a unique policy for the
	// direction of the edge they control. Therefore we first check if we
	// already have the most up to date information for that edge. If so,
	// then we can exit early.
	switch {
	// A flag set of 0 indicates this is an announcement for the "first"
	// node in the channel.
	case flags&lnwire.ChanUpdateDirection == 0:
		return !edge1Timestamp.Before(timestamp)

	// Similarly, a flag set of 1 indicates this is an announcement for the
	// "second" node in the channel.
	case flags&lnwire.ChanUpdateDirection == 1:
		return !edge2Timestamp.Before(timestamp)
	}

	return false
}
// MarkEdgeLive clears an edge from our zombie index, deeming it as live.
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) MarkEdgeLive(chanID lnwire.ShortChannelID) error {
	// The graph's zombie index is keyed by the raw uint64 channel ID.
	id := chanID.ToUint64()
	return r.cfg.Graph.MarkEdgeLive(id)
}
// generateBandwidthHints is a helper function that's utilized the main
// findPath function in order to obtain hints from the lower layer w.r.t to the
// available bandwidth of edges on the network. Currently, we'll only obtain
// bandwidth hints for the edges we directly have open ourselves. Obtaining
// these hints allows us to reduce the number of extraneous attempts as we can
// skip channels that are inactive, or just don't have enough bandwidth to
// carry the payment.
func generateBandwidthHints(sourceNode *channeldb.LightningNode,
	queryBandwidth func(*channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi) (map[uint64]lnwire.MilliSatoshi, error) {

	// Gather every channel the source node participates in.
	var outgoing []*channeldb.ChannelEdgeInfo
	err := sourceNode.ForEachChannel(nil, func(_ *bbolt.Tx,
		edgeInfo *channeldb.ChannelEdgeInfo,
		_, _ *channeldb.ChannelEdgePolicy) error {

		outgoing = append(outgoing, edgeInfo)
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Query the lower switch layer for the most up to date bandwidth
	// value of each channel.
	hints := make(map[uint64]lnwire.MilliSatoshi, len(outgoing))
	for _, edge := range outgoing {
		hints[edge.ChannelID] = queryBandwidth(edge)
	}

	return hints, nil
}
// runningAmounts keeps running amounts while the route is traversed. It is
// updated hop by hop as fees are accumulated in reverse order.
type runningAmounts struct {
	// amt is the intended amount to send via the route.
	amt lnwire.MilliSatoshi

	// max is the running maximum that the route can carry.
	max lnwire.MilliSatoshi
}
// prependChannel returns a new set of running amounts that would result from
// prepending the given channel to the route. If canIncreaseAmt is set, the
// amount may be increased if it is too small to satisfy the channel's minimum
// htlc amount.
//
// localChan indicates a channel owned by the source node itself, whose fees
// are never added to the running amounts.
func (r *runningAmounts) prependChannel(policy *channeldb.ChannelEdgePolicy,
	capacity btcutil.Amount, localChan bool, canIncreaseAmt bool) (
	runningAmounts, error) {

	// Determine max htlc value: the policy's explicit max when set,
	// otherwise the channel capacity.
	maxHtlc := lnwire.NewMSatFromSatoshis(capacity)
	if policy.MessageFlags.HasMaxHtlc() {
		maxHtlc = policy.MaxHTLC
	}

	amt := r.amt

	// If we have a specific amount for which we are building the route,
	// validate it against the channel constraints and return the new
	// running amount.
	if !canIncreaseAmt {
		if amt < policy.MinHTLC || amt > maxHtlc {
			return runningAmounts{}, fmt.Errorf("channel htlc "+
				"constraints [%v - %v] violated with amt %v",
				policy.MinHTLC, maxHtlc, amt)
		}

		// Update running amount by adding the fee for non-local
		// channels.
		if !localChan {
			amt += policy.ComputeFee(amt)
		}

		// Note: max is intentionally left at its zero value here; it
		// is only tracked in the canIncreaseAmt case.
		return runningAmounts{
			amt: amt,
		}, nil
	}

	// Adapt the minimum amount to what this channel allows.
	if policy.MinHTLC > r.amt {
		amt = policy.MinHTLC
	}

	// Update the maximum amount too to be able to detect incompatible
	// channels.
	max := r.max
	if maxHtlc < r.max {
		max = maxHtlc
	}

	// If we get in the situation that the minimum amount exceeds the
	// maximum amount (enforced further down stream), we have incompatible
	// channel policies.
	//
	// There is possibility with pubkey addressing that we should have
	// selected a different channel downstream, but we don't backtrack to
	// try to fix that. It would complicate path finding while we expect
	// this situation to be rare. The spec recommends to keep all policies
	// towards a peer identical. If that is the case, there isn't a better
	// channel that we should have selected.
	if amt > max {
		return runningAmounts{},
			fmt.Errorf("incompatible channel policies: %v "+
				"exceeds %v", amt, max)
	}

	// Add fees to the running amounts. Skip the source node fees as
	// those do not need to be paid.
	if !localChan {
		amt += policy.ComputeFee(amt)
		max += policy.ComputeFee(max)
	}

	return runningAmounts{amt: amt, max: max}, nil
}
// ErrNoChannel is returned when a route cannot be built because there are no
// channels that satisfy all requirements.
type ErrNoChannel struct {
	// position is the index of the hop for which no channel was found.
	position int

	// fromNode is the node from which the hop should originate.
	fromNode route.Vertex
}

// Error returns a human readable string describing the error.
func (e ErrNoChannel) Error() string {
	// The previous format printed the integer position where the node was
	// expected ("node %v (%v)" with position first); label both values
	// explicitly instead.
	return fmt.Sprintf("no matching outgoing channel available for "+
		"node %v at position %v", e.fromNode, e.position)
}
// BuildRoute returns a fully specified route based on a list of pubkeys. If
// amount is nil, the minimum routable amount is used. To force a specific
// outgoing channel, use the outgoingChan parameter. The hops are traversed
// backwards so that each hop's fee can be accumulated into the amount that
// the preceding hop must forward.
func (r *ChannelRouter) BuildRoute(amt *lnwire.MilliSatoshi,
	hops []route.Vertex, outgoingChan *uint64,
	finalCltvDelta int32) (*route.Route, error) {

	log.Tracef("BuildRoute called: hopsCount=%v, amt=%v",
		len(hops), amt)

	// If no amount is specified, we need to build a route for the minimum
	// amount that this route can carry.
	useMinAmt := amt == nil

	// We'll attempt to obtain a set of bandwidth hints that helps us select
	// the best outgoing channel to use in case no outgoing channel is set.
	bandwidthHints, err := generateBandwidthHints(
		r.selfNode, r.cfg.QueryBandwidth,
	)
	if err != nil {
		return nil, err
	}

	// Allocate a list that will contain the selected channels for this
	// route.
	edges := make([]*channeldb.ChannelEdgePolicy, len(hops))

	// Keep a running amount and the maximum for this route. The maximum
	// starts unbounded and is narrowed per-channel while traversing.
	amts := runningAmounts{
		max: lnwire.MilliSatoshi(^uint64(0)),
	}

	if useMinAmt {
		// For minimum amount routes, aim to deliver at least 1 msat to
		// the destination. There are nodes in the wild that have a
		// min_htlc channel policy of zero, which could lead to a zero
		// amount payment being made.
		amts.amt = 1
	} else {
		// If an amount is specified, we need to build a route that
		// delivers exactly this amount to the final destination.
		amts.amt = *amt
	}

	// Traverse hops backwards to accumulate fees in the running amounts.
	source := r.selfNode.PubKeyBytes
	for i := len(hops) - 1; i >= 0; i-- {
		toNode := hops[i]

		// The first hop originates at ourselves; later hops originate
		// at the previous entry in the hops list.
		var fromNode route.Vertex
		if i == 0 {
			fromNode = source
		} else {
			fromNode = hops[i-1]
		}
		localChan := i == 0

		// Iterate over candidate channels to select the channel
		// to use for the final route.
		var (
			bestEdge      *channeldb.ChannelEdgePolicy
			bestAmts      *runningAmounts
			bestBandwidth lnwire.MilliSatoshi
		)

		cb := func(tx *bbolt.Tx,
			edgeInfo *channeldb.ChannelEdgeInfo,
			_, inEdge *channeldb.ChannelEdgePolicy) error {

			chanID := edgeInfo.ChannelID

			// Apply the outgoing channel restriction, if active.
			if localChan && outgoingChan != nil &&
				chanID != *outgoingChan {

				return nil
			}

			// No unknown policy channels.
			if inEdge == nil {
				return nil
			}

			// Before we can process the edge, we'll need to
			// fetch the node on the _other_ end of this
			// channel as we may later need to iterate over
			// the incoming edges of this node if we explore
			// it further.
			chanFromNode, err := edgeInfo.FetchOtherNode(
				tx, toNode[:],
			)
			if err != nil {
				return err
			}

			// Continue searching if this channel doesn't
			// connect with the previous hop.
			if chanFromNode.PubKeyBytes != fromNode {
				return nil
			}

			// Validate whether this channel's policy is satisfied
			// and obtain the new running amounts if this channel
			// was to be selected.
			newAmts, err := amts.prependChannel(
				inEdge, edgeInfo.Capacity, localChan,
				useMinAmt,
			)
			if err != nil {
				log.Tracef("Skipping chan %v: %v",
					inEdge.ChannelID, err)

				return nil
			}

			// If we already have a best edge, check whether this
			// edge is better.
			bandwidth := bandwidthHints[chanID]
			if bestEdge != nil {
				if localChan {
					// For local channels, better is defined
					// as having more bandwidth. We try to
					// maximize the chance that the returned
					// route succeeds.
					if bandwidth < bestBandwidth {
						return nil
					}
				} else {
					// For other channels, better is defined
					// as lower fees for the amount to send.
					// Normally all channels between two
					// nodes should have the same policy,
					// but in case not we minimize our cost
					// here. Regular path finding would do
					// the same.
					if newAmts.amt > bestAmts.amt {
						return nil
					}
				}
			}

			// If we get here, the current edge is better. Replace
			// the best.
			bestEdge = inEdge
			bestAmts = &newAmts
			bestBandwidth = bandwidth

			return nil
		}

		err := r.cfg.Graph.ForEachNodeChannel(nil, toNode[:], cb)
		if err != nil {
			return nil, err
		}

		// There is no matching channel. Stop building the route here.
		if bestEdge == nil {
			return nil, ErrNoChannel{
				fromNode: fromNode,
				position: i,
			}
		}

		log.Tracef("Select channel %v at position %v", bestEdge.ChannelID, i)

		edges[i] = bestEdge
		amts = *bestAmts
	}

	// Fetch the current best block height for the route's time locks.
	_, height, err := r.cfg.Chain.GetBestBlock()
	if err != nil {
		return nil, err
	}

	var receiverAmt lnwire.MilliSatoshi
	if useMinAmt {
		// We've calculated the minimum amount for the htlc that the
		// source node hands out. The newRoute call below expects the
		// amount that must reach the receiver after subtraction of fees
		// along the way. Iterate over all edges to calculate the
		// receiver amount.
		receiverAmt = amts.amt
		for _, edge := range edges[1:] {
			receiverAmt -= edge.ComputeFeeFromIncoming(receiverAmt)
		}
	} else {
		// Deliver the specified amount to the receiver.
		receiverAmt = *amt
	}

	// Build and return the final route.
	return newRoute(
		receiverAmt, source, edges, uint32(height),
		uint16(finalCltvDelta), nil,
	)
}
|
package rpcfs
import (
"os"
"log"
"io/ioutil"
"rpc"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/unionfs"
)
// Task describes a command to be executed inside the worker's chroot: its
// argument vector, environment, and working directory.
type Task struct {
	Argv []string
	Env  []string
	Dir  string
}
// WorkerTask bundles a Task with the FUSE union mount it runs inside. The
// mount layers a local read-write directory (rwDir) over a read-only
// RPC-backed filesystem, both rooted under a temporary directory (tmpDir).
type WorkerTask struct {
	// fileServer is the RPC client backing the read-only layer.
	fileServer *rpc.Client
	// mount is the union mount point the task is chrooted into.
	mount string
	// rwDir is the local writable branch of the union.
	rwDir string
	// tmpDir is the parent scratch directory holding rwDir and mount.
	tmpDir string

	*Task
	*fuse.MountState
}
// Stop tears down the task's FUSE mount. Any unmount error from the
// embedded MountState is discarded.
func (me *WorkerTask) Stop() {
	log.Println("unmounting..")
	me.MountState.Unmount()
}
// Run starts the task under /tmp/chroot-suid inside the union mount,
// captures its stdout/stderr through pipes, logs the output and exit
// status, and returns any error from starting or waiting on the process.
// The mount is torn down when Run returns.
func (me *WorkerTask) Run() os.Error {
	defer me.Stop()

	// Previously these pipe errors were silently discarded, which would
	// cause a nil file descriptor to be handed to the child if pipe
	// creation failed.
	rStdout, wStdout, err := os.Pipe()
	if err != nil {
		return err
	}
	rStderr, wStderr, err := os.Pipe()
	if err != nil {
		return err
	}

	attr := os.ProcAttr{
		Dir:   me.Task.Dir,
		Env:   me.Task.Env,
		Files: []*os.File{nil, wStdout, wStderr},
	}

	// This is a security hole, but convenient for testing.
	bin := "/tmp/chroot-suid"
	cmd := []string{bin, me.mount}
	newcmd := make([]string, len(cmd)+len(me.Task.Argv))
	copy(newcmd, cmd)
	copy(newcmd[len(cmd):], me.Task.Argv)

	log.Println("starting cmd", newcmd)
	proc, err := os.StartProcess(bin, newcmd, &attr)
	if err != nil {
		log.Println("Error", err)
		return err
	}

	// Close the parent's write ends so the reads below see EOF once the
	// child exits.
	wStdout.Close()
	wStderr.Close()

	// Output capture is best-effort; the Wait error below is what the
	// caller cares about.
	stdout, _ := ioutil.ReadAll(rStdout)
	stderr, _ := ioutil.ReadAll(rStderr)
	msg, err := proc.Wait(0)

	log.Println("stdout:", string(stdout))
	log.Println("stderr:", string(stderr))
	log.Println("result:", msg, "dir:", me.tmpDir)
	return err
}
// NewWorkerTask creates a scratch directory containing a writable branch
// and a mount point, mounts a union of the writable branch over the
// RPC-backed read-only filesystem, and returns a WorkerTask ready to Run
// the given task inside it.
func NewWorkerTask(server *rpc.Client, task *Task) (*WorkerTask, os.Error) {
	w := &WorkerTask{}
	tmpDir, err := ioutil.TempDir("", "rpcfs-tmp")
	if err != nil {
		return nil, err
	}
	w.tmpDir = tmpDir

	w.rwDir = w.tmpDir + "/rw"
	err = os.Mkdir(w.rwDir, 0700)
	if err != nil {
		return nil, err
	}
	w.mount = w.tmpDir + "/mnt"
	err = os.Mkdir(w.mount, 0700)
	if err != nil {
		return nil, err
	}
	w.Task = task

	fs := fuse.NewLoopbackFileSystem(w.rwDir)
	roFs := NewRpcFs(server)

	// High ttl, since all writes come through fuse.
	ttl := 100.0
	opts := unionfs.UnionFsOptions{
		BranchCacheTTLSecs:   ttl,
		DeletionCacheTTLSecs: ttl,
		DeletionDirName:      "DELETIONS",
	}
	mOpts := fuse.FileSystemOptions{
		EntryTimeout:    ttl,
		AttrTimeout:     ttl,
		NegativeTimeout: ttl,
	}
	ufs := unionfs.NewUnionFs("ufs", []fuse.FileSystem{fs, roFs}, opts)
	conn := fuse.NewFileSystemConnector(ufs, &mOpts)
	state := fuse.NewMountState(conn)

	// BUG FIX: the Mount result was previously discarded; the subsequent
	// err check re-tested the stale (always nil) Mkdir error, so mount
	// failures went undetected.
	err = state.Mount(w.mount, &fuse.MountOptions{AllowOther: true})
	if err != nil {
		return nil, err
	}
	w.MountState = state
	go state.Loop(true)
	return w, nil
}
Run as nobody.
package rpcfs
import (
"fmt"
"os"
"log"
"io/ioutil"
"rpc"
"github.com/hanwen/go-fuse/fuse"
"github.com/hanwen/go-fuse/unionfs"
"os/user"
)
// Task describes a command to be executed inside the worker's chroot: its
// argument vector, environment, and working directory.
type Task struct {
	Argv []string
	Env  []string
	Dir  string
}
// WorkerTask bundles everything needed to run one Task inside an isolated
// union-fs mount: the RPC connection backing the read-only branch, the
// scratch/mount directories, and the live fuse mount state.
type WorkerTask struct {
	fileServer *rpc.Client // connection serving the read-only filesystem branch
	mount      string      // union-fs mount point (tmpDir + "/mnt")
	rwDir      string      // writable branch of the union fs (tmpDir + "/rw")
	tmpDir     string      // scratch directory holding both rwDir and mount

	*Task            // the command to execute
	*fuse.MountState // live mount; embedding promotes Unmount etc. onto WorkerTask
}
// Stop tears down the task's fuse mount.
func (me *WorkerTask) Stop() {
	log.Println("unmounting..")
	// Unmount is promoted from the embedded *fuse.MountState.
	me.Unmount()
}
// Run executes the task's command inside the mounted union fs via the
// /tmp/chroot helper, dropping privileges to user "nobody".  The child's
// stdout/stderr are captured over pipes and logged.  Run blocks until the
// child exits, unmounts on return, and returns the first error encountered.
func (me *WorkerTask) Run() os.Error {
	defer me.Stop()

	// BUG FIX: both os.Pipe errors were previously unchecked (the second
	// `:=` silently clobbered the first err).
	rStdout, wStdout, err := os.Pipe()
	if err != nil {
		return err
	}
	rStderr, wStderr, err := os.Pipe()
	if err != nil {
		return err
	}
	attr := os.ProcAttr{
		Env:   me.Task.Env,
		Files: []*os.File{nil, wStdout, wStderr},
	}

	nobody, err := user.Lookup("nobody")
	if err != nil {
		return err
	}

	// TODO - configurable.
	bin := "/tmp/chroot"
	cmd := []string{bin, "-dir", me.Task.Dir,
		"-uid", fmt.Sprintf("%d", nobody.Uid), "-gid", fmt.Sprintf("%d", nobody.Gid),
		me.mount}
	newcmd := make([]string, len(cmd)+len(me.Task.Argv))
	copy(newcmd, cmd)
	copy(newcmd[len(cmd):], me.Task.Argv)

	log.Println("starting cmd", newcmd)
	proc, err := os.StartProcess(bin, newcmd, &attr)
	if err != nil {
		log.Println("Error", err)
		return err
	}
	// Close our copies of the write ends so the reads below see EOF once
	// the child exits.
	wStdout.Close()
	wStderr.Close()

	// BUG FIX: the ReadAll/Wait errors previously overwrote one another in
	// a shared `err`; keep each and return the first non-nil one.
	stdout, outErr := ioutil.ReadAll(rStdout)
	stderr, errErr := ioutil.ReadAll(rStderr)
	msg, waitErr := proc.Wait(0)

	log.Println("stdout:", string(stdout))
	log.Println("stderr:", string(stderr))
	log.Println("result:", msg, "dir:", me.tmpDir)

	if outErr != nil {
		return outErr
	}
	if errErr != nil {
		return errErr
	}
	return waitErr
}
// NewWorkerTask creates the scratch directories for one task and mounts a
// union filesystem over them: a writable local branch (rwDir) layered on top
// of the RPC-backed read-only filesystem served by `server`.  The returned
// WorkerTask owns the mount until Stop is called.
func NewWorkerTask(server *rpc.Client, task *Task) (*WorkerTask, os.Error) {
	w := &WorkerTask{}
	tmpDir, err := ioutil.TempDir("", "rpcfs-tmp")
	w.tmpDir = tmpDir
	if err != nil {
		return nil, err
	}
	w.rwDir = w.tmpDir + "/rw"
	err = os.Mkdir(w.rwDir, 0700)
	if err != nil {
		return nil, err
	}
	w.mount = w.tmpDir + "/mnt"
	err = os.Mkdir(w.mount, 0700)
	if err != nil {
		return nil, err
	}
	w.Task = task

	fs := fuse.NewLoopbackFileSystem(w.rwDir)
	roFs := NewRpcFs(server)

	// High ttl, since all writes come through fuse.
	ttl := 100.0
	opts := unionfs.UnionFsOptions{
		BranchCacheTTLSecs:   ttl,
		DeletionCacheTTLSecs: ttl,
		DeletionDirName:      "DELETIONS",
	}
	mOpts := fuse.FileSystemOptions{
		EntryTimeout:    ttl,
		AttrTimeout:     ttl,
		NegativeTimeout: ttl,
	}
	ufs := unionfs.NewUnionFs("ufs", []fuse.FileSystem{fs, roFs}, opts)
	conn := fuse.NewFileSystemConnector(ufs, &mOpts)
	state := fuse.NewMountState(conn)
	// BUG FIX: the result of Mount was previously discarded and the stale
	// `err` from os.Mkdir was checked instead, so mount failures were
	// silently ignored.  Capture and check the Mount error itself.
	err = state.Mount(w.mount, &fuse.MountOptions{AllowOther: true})
	if err != nil {
		return nil, err
	}
	w.MountState = state
	go state.Loop(true)
	return w, nil
}
|
package soyhtml
import (
"bytes"
"fmt"
"io"
"log"
"runtime"
"runtime/debug"
"github.com/robfig/soy/ast"
"github.com/robfig/soy/data"
soyt "github.com/robfig/soy/template"
)
// Logger collects output from {log} commands.  It is nil by default;
// NOTE(review): walk calls Logger.Print unconditionally for {log} nodes, so
// it appears callers must set Logger before rendering templates that use
// {log} — confirm.
var Logger *log.Logger
// state represents the state of an execution.  It carries the currently
// executing template, the output writer, and the scratch slot (val) that
// walk uses to pass expression results back to its caller.
type state struct {
	namespace  string
	tmpl       soyt.Template
	wr         io.Writer
	node       ast.Node           // current node, for errors
	registry   soyt.Registry      // the entire bundle of templates
	val        data.Value         // temp value for expression being computed
	context    scope              // variable scope
	autoescape ast.AutoescapeType // escaping mode
	ij         data.Map           // injected data available to all templates.
}
// at marks the state to be on node n, for error reporting.  walk calls this
// on entry so errorf/errRecover can resolve the current line number.
func (s *state) at(node ast.Node) {
	s.node = node
}
// errorf formats the error, prefixes it with the current template name and
// line number, and terminates processing by panicking.  The panic is turned
// back into an error by errRecover at the top level.
func (s *state) errorf(format string, args ...interface{}) {
	name := s.tmpl.Node.Name
	line := s.registry.LineNumber(name, s.node)
	wrapped := fmt.Sprintf("template %s:%d: %s", name, line, format)
	panic(fmt.Errorf(wrapped, args...))
}
// errRecover is the handler that turns panics into returns from the top
// level of Parse.
func (s *state) errRecover(errp *error) {
	if e := recover(); e != nil {
		switch e := e.(type) {
		case runtime.Error:
			// Runtime errors (nil deref, index out of range, ...) did not
			// come from errorf, so attach location and a stack trace.
			*errp = fmt.Errorf("template %s:%d: %v\n%v", s.tmpl.Node.Name,
				s.registry.LineNumber(s.tmpl.Node.Name, s.node), e, string(debug.Stack()))
		case error:
			// Errors raised via errorf already carry template:line context;
			// pass them through unchanged.
			*errp = e
		default:
			// Any other panic value: stringify it with location.
			*errp = fmt.Errorf("template %s:%d: %v", s.tmpl.Node.Name,
				s.registry.LineNumber(s.tmpl.Node.Name, s.node), e)
		}
	}
}
// walk recursively goes through each node and executes the indicated logic and
// writes the output.  Expression nodes leave their result in s.val (reset to
// Undefined on entry); statement nodes write directly to s.wr.
func (s *state) walk(node ast.Node) {
	s.val = data.Undefined{}
	s.at(node) // record position for error reporting
	switch node := node.(type) {
	case *ast.SoyFileNode:
		for _, node := range node.Body {
			s.walk(node)
		}
	case *ast.TemplateNode:
		if node.Autoescape != ast.AutoescapeUnspecified {
			s.autoescape = node.Autoescape
		}
		s.walk(node.Body)
	case *ast.ListNode:
		for _, node := range node.Nodes {
			s.walk(node)
		}

	// Output nodes ----------
	case *ast.PrintNode:
		s.evalPrint(node)
	case *ast.RawTextNode:
		if _, err := s.wr.Write(node.Text); err != nil {
			s.errorf("%s", err)
		}
	case *ast.MsgNode:
		s.walk(node.Body)
	case *ast.CssNode:
		var prefix = ""
		if node.Expr != nil {
			prefix = s.eval(node.Expr).String() + "-"
		}
		if _, err := io.WriteString(s.wr, prefix+node.Suffix); err != nil {
			s.errorf("%s", err)
		}
	case *ast.DebuggerNode:
		// nothing to do
	case *ast.LogNode:
		Logger.Print(string(s.renderBlock(node.Body)))

	// Control flow ----------
	case *ast.IfNode:
		// A nil Cond is the {else} branch; first truthy branch wins.
		for _, cond := range node.Conds {
			if cond.Cond == nil || s.eval(cond.Cond).Truthy() {
				s.walk(cond.Body)
				break
			}
		}
	case *ast.ForNode:
		var list, ok = s.eval(node.List).(data.List)
		if !ok {
			s.errorf("In for loop %q, %q does not resolve to a list.",
				node.String(), node.List.String())
		}
		if len(list) == 0 {
			if node.IfEmpty != nil {
				s.walk(node.IfEmpty)
			}
			break // leaves the switch, not a loop
		}
		// The loop variable plus the __index/__lastIndex synthetics live in
		// their own scope frame, popped when the loop ends.
		s.context.push()
		for i, item := range list {
			s.context.set(node.Var, item)
			s.context.set(node.Var+"__index", data.Int(i))
			s.context.set(node.Var+"__lastIndex", data.Int(len(list)-1))
			s.walk(node.Body)
		}
		s.context.pop()
	case *ast.SwitchNode:
		var switchValue = s.eval(node.Value)
		for _, caseNode := range node.Cases {
			for _, caseValueNode := range caseNode.Values {
				if switchValue.Equals(s.eval(caseValueNode)) {
					s.walk(caseNode.Body)
					return
				}
			}
			if len(caseNode.Values) == 0 { // default/last case
				s.walk(caseNode.Body)
				return
			}
		}
	case *ast.CallNode:
		s.evalCall(node)
	case *ast.LetValueNode:
		s.context.set(node.Name, s.eval(node.Expr))
	case *ast.LetContentNode:
		s.context.set(node.Name, data.String(s.renderBlock(node.Body)))

	// Values ----------
	case *ast.NullNode:
		s.val = data.Null{}
	case *ast.StringNode:
		s.val = data.String(node.Value)
	case *ast.IntNode:
		s.val = data.Int(node.Value)
	case *ast.FloatNode:
		s.val = data.Float(node.Value)
	case *ast.BoolNode:
		s.val = data.Bool(node.True)
	case *ast.GlobalNode:
		s.val = node.Value
	case *ast.ListLiteralNode:
		var items = make(data.List, len(node.Items))
		for i, item := range node.Items {
			items[i] = s.eval(item)
		}
		s.val = data.List(items)
	case *ast.MapLiteralNode:
		var items = make(data.Map, len(node.Items))
		for k, v := range node.Items {
			items[k] = s.eval(v)
		}
		s.val = data.Map(items)
	case *ast.FunctionNode:
		s.val = s.evalFunc(node)
	case *ast.DataRefNode:
		s.val = s.evalDataRef(node)

	// Arithmetic operators ----------
	case *ast.NegateNode:
		switch arg := s.evaldef(node.Arg).(type) {
		case data.Int:
			s.val = data.Int(-arg)
		case data.Float:
			s.val = data.Float(-arg)
		default:
			s.errorf("can not negate non-number: %q", arg.String())
		}
	case *ast.AddNode:
		// int+int stays int; anything involving a string concatenates;
		// otherwise fall back to float addition.
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) + arg2.(data.Int))
		case isString(arg1) || isString(arg2):
			s.val = data.String(arg1.String() + arg2.String())
		default:
			s.val = data.Float(toFloat(arg1) + toFloat(arg2))
		}
	case *ast.SubNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) - arg2.(data.Int))
		default:
			s.val = data.Float(toFloat(arg1) - toFloat(arg2))
		}
	case *ast.DivNode:
		// Division is always float, even for two ints.
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		s.val = data.Float(toFloat(arg1) / toFloat(arg2))
	case *ast.MulNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) * arg2.(data.Int))
		default:
			s.val = data.Float(toFloat(arg1) * toFloat(arg2))
		}
	case *ast.ModNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		s.val = data.Int(arg1.(data.Int) % arg2.(data.Int))

	// Arithmetic comparisons ----------
	case *ast.EqNode:
		s.val = data.Bool(s.eval(node.Arg1).Equals(s.eval(node.Arg2)))
	case *ast.NotEqNode:
		s.val = data.Bool(!s.eval(node.Arg1).Equals(s.eval(node.Arg2)))
	case *ast.LtNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) < toFloat(s.evaldef(node.Arg2)))
	case *ast.LteNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) <= toFloat(s.evaldef(node.Arg2)))
	case *ast.GtNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) > toFloat(s.evaldef(node.Arg2)))
	case *ast.GteNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) >= toFloat(s.evaldef(node.Arg2)))

	// Boolean operators ----------
	case *ast.NotNode:
		s.val = data.Bool(!s.eval(node.Arg).Truthy())
	case *ast.AndNode:
		// Go's && short-circuits, so Arg2 is not evaluated when Arg1 is falsy.
		s.val = data.Bool(s.eval(node.Arg1).Truthy() && s.eval(node.Arg2).Truthy())
	case *ast.OrNode:
		s.val = data.Bool(s.eval(node.Arg1).Truthy() || s.eval(node.Arg2).Truthy())
	case *ast.ElvisNode:
		// a ?: b — yields a unless a is null/undefined.
		var arg1 = s.eval(node.Arg1)
		if arg1 != (data.Null{}) && arg1 != (data.Undefined{}) {
			s.val = arg1
		} else {
			s.val = s.eval(node.Arg2)
		}
	case *ast.TernNode:
		var arg1 = s.eval(node.Arg1)
		if arg1.Truthy() {
			s.val = s.eval(node.Arg2)
		} else {
			s.val = s.eval(node.Arg3)
		}
	default:
		s.errorf("unknown node: %T", node)
	}
}
// isInt reports whether v is a data.Int.
func isInt(v data.Value) bool {
	switch v.(type) {
	case data.Int:
		return true
	}
	return false
}
// isString reports whether v is a data.String.
func isString(v data.Value) bool {
	switch v.(type) {
	case data.String:
		return true
	}
	return false
}
// toFloat converts a numeric data.Value to float64.  Non-numeric values
// (including Undefined) panic; the panic is converted to an error by the
// interpreter's recover handlers.
func toFloat(v data.Value) float64 {
	if i, ok := v.(data.Int); ok {
		return float64(i)
	}
	if f, ok := v.(data.Float); ok {
		return float64(f)
	}
	if _, ok := v.(data.Undefined); ok {
		panic("not a number: undefined")
	}
	panic(fmt.Sprintf("not a number: %v (%T)", v, v))
}
// evalPrint renders a {print} (or implicit print) command: it evaluates the
// expression, applies each print directive in order, and writes the result,
// HTML-escaping it unless autoescape is off or a directive cancels escaping.
func (s *state) evalPrint(node *ast.PrintNode) {
	s.walk(node.Arg)
	if _, ok := s.val.(data.Undefined); ok {
		s.errorf("In 'print' tag, expression %q evaluates to undefined.", node.Arg.String())
	}
	var escapeHtml = s.autoescape != ast.AutoescapeOff
	var result = s.val
	for _, directiveNode := range node.Directives {
		var directive, ok = PrintDirectives[directiveNode.Name]
		if !ok {
			s.errorf("Print directive %q does not exist", directiveNode.Name)
		}
		if !checkNumArgs(directive.ValidArgLengths, len(directiveNode.Args)) {
			s.errorf("Print directive %q called with %v args, expected one of: %v",
				directiveNode.Name, len(directiveNode.Args), directive.ValidArgLengths)
		}
		var args = make([]data.Value, len(directiveNode.Args))
		for i, arg := range directiveNode.Args {
			args[i] = s.eval(arg)
		}
		// Run each directive inside its own function literal so the
		// deferred recover fires per-directive and can name the directive
		// that panicked.
		func() {
			defer func() {
				if err := recover(); err != nil {
					s.errorf("panic in %v: %v\nexecuted: %v(%q, %v)\n%v",
						directiveNode, err,
						directiveNode.Name, result, args,
						string(debug.Stack()))
				}
			}()
			result = directive.Apply(result, args)
		}()
		if directive.CancelAutoescape {
			escapeHtml = false
		}
	}
	var resultStr = result.String()
	if escapeHtml {
		htmlEscapeString(s.wr, resultStr)
	} else {
		if _, err := io.WriteString(s.wr, resultStr); err != nil {
			s.errorf("%s", err)
		}
	}
}
// evalCall executes a {call} command: it looks up the called template,
// assembles the data to pass (all data, an explicit map, or an empty map),
// binds the {param} values, and renders the called template into the same
// output writer using a fresh state.
func (s *state) evalCall(node *ast.CallNode) {
	// get template node we're calling
	var calledTmpl, ok = s.registry.Template(node.Name)
	if !ok {
		s.errorf("failed to find template: %s", node.Name)
	}

	// sort out the data to pass
	var callData scope
	if node.AllData {
		callData = s.context.alldata()
	} else if node.Data != nil {
		result, ok := s.eval(node.Data).(data.Map)
		if !ok {
			s.errorf("In 'call' command %q, the data reference %q does not resolve to a map.",
				node.String(), node.Data.String())
		}
		callData = scope{result}
		// push a frame so param bindings below don't mutate the caller's map
		// (NOTE(review): assumes scope.set writes to the top frame — confirm)
		callData.push()
	} else {
		callData = scope{make(data.Map)}
	}

	// resolve the params
	for _, param := range node.Params {
		switch param := param.(type) {
		case *ast.CallParamValueNode:
			callData.set(param.Key, s.eval(param.Value))
		case *ast.CallParamContentNode:
			// {param} with a content body: render it to a string first.
			callData.set(param.Key, data.New(string(s.renderBlock(param.Content))))
		default:
			s.errorf("unexpected call param type: %T", param)
		}
	}

	// Render with a fresh state; the callee's namespace controls its
	// autoescape mode, while output writer and injected data are shared.
	state := &state{
		tmpl:       calledTmpl,
		registry:   s.registry,
		namespace:  calledTmpl.Namespace.Name,
		autoescape: calledTmpl.Namespace.Autoescape,
		wr:         s.wr,
		context:    callData,
		ij:         s.ij,
	}
	state.walk(calledTmpl.Node)
}
// renderBlock renders the given node into a temporary buffer and returns the
// resulting bytes.  Nothing is written to the main output writer.
func (s *state) renderBlock(node ast.Node) []byte {
	var scratch bytes.Buffer
	saved := s.wr
	s.wr = &scratch
	s.walk(node)
	s.wr = saved
	return scratch.Bytes()
}
// checkNumArgs reports whether numArgs appears in the list of allowed
// argument counts.
func checkNumArgs(allowedNumArgs []int, numArgs int) bool {
	for i := range allowedNumArgs {
		if allowedNumArgs[i] == numArgs {
			return true
		}
	}
	return false
}
// evalFunc evaluates a soy function call and returns its result.  Loop
// functions (which read loop variables straight from the scope) are
// dispatched first; all other functions are looked up in Funcs, argument
// counts validated, and panics converted to template errors.
func (s *state) evalFunc(node *ast.FunctionNode) data.Value {
	if fn, ok := loopFuncs[node.Name]; ok {
		return fn(s, node.Args[0].(*ast.DataRefNode).Key)
	}
	if fn, ok := Funcs[node.Name]; ok {
		if !checkNumArgs(fn.ValidArgLengths, len(node.Args)) {
			s.errorf("Function %q called with %v args, expected: %v",
				node.Name, len(node.Args), fn.ValidArgLengths)
		}
		var args = make([]data.Value, len(node.Args))
		for i, arg := range node.Args {
			args[i] = s.eval(arg)
		}
		defer func() {
			if err := recover(); err != nil {
				s.errorf("panic in %s(%v): %v\n%v", node.Name, args, err, string(debug.Stack()))
			}
		}()
		// BUG FIX: convert a nil return value to data.Null{} — returning a
		// nil data.Value interface made later calls (Truthy, String, ...)
		// panic elsewhere in the interpreter.
		r := fn.Apply(args)
		if r == nil {
			return data.Null{}
		}
		return r
	}
	s.errorf("unrecognized function name: %s", node.Name)
	panic("unreachable") // errorf always panics
}
// evalDataRef resolves a data reference like $a.b[0]?.c: it fetches the root
// value ($ij or a scope lookup) and then applies each access (index, key, or
// computed expression) in turn.  Null-safe accesses on null/undefined return
// Null; unsafe ones raise a template error.
func (s *state) evalDataRef(node *ast.DataRefNode) data.Value {
	// get the initial value
	var ref data.Value
	if node.Key == "ij" {
		if s.ij == nil {
			s.errorf("Injected data not provided, yet referenced: %q", node.String())
		}
		ref = s.ij
	} else {
		ref = s.context.lookup(node.Key)
	}
	if len(node.Access) == 0 {
		return ref
	}

	// handle the accesses
	for i, accessNode := range node.Access {
		// resolve the index or key to look up.
		var (
			index int = -1 // -1 marks "no integer index resolved"
			key   string
		)
		// NOTE: this switch shadows `node` with the access node; the outer
		// DataRefNode is visible again after the switch ends.
		switch node := accessNode.(type) {
		case *ast.DataRefIndexNode:
			index = node.Index
		case *ast.DataRefKeyNode:
			key = node.Key
		case *ast.DataRefExprNode:
			// computed access: an Int result indexes, anything else keys.
			switch keyRef := s.eval(node.Arg).(type) {
			case data.Int:
				index = int(keyRef)
			default:
				key = keyRef.String()
			}
		default:
			s.errorf("unexpected access node: %T", node)
		}

		// use the key/index, depending on the data type we're accessing.
		// Errors print the portion of the ref resolved so far (Access[:i]).
		switch obj := ref.(type) {
		case data.Undefined, data.Null:
			if isNullSafeAccess(accessNode) {
				return data.Null{}
			}
			s.errorf("%q is null or undefined",
				(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
		case data.List:
			if index == -1 {
				s.errorf("%q is a list, but was accessed with a non-integer index",
					(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
			}
			ref = obj.Index(index)
		case data.Map:
			if key == "" {
				s.errorf("%q is a map, and requires a string key to access",
					(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
			}
			ref = obj.Key(key)
		default:
			s.errorf("While evaluating \"%v\", encountered non-collection"+
				" just before accessing \"%v\".", node, accessNode)
		}
	}
	return ref
}
// isNullSafeAccess returns true if the data ref access node is a nullsafe
// access.
func isNullSafeAccess(n ast.Node) bool {
	switch access := n.(type) {
	case *ast.DataRefIndexNode:
		return access.NullSafe
	case *ast.DataRefKeyNode:
		return access.NullSafe
	case *ast.DataRefExprNode:
		return access.NullSafe
	default:
		panic("unexpected")
	}
}
// eval2def is a helper for binary ops: it evaluates both operand nodes
// (left first) and requires that neither result is Undefined.
func (s *state) eval2def(n1, n2 ast.Node) (data.Value, data.Value) {
	v1 := s.evaldef(n1)
	v2 := s.evaldef(n2)
	return v1, v2
}
// eval evaluates the given expression node and returns its value, restoring
// the error-reporting position afterwards.
func (s *state) eval(n ast.Node) data.Value {
	saved := s.node
	s.walk(n)
	s.node = saved
	return s.val
}
// evaldef evaluates n and raises a template error if the result is
// undefined.
func (s *state) evaldef(n ast.Node) data.Value {
	result := s.eval(n)
	if _, undef := result.(data.Undefined); undef {
		s.errorf("%v is undefined", n)
	}
	return result
}
// HTML entity replacements used by htmlEscapeString.
// BUG FIX: these literals had been HTML-entity-decoded at some point
// (e.g. `[]byte(""")`, which does not even parse), turning escaping into
// a no-op; restore the numeric/named character references.
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
)
// htmlEscapeString writes str to w, replacing the five HTML-special bytes
// with their entity references.  It is a modified version of the stdlib
// HTMLEscape routine that avoids copying the input string.
func htmlEscapeString(w io.Writer, str string) {
	flushFrom := 0
	for i := 0; i < len(str); i++ {
		var entity []byte
		switch str[i] {
		case '"':
			entity = htmlQuot
		case '\'':
			entity = htmlApos
		case '&':
			entity = htmlAmp
		case '<':
			entity = htmlLt
		case '>':
			entity = htmlGt
		default:
			continue
		}
		// Flush the clean run preceding the special byte, then the entity.
		io.WriteString(w, str[flushFrom:i])
		w.Write(entity)
		flushFrom = i + 1
	}
	io.WriteString(w, str[flushFrom:])
}
soyhtml/funcs: convert nil return value to data.Null to avoid panics elsewhere
package soyhtml
import (
"bytes"
"fmt"
"io"
"log"
"runtime"
"runtime/debug"
"github.com/robfig/soy/ast"
"github.com/robfig/soy/data"
soyt "github.com/robfig/soy/template"
)
// Logger collects output from {log} commands.  It is nil by default;
// NOTE(review): walk calls Logger.Print unconditionally for {log} nodes, so
// it appears callers must set Logger before rendering templates that use
// {log} — confirm.
var Logger *log.Logger
// state represents the state of an execution.  It carries the currently
// executing template, the output writer, and the scratch slot (val) that
// walk uses to pass expression results back to its caller.
type state struct {
	namespace  string
	tmpl       soyt.Template
	wr         io.Writer
	node       ast.Node           // current node, for errors
	registry   soyt.Registry      // the entire bundle of templates
	val        data.Value         // temp value for expression being computed
	context    scope              // variable scope
	autoescape ast.AutoescapeType // escaping mode
	ij         data.Map           // injected data available to all templates.
}
// at marks the state to be on node n, for error reporting.  walk calls this
// on entry so errorf/errRecover can resolve the current line number.
func (s *state) at(node ast.Node) {
	s.node = node
}
// errorf formats the error, prefixes it with the current template name and
// line number, and terminates processing by panicking.  The panic is turned
// back into an error by errRecover at the top level.
func (s *state) errorf(format string, args ...interface{}) {
	name := s.tmpl.Node.Name
	line := s.registry.LineNumber(name, s.node)
	wrapped := fmt.Sprintf("template %s:%d: %s", name, line, format)
	panic(fmt.Errorf(wrapped, args...))
}
// errRecover is the handler that turns panics into returns from the top
// level of Parse.
func (s *state) errRecover(errp *error) {
	if e := recover(); e != nil {
		switch e := e.(type) {
		case runtime.Error:
			// Runtime errors (nil deref, index out of range, ...) did not
			// come from errorf, so attach location and a stack trace.
			*errp = fmt.Errorf("template %s:%d: %v\n%v", s.tmpl.Node.Name,
				s.registry.LineNumber(s.tmpl.Node.Name, s.node), e, string(debug.Stack()))
		case error:
			// Errors raised via errorf already carry template:line context;
			// pass them through unchanged.
			*errp = e
		default:
			// Any other panic value: stringify it with location.
			*errp = fmt.Errorf("template %s:%d: %v", s.tmpl.Node.Name,
				s.registry.LineNumber(s.tmpl.Node.Name, s.node), e)
		}
	}
}
// walk recursively goes through each node and executes the indicated logic and
// writes the output.  Expression nodes leave their result in s.val (reset to
// Undefined on entry); statement nodes write directly to s.wr.
func (s *state) walk(node ast.Node) {
	s.val = data.Undefined{}
	s.at(node) // record position for error reporting
	switch node := node.(type) {
	case *ast.SoyFileNode:
		for _, node := range node.Body {
			s.walk(node)
		}
	case *ast.TemplateNode:
		if node.Autoescape != ast.AutoescapeUnspecified {
			s.autoescape = node.Autoescape
		}
		s.walk(node.Body)
	case *ast.ListNode:
		for _, node := range node.Nodes {
			s.walk(node)
		}

	// Output nodes ----------
	case *ast.PrintNode:
		s.evalPrint(node)
	case *ast.RawTextNode:
		if _, err := s.wr.Write(node.Text); err != nil {
			s.errorf("%s", err)
		}
	case *ast.MsgNode:
		s.walk(node.Body)
	case *ast.CssNode:
		var prefix = ""
		if node.Expr != nil {
			prefix = s.eval(node.Expr).String() + "-"
		}
		if _, err := io.WriteString(s.wr, prefix+node.Suffix); err != nil {
			s.errorf("%s", err)
		}
	case *ast.DebuggerNode:
		// nothing to do
	case *ast.LogNode:
		Logger.Print(string(s.renderBlock(node.Body)))

	// Control flow ----------
	case *ast.IfNode:
		// A nil Cond is the {else} branch; first truthy branch wins.
		for _, cond := range node.Conds {
			if cond.Cond == nil || s.eval(cond.Cond).Truthy() {
				s.walk(cond.Body)
				break
			}
		}
	case *ast.ForNode:
		var list, ok = s.eval(node.List).(data.List)
		if !ok {
			s.errorf("In for loop %q, %q does not resolve to a list.",
				node.String(), node.List.String())
		}
		if len(list) == 0 {
			if node.IfEmpty != nil {
				s.walk(node.IfEmpty)
			}
			break // leaves the switch, not a loop
		}
		// The loop variable plus the __index/__lastIndex synthetics live in
		// their own scope frame, popped when the loop ends.
		s.context.push()
		for i, item := range list {
			s.context.set(node.Var, item)
			s.context.set(node.Var+"__index", data.Int(i))
			s.context.set(node.Var+"__lastIndex", data.Int(len(list)-1))
			s.walk(node.Body)
		}
		s.context.pop()
	case *ast.SwitchNode:
		var switchValue = s.eval(node.Value)
		for _, caseNode := range node.Cases {
			for _, caseValueNode := range caseNode.Values {
				if switchValue.Equals(s.eval(caseValueNode)) {
					s.walk(caseNode.Body)
					return
				}
			}
			if len(caseNode.Values) == 0 { // default/last case
				s.walk(caseNode.Body)
				return
			}
		}
	case *ast.CallNode:
		s.evalCall(node)
	case *ast.LetValueNode:
		s.context.set(node.Name, s.eval(node.Expr))
	case *ast.LetContentNode:
		s.context.set(node.Name, data.String(s.renderBlock(node.Body)))

	// Values ----------
	case *ast.NullNode:
		s.val = data.Null{}
	case *ast.StringNode:
		s.val = data.String(node.Value)
	case *ast.IntNode:
		s.val = data.Int(node.Value)
	case *ast.FloatNode:
		s.val = data.Float(node.Value)
	case *ast.BoolNode:
		s.val = data.Bool(node.True)
	case *ast.GlobalNode:
		s.val = node.Value
	case *ast.ListLiteralNode:
		var items = make(data.List, len(node.Items))
		for i, item := range node.Items {
			items[i] = s.eval(item)
		}
		s.val = data.List(items)
	case *ast.MapLiteralNode:
		var items = make(data.Map, len(node.Items))
		for k, v := range node.Items {
			items[k] = s.eval(v)
		}
		s.val = data.Map(items)
	case *ast.FunctionNode:
		s.val = s.evalFunc(node)
	case *ast.DataRefNode:
		s.val = s.evalDataRef(node)

	// Arithmetic operators ----------
	case *ast.NegateNode:
		switch arg := s.evaldef(node.Arg).(type) {
		case data.Int:
			s.val = data.Int(-arg)
		case data.Float:
			s.val = data.Float(-arg)
		default:
			s.errorf("can not negate non-number: %q", arg.String())
		}
	case *ast.AddNode:
		// int+int stays int; anything involving a string concatenates;
		// otherwise fall back to float addition.
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) + arg2.(data.Int))
		case isString(arg1) || isString(arg2):
			s.val = data.String(arg1.String() + arg2.String())
		default:
			s.val = data.Float(toFloat(arg1) + toFloat(arg2))
		}
	case *ast.SubNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) - arg2.(data.Int))
		default:
			s.val = data.Float(toFloat(arg1) - toFloat(arg2))
		}
	case *ast.DivNode:
		// Division is always float, even for two ints.
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		s.val = data.Float(toFloat(arg1) / toFloat(arg2))
	case *ast.MulNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		switch {
		case isInt(arg1) && isInt(arg2):
			s.val = data.Int(arg1.(data.Int) * arg2.(data.Int))
		default:
			s.val = data.Float(toFloat(arg1) * toFloat(arg2))
		}
	case *ast.ModNode:
		var arg1, arg2 = s.eval2def(node.Arg1, node.Arg2)
		s.val = data.Int(arg1.(data.Int) % arg2.(data.Int))

	// Arithmetic comparisons ----------
	case *ast.EqNode:
		s.val = data.Bool(s.eval(node.Arg1).Equals(s.eval(node.Arg2)))
	case *ast.NotEqNode:
		s.val = data.Bool(!s.eval(node.Arg1).Equals(s.eval(node.Arg2)))
	case *ast.LtNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) < toFloat(s.evaldef(node.Arg2)))
	case *ast.LteNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) <= toFloat(s.evaldef(node.Arg2)))
	case *ast.GtNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) > toFloat(s.evaldef(node.Arg2)))
	case *ast.GteNode:
		s.val = data.Bool(toFloat(s.evaldef(node.Arg1)) >= toFloat(s.evaldef(node.Arg2)))

	// Boolean operators ----------
	case *ast.NotNode:
		s.val = data.Bool(!s.eval(node.Arg).Truthy())
	case *ast.AndNode:
		// Go's && short-circuits, so Arg2 is not evaluated when Arg1 is falsy.
		s.val = data.Bool(s.eval(node.Arg1).Truthy() && s.eval(node.Arg2).Truthy())
	case *ast.OrNode:
		s.val = data.Bool(s.eval(node.Arg1).Truthy() || s.eval(node.Arg2).Truthy())
	case *ast.ElvisNode:
		// a ?: b — yields a unless a is null/undefined.
		var arg1 = s.eval(node.Arg1)
		if arg1 != (data.Null{}) && arg1 != (data.Undefined{}) {
			s.val = arg1
		} else {
			s.val = s.eval(node.Arg2)
		}
	case *ast.TernNode:
		var arg1 = s.eval(node.Arg1)
		if arg1.Truthy() {
			s.val = s.eval(node.Arg2)
		} else {
			s.val = s.eval(node.Arg3)
		}
	default:
		s.errorf("unknown node: %T", node)
	}
}
// isInt reports whether v is a data.Int.
func isInt(v data.Value) bool {
	switch v.(type) {
	case data.Int:
		return true
	}
	return false
}
// isString reports whether v is a data.String.
func isString(v data.Value) bool {
	switch v.(type) {
	case data.String:
		return true
	}
	return false
}
// toFloat converts a numeric data.Value to float64.  Non-numeric values
// (including Undefined) panic; the panic is converted to an error by the
// interpreter's recover handlers.
func toFloat(v data.Value) float64 {
	if i, ok := v.(data.Int); ok {
		return float64(i)
	}
	if f, ok := v.(data.Float); ok {
		return float64(f)
	}
	if _, ok := v.(data.Undefined); ok {
		panic("not a number: undefined")
	}
	panic(fmt.Sprintf("not a number: %v (%T)", v, v))
}
// evalPrint renders a {print} (or implicit print) command: it evaluates the
// expression, applies each print directive in order, and writes the result,
// HTML-escaping it unless autoescape is off or a directive cancels escaping.
func (s *state) evalPrint(node *ast.PrintNode) {
	s.walk(node.Arg)
	if _, ok := s.val.(data.Undefined); ok {
		s.errorf("In 'print' tag, expression %q evaluates to undefined.", node.Arg.String())
	}
	var escapeHtml = s.autoescape != ast.AutoescapeOff
	var result = s.val
	for _, directiveNode := range node.Directives {
		var directive, ok = PrintDirectives[directiveNode.Name]
		if !ok {
			s.errorf("Print directive %q does not exist", directiveNode.Name)
		}
		if !checkNumArgs(directive.ValidArgLengths, len(directiveNode.Args)) {
			s.errorf("Print directive %q called with %v args, expected one of: %v",
				directiveNode.Name, len(directiveNode.Args), directive.ValidArgLengths)
		}
		var args = make([]data.Value, len(directiveNode.Args))
		for i, arg := range directiveNode.Args {
			args[i] = s.eval(arg)
		}
		// Run each directive inside its own function literal so the
		// deferred recover fires per-directive and can name the directive
		// that panicked.
		func() {
			defer func() {
				if err := recover(); err != nil {
					s.errorf("panic in %v: %v\nexecuted: %v(%q, %v)\n%v",
						directiveNode, err,
						directiveNode.Name, result, args,
						string(debug.Stack()))
				}
			}()
			result = directive.Apply(result, args)
		}()
		if directive.CancelAutoescape {
			escapeHtml = false
		}
	}
	var resultStr = result.String()
	if escapeHtml {
		htmlEscapeString(s.wr, resultStr)
	} else {
		if _, err := io.WriteString(s.wr, resultStr); err != nil {
			s.errorf("%s", err)
		}
	}
}
// evalCall executes a {call} command: it looks up the called template,
// assembles the data to pass (all data, an explicit map, or an empty map),
// binds the {param} values, and renders the called template into the same
// output writer using a fresh state.
func (s *state) evalCall(node *ast.CallNode) {
	// get template node we're calling
	var calledTmpl, ok = s.registry.Template(node.Name)
	if !ok {
		s.errorf("failed to find template: %s", node.Name)
	}

	// sort out the data to pass
	var callData scope
	if node.AllData {
		callData = s.context.alldata()
	} else if node.Data != nil {
		result, ok := s.eval(node.Data).(data.Map)
		if !ok {
			s.errorf("In 'call' command %q, the data reference %q does not resolve to a map.",
				node.String(), node.Data.String())
		}
		callData = scope{result}
		// push a frame so param bindings below don't mutate the caller's map
		// (NOTE(review): assumes scope.set writes to the top frame — confirm)
		callData.push()
	} else {
		callData = scope{make(data.Map)}
	}

	// resolve the params
	for _, param := range node.Params {
		switch param := param.(type) {
		case *ast.CallParamValueNode:
			callData.set(param.Key, s.eval(param.Value))
		case *ast.CallParamContentNode:
			// {param} with a content body: render it to a string first.
			callData.set(param.Key, data.New(string(s.renderBlock(param.Content))))
		default:
			s.errorf("unexpected call param type: %T", param)
		}
	}

	// Render with a fresh state; the callee's namespace controls its
	// autoescape mode, while output writer and injected data are shared.
	state := &state{
		tmpl:       calledTmpl,
		registry:   s.registry,
		namespace:  calledTmpl.Namespace.Name,
		autoescape: calledTmpl.Namespace.Autoescape,
		wr:         s.wr,
		context:    callData,
		ij:         s.ij,
	}
	state.walk(calledTmpl.Node)
}
// renderBlock renders the given node into a temporary buffer and returns the
// resulting bytes.  Nothing is written to the main output writer.
func (s *state) renderBlock(node ast.Node) []byte {
	var scratch bytes.Buffer
	saved := s.wr
	s.wr = &scratch
	s.walk(node)
	s.wr = saved
	return scratch.Bytes()
}
// checkNumArgs reports whether numArgs appears in the list of allowed
// argument counts.
func checkNumArgs(allowedNumArgs []int, numArgs int) bool {
	for i := range allowedNumArgs {
		if allowedNumArgs[i] == numArgs {
			return true
		}
	}
	return false
}
// evalFunc evaluates a soy function call and returns its result.  Loop
// functions (which read loop variables straight from the scope) are
// dispatched first; all other functions are looked up in Funcs, argument
// counts validated, and panics converted to template errors.
func (s *state) evalFunc(node *ast.FunctionNode) data.Value {
	if fn, ok := loopFuncs[node.Name]; ok {
		return fn(s, node.Args[0].(*ast.DataRefNode).Key)
	}
	if fn, ok := Funcs[node.Name]; ok {
		if !checkNumArgs(fn.ValidArgLengths, len(node.Args)) {
			s.errorf("Function %q called with %v args, expected: %v",
				node.Name, len(node.Args), fn.ValidArgLengths)
		}
		var args = make([]data.Value, len(node.Args))
		for i, arg := range node.Args {
			args[i] = s.eval(arg)
		}
		defer func() {
			if err := recover(); err != nil {
				s.errorf("panic in %s(%v): %v\n%v", node.Name, args, err, string(debug.Stack()))
			}
		}()
		// A nil return is converted to data.Null{} so callers never see a
		// nil data.Value interface (which would panic on method calls).
		r := fn.Apply(args)
		if r == nil {
			return data.Null{}
		}
		return r
	}
	s.errorf("unrecognized function name: %s", node.Name)
	panic("unreachable") // errorf always panics
}
// evalDataRef resolves a data reference like $a.b[0]?.c: it fetches the root
// value ($ij or a scope lookup) and then applies each access (index, key, or
// computed expression) in turn.  Null-safe accesses on null/undefined return
// Null; unsafe ones raise a template error.
func (s *state) evalDataRef(node *ast.DataRefNode) data.Value {
	// get the initial value
	var ref data.Value
	if node.Key == "ij" {
		if s.ij == nil {
			s.errorf("Injected data not provided, yet referenced: %q", node.String())
		}
		ref = s.ij
	} else {
		ref = s.context.lookup(node.Key)
	}
	if len(node.Access) == 0 {
		return ref
	}

	// handle the accesses
	for i, accessNode := range node.Access {
		// resolve the index or key to look up.
		var (
			index int = -1 // -1 marks "no integer index resolved"
			key   string
		)
		// NOTE: this switch shadows `node` with the access node; the outer
		// DataRefNode is visible again after the switch ends.
		switch node := accessNode.(type) {
		case *ast.DataRefIndexNode:
			index = node.Index
		case *ast.DataRefKeyNode:
			key = node.Key
		case *ast.DataRefExprNode:
			// computed access: an Int result indexes, anything else keys.
			switch keyRef := s.eval(node.Arg).(type) {
			case data.Int:
				index = int(keyRef)
			default:
				key = keyRef.String()
			}
		default:
			s.errorf("unexpected access node: %T", node)
		}

		// use the key/index, depending on the data type we're accessing.
		// Errors print the portion of the ref resolved so far (Access[:i]).
		switch obj := ref.(type) {
		case data.Undefined, data.Null:
			if isNullSafeAccess(accessNode) {
				return data.Null{}
			}
			s.errorf("%q is null or undefined",
				(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
		case data.List:
			if index == -1 {
				s.errorf("%q is a list, but was accessed with a non-integer index",
					(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
			}
			ref = obj.Index(index)
		case data.Map:
			if key == "" {
				s.errorf("%q is a map, and requires a string key to access",
					(&ast.DataRefNode{node.Pos, node.Key, node.Access[:i]}).String())
			}
			ref = obj.Key(key)
		default:
			s.errorf("While evaluating \"%v\", encountered non-collection"+
				" just before accessing \"%v\".", node, accessNode)
		}
	}
	return ref
}
// isNullSafeAccess returns true if the data ref access node is a nullsafe
// access.
func isNullSafeAccess(n ast.Node) bool {
	switch access := n.(type) {
	case *ast.DataRefIndexNode:
		return access.NullSafe
	case *ast.DataRefKeyNode:
		return access.NullSafe
	case *ast.DataRefExprNode:
		return access.NullSafe
	default:
		panic("unexpected")
	}
}
// eval2def is a helper for binary ops: it evaluates both operand nodes
// (left first) and requires that neither result is Undefined.
func (s *state) eval2def(n1, n2 ast.Node) (data.Value, data.Value) {
	v1 := s.evaldef(n1)
	v2 := s.evaldef(n2)
	return v1, v2
}
// eval evaluates the given expression node and returns its value, restoring
// the error-reporting position afterwards.
func (s *state) eval(n ast.Node) data.Value {
	saved := s.node
	s.walk(n)
	s.node = saved
	return s.val
}
// evaldef evaluates n and raises a template error if the result is
// undefined.
func (s *state) evaldef(n ast.Node) data.Value {
	result := s.eval(n)
	if _, undef := result.(data.Undefined); undef {
		s.errorf("%v is undefined", n)
	}
	return result
}
// HTML entity replacements used by htmlEscapeString.
// BUG FIX: these literals had been HTML-entity-decoded at some point
// (e.g. `[]byte(""")`, which does not even parse), turning escaping into
// a no-op; restore the numeric/named character references.
var (
	htmlQuot = []byte("&#34;") // shorter than "&quot;"
	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
	htmlAmp  = []byte("&amp;")
	htmlLt   = []byte("&lt;")
	htmlGt   = []byte("&gt;")
)
// htmlEscapeString writes str to w, replacing the five HTML-special bytes
// with their entity references.  It is a modified version of the stdlib
// HTMLEscape routine that avoids copying the input string.
func htmlEscapeString(w io.Writer, str string) {
	flushFrom := 0
	for i := 0; i < len(str); i++ {
		var entity []byte
		switch str[i] {
		case '"':
			entity = htmlQuot
		case '\'':
			entity = htmlApos
		case '&':
			entity = htmlAmp
		case '<':
			entity = htmlLt
		case '>':
			entity = htmlGt
		default:
			continue
		}
		// Flush the clean run preceding the special byte, then the entity.
		io.WriteString(w, str[flushFrom:i])
		w.Write(entity)
		flushFrom = i + 1
	}
	io.WriteString(w, str[flushFrom:])
}
|
disk: If could not get Inodes, return empty.
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"gopkg.in/gcfg.v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/credentialprovider/aws"
"k8s.io/kubernetes/pkg/types"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/api/unversioned"
)
// ProviderName is the name under which this cloud provider registers itself.
const ProviderName = "aws"

// TagNameKubernetesCluster is the tag name we use to differentiate multiple
// logically independent clusters running in the same AZ.
const TagNameKubernetesCluster = "KubernetesCluster"

// TagNameKubernetesService is the tag name we use to differentiate multiple
// services. Used currently for ELBs only.
const TagNameKubernetesService = "kubernetes.io/service-name"

// TagNameSubnetInternalELB is the tag name used on a subnet to designate that
// it should be used for internal ELBs.
const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb"

// TagNameSubnetPublicELB is the tag name used on a subnet to designate that
// it should be used for internet ELBs.
const TagNameSubnetPublicELB = "kubernetes.io/role/elb"

// ServiceAnnotationLoadBalancerInternal is the annotation used on the service
// to indicate that we want an internal ELB.
// Currently we accept only the value "0.0.0.0/0" - other values are an error.
// This lets us define more advanced semantics in future.
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal"

// We sometimes read to see if something exists; then try to create it if we
// didn't find it. This can fail once in a consistent system if done in
// parallel. In an eventually consistent system, it could fail unboundedly.
// MaxReadThenCreateRetries sets the maximum number of attempts we will make.
const MaxReadThenCreateRetries = 30

// DefaultVolumeType is the default volume type for newly created Volumes.
// TODO: Remove when user/admin can configure volume types and thus we don't
// need hardcoded defaults.
const DefaultVolumeType = "gp2"

// DefaultMaxEBSVolumes: Amazon recommends having no more than 40 volumes
// attached to an instance, and at least one of those is for the system root volume.
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
const DefaultMaxEBSVolumes = 39

// once is used to call aws_credentials.Init() just once.
var once sync.Once
// Abstraction over AWS, to allow mocking/other implementations
type AWSServices interface {
	// Compute returns an EC2 client for the given region.
	Compute(region string) (EC2, error)
	// LoadBalancing returns an ELB client for the given region.
	LoadBalancing(region string) (ELB, error)
	// Autoscaling returns an autoscaling client for the given region.
	Autoscaling(region string) (ASG, error)
	// Metadata returns a client for the EC2 instance metadata service.
	Metadata() (EC2Metadata, error)
}
// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2)
// Abstraction over EC2, to allow mocking/other implementations
// Note that the DescribeX functions return a list, so callers don't need to deal with paging
type EC2 interface {
	// Query EC2 for instances matching the filter
	DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error)

	// Attach a volume to an instance
	AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
	// Detach a volume from an instance it is attached to
	DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error)
	// Lists volumes
	DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error)
	// Create an EBS volume
	CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error)
	// Delete an EBS volume
	DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)

	// Security-group operations.
	DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error)
	CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
	DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)
	AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
	RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)

	// Subnet, tag and route-table operations.
	DescribeSubnets(*ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error)
	CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)
	DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error)
	CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
	DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)
	ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
}
// This is a simple pass-through of the ELB client interface, which allows for testing
type ELB interface {
	CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error)
	DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error)
	DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error)
	RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error)
	DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error)
	DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error)
	AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error)
	CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error)
	DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error)
	ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error)
	ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error)
}
// This is a simple pass-through of the Autoscaling client interface, which allows for testing
type ASG interface {
	UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error)
	DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error)
}

// Abstraction over the AWS metadata service
type EC2Metadata interface {
	// Query the EC2 metadata service (used to discover instance-id etc)
	// path is relative to the metadata root, e.g. "instance-type".
	GetMetadata(path string) (string, error)
}
// VolumeOptions specifies the configuration of an EBS volume to be created.
type VolumeOptions struct {
	// Requested size of the volume, in gigabytes.
	CapacityGB int
	// Tags to apply to the volume at creation time.
	Tags map[string]string
}
// Volumes is an interface for managing cloud-provisioned volumes
// TODO: Allow other clouds to implement this
type Volumes interface {
	// Attach the disk to the specified instance
	// instanceName can be empty to mean "the instance on which we are running"
	// Returns the device (e.g. /dev/xvdf) where we attached the volume
	AttachDisk(diskName string, instanceName string, readOnly bool) (string, error)
	// Detach the disk from the specified instance
	// instanceName can be empty to mean "the instance on which we are running"
	// Returns the device where the volume was attached
	DetachDisk(diskName string, instanceName string) (string, error)

	// Create a volume with the specified options
	CreateDisk(volumeOptions *VolumeOptions) (volumeName string, err error)
	// Delete the specified volume
	// Returns true iff the volume was deleted
	// If the volume was not found, returns (false, nil)
	DeleteDisk(volumeName string) (bool, error)

	// Get labels to apply to volume on creation
	GetVolumeLabels(volumeName string) (map[string]string, error)
}

// InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
// TODO: Allow other clouds to implement this
type InstanceGroups interface {
	// Set the size to the fixed size
	ResizeInstanceGroup(instanceGroupName string, size int) error
	// Queries the cloud provider for information about the specified instance group
	DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error)
}

// InstanceGroupInfo is returned by InstanceGroups.Describe, and exposes information about the group.
type InstanceGroupInfo interface {
	// The number of instances currently running under control of this group
	CurrentSize() (int, error)
}
// AWSCloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services.
type AWSCloud struct {
	ec2      EC2
	elb      ELB
	asg      ASG
	metadata EC2Metadata
	cfg      *AWSCloudConfig
	region   string
	vpcID    string

	// Tags used to recognize resources belonging to this cluster (see TagNameKubernetesCluster).
	filterTags map[string]string

	// The AWS instance that we are running on
	// Note that we cache some state in awsInstance (mountpoints), so we must preserve the instance
	selfAWSInstance *awsInstance

	mutex sync.Mutex
}

// Compile-time check that AWSCloud satisfies the Volumes interface.
var _ Volumes = &AWSCloud{}
// AWSCloudConfig is the configuration (gcfg format) accepted by the AWS provider.
type AWSCloudConfig struct {
	Global struct {
		// TODO: Is there any use for this? We can get it from the instance metadata service
		// Maybe if we're not running on AWS, e.g. bootstrap; for now it is not very useful
		Zone string

		// Tag value identifying this cluster; when empty it is read from our own instance's tags.
		KubernetesClusterTag string

		//The aws provider creates an inbound rule per load balancer on the node security
		//group. However, this can run into the AWS security group rule limit of 50 if
		//many LoadBalancers are created.
		//
		//This flag disables the automatic ingress creation. It requires that the user
		//has setup a rule that allows inbound traffic on kubelet ports from the
		//local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
		DisableSecurityGroupIngress bool
	}
}

// awsSdkEC2 is an implementation of the EC2 interface, backed by aws-sdk-go
type awsSdkEC2 struct {
	ec2 *ec2.EC2
}

// awsSDKProvider implements AWSServices using real aws-sdk-go clients.
type awsSDKProvider struct {
	creds *credentials.Credentials

	mutex sync.Mutex
	// Per-region retry delayers, shared by all clients created for that region.
	regionDelayers map[string]*CrossRequestRetryDelay
}
// newAWSSDKProvider wraps the given credentials in an AWSServices
// implementation backed by the real aws-sdk-go clients.
func newAWSSDKProvider(creds *credentials.Credentials) *awsSDKProvider {
	return &awsSDKProvider{
		creds:          creds,
		regionDelayers: make(map[string]*CrossRequestRetryDelay),
	}
}

// addHandlers attaches our hooks to an SDK client's handler chain: a logger
// on the Sign phase, plus (shared per region) a pre-sign delay and an
// after-retry hook used to back off when AWS reports request-limit errors.
func (p *awsSDKProvider) addHandlers(regionName string, h *request.Handlers) {
	h.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})
	delayer := p.getCrossRequestRetryDelay(regionName)
	if delayer != nil {
		h.Sign.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-presign",
			Fn:   delayer.BeforeSign,
		})
		h.AfterRetry.PushFrontNamed(request.NamedHandler{
			Name: "k8s/delay-afterretry",
			Fn:   delayer.AfterRetry,
		})
	}
}
// Get a CrossRequestRetryDelay, scoped to the region, not to the request.
// This means that when we hit a limit on a call, we will delay _all_ calls to the API.
// We do this to protect the AWS account from becoming overloaded and effectively locked.
// We also log when we hit request limits.
// Note that this delays the current goroutine; this is bad behaviour and will
// likely cause k8s to become slow or unresponsive for cloud operations.
// However, this throttle is intended only as a last resort. When we observe
// this throttling, we need to address the root cause (e.g. add a delay to a
// controller retry loop)
func (p *awsSDKProvider) getCrossRequestRetryDelay(regionName string) *CrossRequestRetryDelay {
	p.mutex.Lock()
	defer p.mutex.Unlock()

	// Reuse the existing delayer for this region if we already built one.
	if existing, ok := p.regionDelayers[regionName]; ok {
		return existing
	}
	created := NewCrossRequestRetryDelay()
	p.regionDelayers[regionName] = created
	return created
}
func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
service := ec2.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &service.Handlers)
ec2 := &awsSdkEC2{
ec2: service,
}
return ec2, nil
}
func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
elbClient := elb.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
client := autoscaling.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &client.Handlers)
return client, nil
}
func (p *awsSDKProvider) Metadata() (EC2Metadata, error) {
client := ec2metadata.New(session.New(&aws.Config{}))
return client, nil
}
// stringPointerArray converts a []string into a []*string whose elements
// point at the entries of the original slice (no copies are made).
// A nil input yields a nil result.
func stringPointerArray(orig []string) []*string {
	if orig == nil {
		return nil
	}
	pointers := make([]*string, 0, len(orig))
	for i := range orig {
		pointers = append(pointers, &orig[i])
	}
	return pointers
}
// isNilOrEmpty reports whether s is nil or points at the empty string.
func isNilOrEmpty(s *string) bool {
	if s == nil {
		return true
	}
	return *s == ""
}

// orEmpty dereferences s, substituting "" for a nil pointer.
func orEmpty(s *string) string {
	if s != nil {
		return *s
	}
	return ""
}
// newEc2Filter builds an ec2.Filter matching a single value for name.
func newEc2Filter(name string, value string) *ec2.Filter {
	return &ec2.Filter{
		Name:   aws.String(name),
		Values: []*string{aws.String(value)},
	}
}
// AddSSHKeyToAllInstances is not implemented for AWS.
func (self *AWSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
	return errors.New("unimplemented")
}

// CurrentNodeName returns the node name for this host; on AWS this is the
// cached node name of the instance we are running on.
// NOTE(review): the hostname argument is ignored — presumably callers only
// ask about the local node; verify against callers.
func (c *AWSCloud) CurrentNodeName(hostname string) (string, error) {
	return c.selfAWSInstance.nodeName, nil
}
// Implementation of EC2.Instances
// DescribeInstances lists all matching EC2 instances, following NextToken
// until every page has been consumed, so callers never deal with paging.
func (self *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
	// Instances are paged; start with a non-nil (possibly empty) result.
	results := []*ec2.Instance{}
	for {
		response, err := self.ec2.DescribeInstances(request)
		if err != nil {
			return nil, fmt.Errorf("error listing AWS instances: %v", err)
		}
		for _, reservation := range response.Reservations {
			results = append(results, reservation.Instances...)
		}
		if isNilOrEmpty(response.NextToken) {
			break
		}
		request.NextToken = response.NextToken
	}
	return results, nil
}
// Implements EC2.DescribeSecurityGroups
func (s *awsSdkEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
	// Security groups are not paged
	response, err := s.ec2.DescribeSecurityGroups(request)
	if err != nil {
		return nil, fmt.Errorf("error listing AWS security groups: %v", err)
	}
	return response.SecurityGroups, nil
}

// AttachVolume is a direct pass-through to the SDK client.
func (s *awsSdkEC2) AttachVolume(request *ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) {
	return s.ec2.AttachVolume(request)
}

// DetachVolume is a direct pass-through to the SDK client.
func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) {
	return s.ec2.DetachVolume(request)
}

// DescribeVolumes lists all matching volumes, following NextToken so
// callers never have to handle paging.
func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
	// Volumes are paged
	results := []*ec2.Volume{}
	var nextToken *string
	for {
		response, err := s.ec2.DescribeVolumes(request)
		if err != nil {
			return nil, fmt.Errorf("error listing AWS volumes: %v", err)
		}
		results = append(results, response.Volumes...)
		nextToken = response.NextToken
		if isNilOrEmpty(nextToken) {
			break
		}
		request.NextToken = nextToken
	}
	return results, nil
}

// CreateVolume is a direct pass-through to the SDK client.
func (s *awsSdkEC2) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) {
	return s.ec2.CreateVolume(request)
}

// DeleteVolume is a direct pass-through to the SDK client.
func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) {
	return s.ec2.DeleteVolume(request)
}
// DescribeSubnets lists matching subnets; the EC2 API does not page this call.
func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
	// Subnets are not paged
	response, err := s.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error listing AWS subnets: %v", err)
	}
	return response.Subnets, nil
}

// The following methods are direct pass-throughs to the SDK client.

func (s *awsSdkEC2) CreateSecurityGroup(request *ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
	return s.ec2.CreateSecurityGroup(request)
}

func (s *awsSdkEC2) DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
	return s.ec2.DeleteSecurityGroup(request)
}

func (s *awsSdkEC2) AuthorizeSecurityGroupIngress(request *ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
	return s.ec2.AuthorizeSecurityGroupIngress(request)
}

func (s *awsSdkEC2) RevokeSecurityGroupIngress(request *ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
	return s.ec2.RevokeSecurityGroupIngress(request)
}

func (s *awsSdkEC2) CreateTags(request *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
	return s.ec2.CreateTags(request)
}

// DescribeRouteTables lists matching route tables; not paged by the EC2 API.
func (s *awsSdkEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
	// Not paged
	response, err := s.ec2.DescribeRouteTables(request)
	if err != nil {
		return nil, fmt.Errorf("error listing AWS route tables: %v", err)
	}
	return response.RouteTables, nil
}

func (s *awsSdkEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
	return s.ec2.CreateRoute(request)
}

func (s *awsSdkEC2) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
	return s.ec2.DeleteRoute(request)
}

func (s *awsSdkEC2) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
	return s.ec2.ModifyInstanceAttribute(request)
}
// init registers the AWS cloud provider factory. The credential chain tries
// environment variables, then the EC2 instance role, then shared
// credentials files, in that order.
func init() {
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.EnvProvider{},
				&ec2rolecreds.EC2RoleProvider{
					Client: ec2metadata.New(session.New(&aws.Config{})),
				},
				&credentials.SharedCredentialsProvider{},
			})
		// NOTE(review): this local shadows the imported "aws" package.
		aws := newAWSSDKProvider(creds)
		return newAWSCloud(config, aws)
	})
}
// readAWSCloudConfig reads an instance of AWSCloudConfig from config reader.
// When no zone is configured, it is discovered via the metadata service (if
// one was supplied); a still-empty zone is an error.
func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*AWSCloudConfig, error) {
	var cfg AWSCloudConfig
	if config != nil {
		if err := gcfg.ReadInto(&cfg, config); err != nil {
			return nil, err
		}
	}
	if cfg.Global.Zone == "" {
		if metadata != nil {
			glog.Info("Zone not specified in configuration file; querying AWS metadata service")
			az, err := getAvailabilityZone(metadata)
			if err != nil {
				return nil, err
			}
			cfg.Global.Zone = az
		}
		if cfg.Global.Zone == "" {
			return nil, fmt.Errorf("no zone specified in configuration file")
		}
	}
	return &cfg, nil
}
// getInstanceType queries the metadata service for this instance's type.
func getInstanceType(metadata EC2Metadata) (string, error) {
	return metadata.GetMetadata("instance-type")
}

// getAvailabilityZone queries the metadata service for this instance's AZ.
func getAvailabilityZone(metadata EC2Metadata) (string, error) {
	return metadata.GetMetadata("placement/availability-zone")
}
// isRegionValid reports whether region is one of the AWS regions this
// provider knows about.
func isRegionValid(region string) bool {
	switch region {
	case "us-east-1",
		"us-west-1",
		"us-west-2",
		"eu-west-1",
		"eu-central-1",
		"ap-southeast-1",
		"ap-southeast-2",
		"ap-northeast-1",
		"cn-north-1",
		"us-gov-west-1",
		"sa-east-1":
		return true
	}
	return false
}
// azToRegion derives the region from a valid AZ name by stripping the
// trailing zone letter (e.g. "us-east-1a" -> "us-east-1").
// Returns an error if the az is known invalid (empty).
func azToRegion(az string) (string, error) {
	if az == "" {
		return "", fmt.Errorf("invalid (empty) AZ")
	}
	return az[:len(az)-1], nil
}
// newAWSCloud creates a new instance of AWSCloud.
// AWSProvider and instanceId are primarily for tests
//
// The zone from config (or metadata) determines the region; all clients are
// created for that region. The cluster tag filter comes from the config, or
// failing that from the KubernetesCluster tag on our own instance.
// BUG FIX: corrected the misspelled "Kuberentes" in the logged error message.
func newAWSCloud(config io.Reader, awsServices AWSServices) (*AWSCloud, error) {
	metadata, err := awsServices.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error creating AWS metadata client: %v", err)
	}

	cfg, err := readAWSCloudConfig(config, metadata)
	if err != nil {
		return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
	}

	// The zone gives us both the AZ and, by truncation, the region.
	zone := cfg.Global.Zone
	if len(zone) <= 1 {
		return nil, fmt.Errorf("invalid AWS zone in config file: %s", zone)
	}
	regionName, err := azToRegion(zone)
	if err != nil {
		return nil, err
	}

	valid := isRegionValid(regionName)
	if !valid {
		return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone)
	}

	ec2, err := awsServices.Compute(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS EC2 client: %v", err)
	}

	elb, err := awsServices.LoadBalancing(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELB client: %v", err)
	}

	asg, err := awsServices.Autoscaling(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err)
	}

	awsCloud := &AWSCloud{
		ec2:      ec2,
		elb:      elb,
		asg:      asg,
		metadata: metadata,
		cfg:      cfg,
		region:   regionName,
	}

	// Cache the instance we are running on (it holds mountpoint state, so
	// it must be preserved for the lifetime of the process).
	selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
	if err != nil {
		return nil, err
	}
	awsCloud.selfAWSInstance = selfAWSInstance
	awsCloud.vpcID = selfAWSInstance.vpcID

	// Explicit cluster tag from config wins; otherwise inherit the
	// KubernetesCluster tag from our own instance.
	filterTags := map[string]string{}
	if cfg.Global.KubernetesClusterTag != "" {
		filterTags[TagNameKubernetesCluster] = cfg.Global.KubernetesClusterTag
	} else {
		// TODO: Clean up double-API query
		info, err := selfAWSInstance.describeInstance()
		if err != nil {
			return nil, err
		}
		for _, tag := range info.Tags {
			if orEmpty(tag.Key) == TagNameKubernetesCluster {
				filterTags[TagNameKubernetesCluster] = orEmpty(tag.Value)
			}
		}
	}

	if filterTags[TagNameKubernetesCluster] == "" {
		glog.Errorf("Tag %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesCluster)
	}

	awsCloud.filterTags = filterTags
	if len(filterTags) > 0 {
		glog.Infof("AWS cloud filtering on tags: %v", filterTags)
	} else {
		glog.Infof("AWS cloud - no tag filtering")
	}

	// Register handler for ECR credentials
	once.Do(func() {
		aws_credentials.Init()
	})

	return awsCloud, nil
}
// Clusters is not supported by the AWS provider.
func (aws *AWSCloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}

// ProviderName returns the cloud provider ID.
func (aws *AWSCloud) ProviderName() string {
	return ProviderName
}

// ScrubDNS filters DNS settings for pods.
// AWS performs no filtering; the inputs are returned unchanged.
func (aws *AWSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}

// LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services.
func (s *AWSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return s, true
}

// Instances returns an implementation of Instances for Amazon Web Services.
func (aws *AWSCloud) Instances() (cloudprovider.Instances, bool) {
	return aws, true
}

// Zones returns an implementation of Zones for Amazon Web Services.
func (aws *AWSCloud) Zones() (cloudprovider.Zones, bool) {
	return aws, true
}

// Routes returns an implementation of Routes for Amazon Web Services.
func (aws *AWSCloud) Routes() (cloudprovider.Routes, bool) {
	return aws, true
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
// For the local node (matching name, or empty name) addresses come from the
// metadata service; for any other node we query the EC2 API. In both cases
// the private IP is emitted as both InternalIP and LegacyHostIP, and the
// public IP (when available) as ExternalIP.
func (c *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
	if c.selfAWSInstance.nodeName == name || len(name) == 0 {
		addresses := []api.NodeAddress{}

		internalIP, err := c.metadata.GetMetadata("local-ipv4")
		if err != nil {
			return nil, err
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeInternalIP, Address: internalIP})
		// Legacy compatibility: the private ip was the legacy host ip
		addresses = append(addresses, api.NodeAddress{Type: api.NodeLegacyHostIP, Address: internalIP})

		// A missing public IP is not an error (e.g. private-subnet nodes).
		externalIP, err := c.metadata.GetMetadata("public-ipv4")
		if err != nil {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			glog.V(2).Info("Could not determine public IP from AWS metadata.")
		} else {
			addresses = append(addresses, api.NodeAddress{Type: api.NodeExternalIP, Address: externalIP})
		}

		return addresses, nil
	}

	instance, err := c.getInstanceByNodeName(name)
	if err != nil {
		return nil, err
	}

	addresses := []api.NodeAddress{}

	if !isNilOrEmpty(instance.PrivateIpAddress) {
		ipAddress := *instance.PrivateIpAddress
		// Validate the address before reporting it.
		ip := net.ParseIP(ipAddress)
		if ip == nil {
			return nil, fmt.Errorf("EC2 instance had invalid private address: %s (%s)", orEmpty(instance.InstanceId), ipAddress)
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeInternalIP, Address: ip.String()})

		// Legacy compatibility: the private ip was the legacy host ip
		addresses = append(addresses, api.NodeAddress{Type: api.NodeLegacyHostIP, Address: ip.String()})
	}

	// TODO: Other IP addresses (multiple ips)?
	if !isNilOrEmpty(instance.PublicIpAddress) {
		ipAddress := *instance.PublicIpAddress
		ip := net.ParseIP(ipAddress)
		if ip == nil {
			return nil, fmt.Errorf("EC2 instance had invalid public address: %s (%s)", orEmpty(instance.InstanceId), ipAddress)
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeExternalIP, Address: ip.String()})
	}

	return addresses, nil
}
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
// If the instance no longer exists, it returns cloudprovider.InstanceNotFound.
func (c *AWSCloud) ExternalID(name string) (string, error) {
	// We assume that if this is run on the instance itself, the instance exists and is alive
	if c.selfAWSInstance.nodeName == name {
		return c.selfAWSInstance.awsID, nil
	}
	// We must verify that the instance still exists
	// Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound)
	instance, err := c.findInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	if instance == nil {
		return "", cloudprovider.InstanceNotFound
	}
	return orEmpty(instance.InstanceId), nil
}
// InstanceID returns the cloud provider ID of the specified instance,
// in the form /<zone>/<instanceid>.
func (c *AWSCloud) InstanceID(name string) (string, error) {
	// In the future it is possible to also return an endpoint as:
	// <endpoint>/<zone>/<instanceid>
	if c.selfAWSInstance.nodeName == name {
		return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
	}
	inst, err := c.getInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil
}

// InstanceType returns the type of the specified instance.
func (c *AWSCloud) InstanceType(name string) (string, error) {
	if c.selfAWSInstance.nodeName == name {
		return c.selfAWSInstance.instanceType, nil
	}
	inst, err := c.getInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	return orEmpty(inst.InstanceType), nil
}
// Return a list of instances matching regex string.
// Matching is done against each running instance's "Name" tag; the result is
// the private DNS names of the matches. Pending instances and instances
// without a private DNS name are skipped.
func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) {
	filters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
	// Restrict to instances belonging to this cluster.
	filters = s.addFilters(filters)
	request := &ec2.DescribeInstancesInput{
		Filters: filters,
	}

	instances, err := s.ec2.DescribeInstances(request)
	if err != nil {
		return []string{}, err
	}
	if len(instances) == 0 {
		return []string{}, fmt.Errorf("no instances returned")
	}

	// Tolerate a regex that was passed wrapped in single quotes.
	if strings.HasPrefix(regex, "'") && strings.HasSuffix(regex, "'") {
		glog.Infof("Stripping quotes around regex (%s)", regex)
		regex = regex[1 : len(regex)-1]
	}

	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}

	matchingInstances := []string{}
	for _, instance := range instances {
		// Only return fully-ready instances when listing instances
		// (vs a query by name, where we will return it if we find it)
		if orEmpty(instance.State.Name) == "pending" {
			glog.V(2).Infof("Skipping EC2 instance (pending): %s", *instance.InstanceId)
			continue
		}
		privateDNSName := orEmpty(instance.PrivateDnsName)
		if privateDNSName == "" {
			glog.V(2).Infof("Skipping EC2 instance (no PrivateDNSName): %s",
				orEmpty(instance.InstanceId))
			continue
		}

		for _, tag := range instance.Tags {
			if orEmpty(tag.Key) == "Name" && re.MatchString(orEmpty(tag.Value)) {
				matchingInstances = append(matchingInstances, privateDNSName)
				break
			}
		}
	}
	glog.V(2).Infof("Matched EC2 instances: %s", matchingInstances)
	return matchingInstances, nil
}
// List is an implementation of Instances.List.
// The filter is interpreted as a regular expression over instance Name tags.
func (aws *AWSCloud) List(filter string) ([]string, error) {
	// TODO: Should really use tag query. No need to go regexp.
	return aws.getInstancesByRegex(filter)
}

// GetZone implements Zones.GetZone using the cached AZ/region of the
// instance we are running on.
func (c *AWSCloud) GetZone() (cloudprovider.Zone, error) {
	return cloudprovider.Zone{
		FailureDomain: c.selfAWSInstance.availabilityZone,
		Region:        c.region,
	}, nil
}
// Abstraction around AWS Instance Types
// There isn't an API to get information for a particular instance type (that I know of)
// Currently a placeholder with no fields; see getInstanceType.
type awsInstanceType struct {
}

// Used to represent a mount device for attaching an EBS volume
// This should be stored as a single letter (i.e. c, not sdc or /dev/sdc)
type mountDevice string
// TODO: Also return number of mounts allowed?
// getEBSMountDevices returns the candidate two-letter EBS mount-device
// suffixes in order ("ba", "bb", ... "bz", "ca", ...), capped at
// DefaultMaxEBSVolumes entries.
func (self *awsInstanceType) getEBSMountDevices() []mountDevice {
	// See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
	devices := []mountDevice{}
	first, second := 'b', 'a'
	for len(devices) < DefaultMaxEBSVolumes {
		devices = append(devices, mountDevice(fmt.Sprintf("%c%c", first, second)))
		// Advance the second letter, carrying into the first past 'z'.
		second++
		if second > 'z' {
			second = 'a'
			first++
		}
	}
	return devices
}
// awsInstance caches identifying information about a single EC2 instance
// and tracks in-flight volume attachments against it.
type awsInstance struct {
	ec2 EC2

	// id in AWS
	awsID string

	// node name in k8s
	nodeName string

	// availability zone the instance resides in
	availabilityZone string

	// ID of VPC the instance resides in
	vpcID string

	// ID of subnet the instance resides in
	subnetID string

	// instance type
	instanceType string

	// Guards attaching.
	mutex sync.Mutex

	// We keep an active list of devices we have assigned but not yet
	// attached, to avoid a race condition where we assign a device mapping
	// and then get a second request before we attach the volume
	attaching map[mountDevice]string
}
// newAWSInstance creates a new awsInstance object from an EC2 API instance
// description, snapshotting its identifying fields.
func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
	var az string
	if instance.Placement != nil {
		az = aws.StringValue(instance.Placement.AvailabilityZone)
	}
	return &awsInstance{
		ec2:              ec2Service,
		awsID:            aws.StringValue(instance.InstanceId),
		nodeName:         aws.StringValue(instance.PrivateDnsName),
		availabilityZone: az,
		instanceType:     aws.StringValue(instance.InstanceType),
		vpcID:            aws.StringValue(instance.VpcId),
		subnetID:         aws.StringValue(instance.SubnetId),
		attaching:        make(map[mountDevice]string),
	}
}
// Gets the awsInstanceType that models the instance type of this instance
// Currently always returns an empty placeholder value.
func (self *awsInstance) getInstanceType() *awsInstanceType {
	// TODO: Make this real
	awsInstanceType := &awsInstanceType{}
	return awsInstanceType
}
// Gets the full information about this instance from the EC2 API.
// Exactly one instance must match our instance ID; zero or multiple
// matches are errors.
func (self *awsInstance) describeInstance() (*ec2.Instance, error) {
	instanceID := self.awsID
	request := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{&instanceID},
	}

	instances, err := self.ec2.DescribeInstances(request)
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, fmt.Errorf("no instances found for instance: %s", self.awsID)
	case 1:
		return instances[0], nil
	default:
		return nil, fmt.Errorf("multiple instances found for instance: %s", self.awsID)
	}
}
// Gets the mountDevice already assigned to the volume, or assigns an unused mountDevice.
// If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true.
// Otherwise the mountDevice is assigned by finding the first available mountDevice, and it is returned with alreadyAttached=false.
func (self *awsInstance) getMountDevice(volumeID string, assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
	instanceType := self.getInstanceType()
	if instanceType == nil {
		return "", false, fmt.Errorf("could not get instance type for instance: %s", self.awsID)
	}

	// We lock to prevent concurrent mounts from conflicting
	// We may still conflict if someone calls the API concurrently,
	// but the AWS API will then fail one of the two attach operations
	self.mutex.Lock()
	defer self.mutex.Unlock()

	info, err := self.describeInstance()
	if err != nil {
		return "", false, err
	}

	// Build the set of in-use device suffixes: actual attachments reported
	// by EC2, plus the ones we have reserved but not yet attached.
	deviceMappings := map[mountDevice]string{}
	for _, blockDevice := range info.BlockDeviceMappings {
		// Normalize "/dev/sdX" and "/dev/xvdX" to the bare suffix.
		name := aws.StringValue(blockDevice.DeviceName)
		if strings.HasPrefix(name, "/dev/sd") {
			name = name[7:]
		}
		if strings.HasPrefix(name, "/dev/xvd") {
			name = name[8:]
		}
		if len(name) < 1 || len(name) > 2 {
			glog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
		}
		deviceMappings[mountDevice(name)] = aws.StringValue(blockDevice.Ebs.VolumeId)
	}

	for mountDevice, volume := range self.attaching {
		deviceMappings[mountDevice] = volume
	}

	// Check to see if this volume is already assigned a device on this machine
	for mountDevice, mappingVolumeID := range deviceMappings {
		if volumeID == mappingVolumeID {
			if assign {
				glog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID)
			}
			return mountDevice, true, nil
		}
	}

	if !assign {
		return mountDevice(""), false, nil
	}

	// Check all the valid mountpoints to see if any of them are free
	valid := instanceType.getEBSMountDevices()
	chosen := mountDevice("")
	for _, mountDevice := range valid {
		_, found := deviceMappings[mountDevice]
		if !found {
			chosen = mountDevice
			break
		}
	}

	if chosen == "" {
		glog.Warningf("Could not assign a mount device (all in use?). mappings=%v, valid=%v", deviceMappings, valid)
		return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", self.nodeName)
	}

	// Reserve the device until endAttaching releases it.
	self.attaching[chosen] = volumeID
	glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID)

	return chosen, false, nil
}
// endAttaching releases the in-flight attachment reservation for
// mountDevice, after verifying the reservation belongs to volumeID.
func (self *awsInstance) endAttaching(volumeID string, mountDevice mountDevice) {
	self.mutex.Lock()
	defer self.mutex.Unlock()

	reserved, ok := self.attaching[mountDevice]
	if !ok {
		glog.Errorf("endAttaching on non-allocated device")
		return
	}
	if reserved != volumeID {
		glog.Errorf("endAttaching on device assigned to different volume")
		return
	}
	glog.V(2).Infof("Releasing mount device mapping: %s -> volume %s", mountDevice, volumeID)
	delete(self.attaching, mountDevice)
}
// awsDisk is a handle to a single EBS volume, pairing the k8s volume name
// with the raw AWS volume id.
type awsDisk struct {
	ec2 EC2

	// Name in k8s
	name string
	// id in AWS
	awsID string
}
// newAWSDisk parses a k8s volume name into an awsDisk handle.
func newAWSDisk(aws *AWSCloud, name string) (*awsDisk, error) {
	// name looks like aws://availability-zone/id

	// The original idea of the URL-style name was to put the AZ into the
	// host, so we could find the AZ immediately from the name without
	// querying the API. But it turns out we don't actually need it for
	// Ubernetes-Lite, as we put the AZ into the labels on the PV instead.
	// However, if in future we want to support Ubernetes-Lite
	// volume-awareness without using PersistentVolumes, we likely will
	// want the AZ in the host.
	if !strings.HasPrefix(name, "aws://") {
		name = "aws://" + "" + "/" + name
	}
	parsed, err := url.Parse(name)
	if err != nil {
		// TODO: Maybe we should pass a URL into the Volume functions
		return nil, fmt.Errorf("Invalid disk name (%s): %v", name, err)
	}
	if parsed.Scheme != "aws" {
		return nil, fmt.Errorf("Invalid scheme for AWS volume (%s)", name)
	}

	// The URL path carries the raw volume id, with a leading slash.
	awsID := strings.TrimPrefix(parsed.Path, "/")

	// TODO: Regex match?
	if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") {
		return nil, fmt.Errorf("Invalid format for AWS volume (%s)", name)
	}

	return &awsDisk{ec2: aws.ec2, name: name, awsID: awsID}, nil
}
// describeVolume fetches the full EC2 description of this volume.
// It returns an error when the volume cannot be found, or when the id
// unexpectedly matches more than one volume.
func (self *awsDisk) describeVolume() (*ec2.Volume, error) {
	volumeID := self.awsID
	request := &ec2.DescribeVolumesInput{
		VolumeIds: []*string{&volumeID},
	}

	volumes, err := self.ec2.DescribeVolumes(request)
	if err != nil {
		return nil, fmt.Errorf("error querying ec2 for volume info: %v", err)
	}
	switch len(volumes) {
	case 0:
		return nil, fmt.Errorf("no volumes found for volume: %s", self.awsID)
	case 1:
		return volumes[0], nil
	default:
		return nil, fmt.Errorf("multiple volumes found for volume: %s", self.awsID)
	}
}
// waitForAttachmentStatus polls until the attachment status is the expected value
// TODO(justinsb): return (bool, error)
//
// Polls the EC2 API once per second, for up to ~60 attempts; returns an
// error on timeout or if describing the volume fails.
func (self *awsDisk) waitForAttachmentStatus(status string) error {
	// TODO: There may be a faster way to get this when we're attaching locally
	attempt := 0
	maxAttempts := 60
	for {
		info, err := self.describeVolume()
		if err != nil {
			return err
		}
		if len(info.Attachments) > 1 {
			glog.Warningf("Found multiple attachments for volume: %v", info)
		}
		// Take the state of the (expected single) attachment; if there are
		// several, the last one with a non-nil state wins.
		attachmentStatus := ""
		for _, attachment := range info.Attachments {
			if attachmentStatus != "" {
				glog.Warning("Found multiple attachments: ", info)
			}
			if attachment.State != nil {
				attachmentStatus = *attachment.State
			} else {
				// Shouldn't happen, but don't panic...
				glog.Warning("Ignoring nil attachment state: ", attachment)
			}
		}
		// No attachments reported means the volume is detached.
		if attachmentStatus == "" {
			attachmentStatus = "detached"
		}
		if attachmentStatus == status {
			return nil
		}
		glog.V(2).Infof("Waiting for volume state: actual=%s, desired=%s", attachmentStatus, status)
		attempt++
		if attempt > maxAttempts {
			glog.Warningf("Timeout waiting for volume state: actual=%s, desired=%s", attachmentStatus, status)
			return errors.New("Timeout waiting for volume state")
		}
		time.Sleep(1 * time.Second)
	}
}
// deleteVolume removes the EBS volume backing this disk.
// It returns (true, nil) when the volume was deleted, (false, nil) when
// EC2 reports the volume as already gone, and (false, err) otherwise.
func (self *awsDisk) deleteVolume() (bool, error) {
	request := &ec2.DeleteVolumeInput{VolumeId: aws.String(self.awsID)}
	if _, err := self.ec2.DeleteVolume(request); err != nil {
		awsError, ok := err.(awserr.Error)
		if ok && awsError.Code() == "InvalidVolume.NotFound" {
			// Already deleted: not an error, but nothing happened.
			return false, nil
		}
		return false, fmt.Errorf("error deleting EBS volumes: %v", err)
	}
	return true, nil
}
// Builds the awsInstance for the EC2 instance on which we are running.
// This is called when the AWSCloud is initialized, and should not be called otherwise (because the awsInstance for the local instance is a singleton with drive mapping state)
func (c *AWSCloud) buildSelfAWSInstance() (*awsInstance, error) {
	// Guard against double initialization: the self instance must be built
	// exactly once, as it carries mount-device reservation state.
	if c.selfAWSInstance != nil {
		panic("do not call buildSelfAWSInstance directly")
	}
	instanceId, err := c.metadata.GetMetadata("instance-id")
	if err != nil {
		return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %v", err)
	}
	// We want to fetch the hostname via the EC2 metadata service
	// (`GetMetadata("local-hostname")`): But see #11543 - we need to use
	// the EC2 API to get the privateDnsName in case of a private DNS zone
	// e.g. mydomain.io, because the metadata service returns the wrong
	// hostname. Once we're doing that, we might as well get all our
	// information from the instance returned by the EC2 API - it is a
	// single API call to get all the information, and it means we don't
	// have two code paths.
	instance, err := c.getInstanceByID(instanceId)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %v", instanceId, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// getAwsInstance returns the awsInstance for the node named nodeName,
// or the cached 'self' instance when nodeName is empty.
func (c *AWSCloud) getAwsInstance(nodeName string) (*awsInstance, error) {
	if nodeName == "" {
		return c.selfAWSInstance, nil
	}
	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %v", nodeName, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// Implements Volumes.AttachDisk
//
// Reserves a mount device on the instance, issues AttachVolume (unless the
// volume is already attached), waits for EC2 to report "attached", and
// returns the in-instance device path (/dev/xvdX).
func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, err := c.getAwsInstance(instanceName)
	if err != nil {
		return "", err
	}
	if readOnly {
		// TODO: We could enforce this when we mount the volume (?)
		// TODO: We could also snapshot the volume and attach copies of it
		return "", errors.New("AWS volumes cannot be mounted read-only")
	}
	// assign=true: reserve a device letter in awsInstance.attaching.
	mountDevice, alreadyAttached, err := awsInstance.getMountDevice(disk.awsID, true)
	if err != nil {
		return "", err
	}
	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDevice)
	// In the EC2 API, it is sometimes is /dev/sdX and sometimes /dev/xvdX
	// We are running on the node here, so we check if /dev/xvda exists to determine this
	ec2Device := "/dev/xvd" + string(mountDevice)
	if _, err := os.Stat("/dev/xvda"); os.IsNotExist(err) {
		ec2Device = "/dev/sd" + string(mountDevice)
	}
	// attachEnded is set to true if the attach operation completed
	// (successfully or not)
	attachEnded := false
	defer func() {
		// Releases the device reservation once the operation is over.
		if attachEnded {
			awsInstance.endAttaching(disk.awsID, mountDevice)
		}
	}()
	if !alreadyAttached {
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   aws.String(disk.awsID),
		}
		attachResponse, err := c.ec2.AttachVolume(request)
		if err != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", fmt.Errorf("Error attaching EBS volume: %v", err)
		}
		glog.V(2).Infof("AttachVolume request returned %v", attachResponse)
	}
	// NOTE(review): if this wait fails, attachEnded stays false and the
	// reservation in awsInstance.attaching is never released — looks like
	// a leak of the device letter; confirm whether this is intentional.
	err = disk.waitForAttachmentStatus("attached")
	if err != nil {
		return "", err
	}
	attachEnded = true
	return hostDevice, nil
}
// DetachDisk implements Volumes.DetachDisk.
//
// It detaches the named EBS volume from the given instance (or from the
// running instance when instanceName is empty), waits until EC2 reports
// the volume as detached, releases the in-memory device reservation, and
// returns the host device path the volume had been attached at.
//
// The receiver is named c (not aws, as before) so that it does not shadow
// the aws SDK package, matching the other *AWSCloud methods.
func (c *AWSCloud) DetachDisk(diskName string, instanceName string) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, err := c.getAwsInstance(instanceName)
	if err != nil {
		return "", err
	}
	// assign=false: only look up the existing device; do not reserve one.
	mountDevice, alreadyAttached, err := awsInstance.getMountDevice(disk.awsID, false)
	if err != nil {
		return "", err
	}
	if !alreadyAttached {
		glog.Warning("DetachDisk called on non-attached disk: ", diskName)
		// TODO: Continue? Tolerate non-attached error in DetachVolume?
	}
	request := ec2.DetachVolumeInput{
		InstanceId: &awsInstance.awsID,
		VolumeId:   &disk.awsID,
	}
	response, err := c.ec2.DetachVolume(&request)
	if err != nil {
		return "", fmt.Errorf("error detaching EBS volume: %v", err)
	}
	if response == nil {
		return "", errors.New("no response from DetachVolume")
	}
	err = disk.waitForAttachmentStatus("detached")
	if err != nil {
		return "", err
	}
	if mountDevice != "" {
		// Release the in-memory device-letter reservation, if any.
		awsInstance.endAttaching(disk.awsID, mountDevice)
	}
	hostDevicePath := "/dev/xvd" + string(mountDevice)
	return hostDevicePath, err
}
// Implements Volumes.CreateVolume
//
// Creates an EBS volume in the current instance's AZ, applies the
// requested tags (plus the cluster tag when configured), and returns the
// volume name in the "aws://AZ/vol-id" form understood by newAWSDisk.
func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) {
	// Default to creating in the current zone
	// TODO: Spread across zones?
	createAZ := s.selfAWSInstance.availabilityZone
	// TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?)
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = &createAZ
	volSize := int64(volumeOptions.CapacityGB)
	request.Size = &volSize
	request.VolumeType = aws.String(DefaultVolumeType)
	response, err := s.ec2.CreateVolume(request)
	if err != nil {
		return "", err
	}
	az := orEmpty(response.AvailabilityZone)
	awsID := orEmpty(response.VolumeId)
	volumeName := "aws://" + az + "/" + awsID
	// apply tags
	tags := make(map[string]string)
	for k, v := range volumeOptions.Tags {
		tags[k] = v
	}
	if s.getClusterName() != "" {
		tags[TagNameKubernetesCluster] = s.getClusterName()
	}
	if len(tags) != 0 {
		// Tagging can fail (eventual consistency); if it does, try to roll
		// back by deleting the freshly created volume.
		if err := s.createTags(awsID, tags); err != nil {
			// delete the volume and hope it succeeds
			_, delerr := s.DeleteDisk(volumeName)
			if delerr != nil {
				// delete did not succeed, we have a stray volume!
				return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %v", volumeName, delerr)
			}
			return "", fmt.Errorf("error tagging volume %s: %v", volumeName, err)
		}
	}
	return volumeName, nil
}
// DeleteDisk implements Volumes.DeleteDisk by resolving the volume name
// and delegating to the disk's deleteVolume.
func (c *AWSCloud) DeleteDisk(volumeName string) (bool, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return false, err
	}
	return disk.deleteVolume()
}
// GetVolumeLabels implements Volumes.GetVolumeLabels.
// It looks up the volume's availability zone and derives the
// failure-domain (zone) and region labels from it.
func (c *AWSCloud) GetVolumeLabels(volumeName string) (map[string]string, error) {
	disk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return nil, err
	}
	info, err := disk.describeVolume()
	if err != nil {
		return nil, err
	}
	az := aws.StringValue(info.AvailabilityZone)
	if az == "" {
		return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
	}
	region, err := azToRegion(az)
	if err != nil {
		return nil, err
	}
	return map[string]string{
		unversioned.LabelZoneFailureDomain: az,
		unversioned.LabelZoneRegion:        region,
	}, nil
}
// describeLoadBalancer fetches the current state of the named ELB.
// It returns (nil, nil) when the load balancer does not exist; if ELB
// unexpectedly returns several descriptions, the last one wins (with an
// error logged).
func (s *AWSCloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) {
	request := &elb.DescribeLoadBalancersInput{
		LoadBalancerNames: []*string{&name},
	}
	response, err := s.elb.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "LoadBalancerNotFound" {
			return nil, nil
		}
		return nil, err
	}

	var ret *elb.LoadBalancerDescription
	for _, loadBalancer := range response.LoadBalancerDescriptions {
		if ret != nil {
			glog.Errorf("Found multiple load balancers with name: %s", name)
		}
		ret = loadBalancer
	}
	return ret, nil
}
// findVPCID determines the VPC of the running instance from the EC2
// metadata service by walking the attached network interfaces; the first
// interface reporting a vpc-id wins.
func (self *AWSCloud) findVPCID() (string, error) {
	macs, err := self.metadata.GetMetadata("network/interfaces/macs/")
	if err != nil {
		return "", fmt.Errorf("Could not list interfaces of the instance: %v", err)
	}
	for _, macPath := range strings.Split(macs, "\n") {
		if macPath == "" {
			continue
		}
		url := fmt.Sprintf("network/interfaces/macs/%svpc-id", macPath)
		if vpcID, err := self.metadata.GetMetadata(url); err == nil {
			return vpcID, nil
		}
	}
	return "", fmt.Errorf("Could not find VPC ID in instance metadata")
}
// findSecurityGroup retrieves the security group with the given id from
// the AWS API, or returns (nil, nil) when no such group exists.
func (s *AWSCloud) findSecurityGroup(securityGroupId string) (*ec2.SecurityGroup, error) {
	// We don't apply our tag filters because we are retrieving by ID
	request := &ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{&securityGroupId},
	}
	groups, err := s.ec2.DescribeSecurityGroups(request)
	if err != nil {
		glog.Warningf("Error retrieving security group: %q", err)
		return nil, err
	}
	switch len(groups) {
	case 0:
		return nil, nil
	case 1:
		return groups[0], nil
	default:
		// This should not be possible - ids should be unique
		return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupId)
	}
}
// isEqualIntPointer reports whether two *int64 values are equal:
// both nil, or both non-nil and pointing at equal integers.
// (The original performed two separate nil checks; the second could only
// ever return false, so the pair collapses to a single comparison.)
func isEqualIntPointer(l, r *int64) bool {
	if l == nil || r == nil {
		return l == r
	}
	return *l == *r
}
// isEqualStringPointer reports whether two *string values are equal:
// both nil, or both non-nil and pointing at equal strings.
// (Simplified from two sequential nil checks, the second of which could
// only return false.)
func isEqualStringPointer(l, r *string) bool {
	if l == nil || r == nil {
		return l == r
	}
	return *l == *r
}
// ipPermissionExists reports whether newPermission is covered by existing:
// same ports and protocol, every IP range of newPermission present in
// existing, and every user/group pair of newPermission present in existing
// (comparing user ids only when compareGroupUserIDs is set).
func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupUserIDs bool) bool {
	if !isEqualIntPointer(newPermission.FromPort, existing.FromPort) {
		return false
	}
	if !isEqualIntPointer(newPermission.ToPort, existing.ToPort) {
		return false
	}
	if !isEqualStringPointer(newPermission.IpProtocol, existing.IpProtocol) {
		return false
	}
	// Check only if newPermission is a subset of existing. Usually it has zero or one elements.
	// Not doing actual CIDR math yet; not clear it's needed, either.
	glog.V(4).Infof("Comparing %v to %v", newPermission, existing)
	if len(newPermission.IpRanges) > len(existing.IpRanges) {
		return false
	}
	for j := range newPermission.IpRanges {
		found := false
		for k := range existing.IpRanges {
			if isEqualStringPointer(newPermission.IpRanges[j].CidrIp, existing.IpRanges[k].CidrIp) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	// Every user/group pair of newPermission must be present in existing.
	// (The previous version returned after examining only the first pair,
	// so a permission with several pairs was reported as existing when
	// only its first pair matched.)
	for _, leftPair := range newPermission.UserIdGroupPairs {
		found := false
		for _, rightPair := range existing.UserIdGroupPairs {
			if isEqualUserGroupPair(leftPair, rightPair, compareGroupUserIDs) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// isEqualUserGroupPair reports whether two user/group pairs refer to the
// same group, and — when compareGroupUserIDs is set — the same user id.
// The debug log uses aws.StringValue rather than dereferencing GroupId
// directly, so a pair with a nil GroupId no longer panics here.
func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool {
	glog.V(2).Infof("Comparing %v to %v", aws.StringValue(l.GroupId), aws.StringValue(r.GroupId))
	if isEqualStringPointer(l.GroupId, r.GroupId) {
		if compareGroupUserIDs {
			if isEqualStringPointer(l.UserId, r.UserId) {
				return true
			}
		} else {
			return true
		}
	}
	return false
}
// Makes sure the security group ingress is exactly the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
func (s *AWSCloud) setSecurityGroupIngress(securityGroupId string, permissions IPPermissionSet) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warning("Error retrieving security group", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupId)
	}
	glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions)
	actual := NewIPPermissionSet(group.IpPermissions...)
	// EC2 groups rules together, for example combining:
	//
	// { Port=80, Range=[A] } and { Port=80, Range=[B] }
	//
	// into { Port=80, Range=[A,B] }
	//
	// We have to ungroup them, because otherwise the logic becomes really
	// complicated, and also because if we have Range=[A,B] and we try to
	// add Range=[A] then EC2 complains about a duplicate rule.
	permissions = permissions.Ungroup()
	actual = actual.Ungroup()
	// Compute the delta between desired and actual state as two sets:
	// rules to revoke and rules to authorize.
	remove := actual.Difference(permissions)
	add := permissions.Difference(actual)
	if add.Len() == 0 && remove.Len() == 0 {
		return false, nil
	}
	// TODO: There is a limit in VPC of 100 rules per security group, so we
	// probably should try grouping or combining to fit under this limit.
	// But this is only used on the ELB security group currently, so it
	// would require (ports * CIDRS) > 100. Also, it isn't obvious exactly
	// how removing single permissions from compound rules works, and we
	// don't want to accidentally open more than intended while we're
	// applying changes.
	if add.Len() != 0 {
		glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, add.List())
		request := &ec2.AuthorizeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupId
		request.IpPermissions = add.List()
		_, err = s.ec2.AuthorizeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error authorizing security group ingress: %v", err)
		}
	}
	if remove.Len() != 0 {
		glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupId, remove.List())
		request := &ec2.RevokeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupId
		request.IpPermissions = remove.List()
		_, err = s.ec2.RevokeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error revoking security group ingress: %v", err)
		}
	}
	return true, nil
}
// addSecurityGroupIngress authorizes the given ingress permissions on the
// security group, skipping any that are already present.
// Returns true if and only if changes were made.
// The security group must already exist.
func (s *AWSCloud) addSecurityGroupIngress(securityGroupId string, addPermissions []*ec2.IpPermission) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warningf("Error retrieving security group: %v", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupId)
	}
	glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions)

	changes := []*ec2.IpPermission{}
	for _, addPermission := range addPermissions {
		// Only compare group user ids when the candidate permission
		// actually carries one.
		hasUserID := false
		for i := range addPermission.UserIdGroupPairs {
			if addPermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}

		alreadyPresent := false
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(addPermission, groupPermission, hasUserID) {
				alreadyPresent = true
				break
			}
		}
		if !alreadyPresent {
			changes = append(changes, addPermission)
		}
	}
	if len(changes) == 0 {
		return false, nil
	}

	glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, changes)
	request := &ec2.AuthorizeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupId
	request.IpPermissions = changes
	if _, err = s.ec2.AuthorizeSecurityGroupIngress(request); err != nil {
		glog.Warning("Error authorizing security group ingress", err)
		return false, fmt.Errorf("error authorizing security group ingress: %v", err)
	}
	return true, nil
}
// Makes sure the security group no longer includes the specified permissions
// Returns true if and only if changes were made
// If the security group no longer exists, will return (false, nil)
func (s *AWSCloud) removeSecurityGroupIngress(securityGroupId string, removePermissions []*ec2.IpPermission) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warningf("Error retrieving security group: %v", err)
		return false, err
	}
	if group == nil {
		glog.Warning("Security group not found: ", securityGroupId)
		return false, nil
	}
	changes := []*ec2.IpPermission{}
	for _, removePermission := range removePermissions {
		// Only compare group user ids when the permission being removed
		// actually carries one.
		hasUserID := false
		for i := range removePermission.UserIdGroupPairs {
			if removePermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}
		var found *ec2.IpPermission
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(removePermission, groupPermission, hasUserID) {
				// Note: we revoke removePermission (the rule as requested),
				// not the matched groupPermission.
				found = removePermission
				break
			}
		}
		if found != nil {
			changes = append(changes, found)
		}
	}
	if len(changes) == 0 {
		return false, nil
	}
	glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupId, changes)
	request := &ec2.RevokeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupId
	request.IpPermissions = changes
	_, err = s.ec2.RevokeSecurityGroupIngress(request)
	if err != nil {
		glog.Warningf("Error revoking security group ingress: %v", err)
		return false, err
	}
	return true, nil
}
// ensureClusterTags verifies that a resource carries our cluster tags.
// Missing tags are added (recovering from a failure between resource
// creation and tagging); a tag that belongs to a different cluster is an
// error.
func (s *AWSCloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error {
	actualTags := make(map[string]string)
	for _, tag := range tags {
		actualTags[aws.StringValue(tag.Key)] = aws.StringValue(tag.Value)
	}

	addTags := make(map[string]string)
	for k, expected := range s.filterTags {
		actual := actualTags[k]
		switch {
		case actual == expected:
			// Already correct; nothing to do.
		case actual == "":
			glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, k, expected)
			addTags[k] = expected
		default:
			return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, k, actual, expected)
		}
	}

	if err := s.createTags(resourceID, addTags); err != nil {
		return fmt.Errorf("error adding missing tags to resource %q: %v", resourceID, err)
	}
	return nil
}
// Makes sure the security group exists.
// For multi-cluster isolation, name must be globally unique, for example derived from the service UUID.
// Returns the security group id or error
//
// Implements read-then-create with retry: a concurrent creator racing us
// produces InvalidGroup.Duplicate, which we handle by re-reading.
func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, error) {
	groupID := ""
	attempt := 0
	for {
		attempt++
		request := &ec2.DescribeSecurityGroupsInput{}
		filters := []*ec2.Filter{
			newEc2Filter("group-name", name),
			newEc2Filter("vpc-id", s.vpcID),
		}
		// Note that we do _not_ add our tag filters; group-name + vpc-id is the EC2 primary key.
		// However, we do check that it matches our tags.
		// If it doesn't have any tags, we tag it; this is how we recover if we failed to tag before.
		// If it has a different cluster's tags, that is an error.
		// This shouldn't happen because name is expected to be globally unique (UUID derived)
		request.Filters = filters
		securityGroups, err := s.ec2.DescribeSecurityGroups(request)
		if err != nil {
			return "", err
		}
		if len(securityGroups) >= 1 {
			if len(securityGroups) > 1 {
				glog.Warningf("Found multiple security groups with name: %q", name)
			}
			err := s.ensureClusterTags(aws.StringValue(securityGroups[0].GroupId), securityGroups[0].Tags)
			if err != nil {
				return "", err
			}
			return aws.StringValue(securityGroups[0].GroupId), nil
		}
		createRequest := &ec2.CreateSecurityGroupInput{}
		createRequest.VpcId = &s.vpcID
		createRequest.GroupName = &name
		createRequest.Description = &description
		createResponse, err := s.ec2.CreateSecurityGroup(createRequest)
		if err != nil {
			ignore := false
			switch err := err.(type) {
			case awserr.Error:
				// Likely a concurrent creation: retry the read path.
				if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
					glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry")
					ignore = true
				}
			}
			if !ignore {
				glog.Error("Error creating security group: ", err)
				return "", err
			}
			time.Sleep(1 * time.Second)
		} else {
			groupID = orEmpty(createResponse.GroupId)
			break
		}
	}
	if groupID == "" {
		return "", fmt.Errorf("created security group, but id was not returned: %s", name)
	}
	err := s.createTags(groupID, s.filterTags)
	if err != nil {
		// If we retry, ensureClusterTags will recover from this - it
		// will add the missing tags. We could delete the security
		// group here, but that doesn't feel like the right thing, as
		// the caller is likely to retry the create
		return "", fmt.Errorf("error tagging security group: %v", err)
	}
	return groupID, nil
}
// createTags calls EC2 CreateTags, but adds retry-on-failure logic
// We retry mainly because if we create an object, we cannot tag it until it is "fully created" (eventual consistency)
// The error code varies though (depending on what we are tagging), so we simply retry on all errors
func (s *AWSCloud) createTags(resourceID string, tags map[string]string) error {
	// len() of a nil map is 0, so the previous explicit nil check was
	// redundant.
	if len(tags) == 0 {
		return nil
	}
	var awsTags []*ec2.Tag
	for k, v := range tags {
		tag := &ec2.Tag{
			Key:   aws.String(k),
			Value: aws.String(v),
		}
		awsTags = append(awsTags, tag)
	}
	request := &ec2.CreateTagsInput{}
	request.Resources = []*string{&resourceID}
	request.Tags = awsTags
	// TODO: We really should do exponential backoff here
	attempt := 0
	maxAttempts := 60
	for {
		_, err := s.ec2.CreateTags(request)
		if err == nil {
			return nil
		}
		// We could check that the error is retryable, but the error code changes based on what we are tagging
		// SecurityGroup: InvalidGroup.NotFound
		attempt++
		if attempt > maxAttempts {
			glog.Warningf("Failed to create tags (too many attempts): %v", err)
			return err
		}
		glog.V(2).Infof("Failed to create tags; will retry.  Error was %v", err)
		time.Sleep(1 * time.Second)
	}
}
// findTag returns the value of the tag with the given key, and whether
// such a tag was present at all.
func findTag(tags []*ec2.Tag, key string) (string, bool) {
	for _, tag := range tags {
		if key == aws.StringValue(tag.Key) {
			return aws.StringValue(tag.Value), true
		}
	}
	return "", false
}
// findSubnets lists the subnets in our VPC that carry this cluster's tags.
// For maximal backwards compatibility, if no subnets are tagged it falls
// back to the subnet of the current instance; in a future version of k8s
// that fallback is likely to become an error.
func (c *AWSCloud) findSubnets() ([]*ec2.Subnet, error) {
	request := &ec2.DescribeSubnetsInput{}
	request.Filters = c.addFilters([]*ec2.Filter{newEc2Filter("vpc-id", c.vpcID)})
	subnets, err := c.ec2.DescribeSubnets(request)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %v", err)
	}
	if len(subnets) != 0 {
		return subnets, nil
	}

	// Nothing tagged: fall back to the current instance's subnet.
	glog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.")
	fallback := &ec2.DescribeSubnetsInput{}
	fallback.Filters = []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)}
	subnets, err = c.ec2.DescribeSubnets(fallback)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %v", err)
	}
	return subnets, nil
}
// Finds the subnets to use for an ELB we are creating.
// Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets.
// Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that.
func (s *AWSCloud) findELBSubnets(internalELB bool) ([]string, error) {
	vpcIDFilter := newEc2Filter("vpc-id", s.vpcID)

	subnets, err := s.findSubnets()
	if err != nil {
		return nil, err
	}

	// Route tables are needed to classify each subnet as public or private.
	rRequest := &ec2.DescribeRouteTablesInput{}
	rRequest.Filters = []*ec2.Filter{vpcIDFilter}
	rt, err := s.ec2.DescribeRouteTables(rRequest)
	if err != nil {
		return nil, fmt.Errorf("error describe route table: %v", err)
	}

	// Pick at most one subnet per availability zone.
	subnetsByAZ := make(map[string]*ec2.Subnet)
	for _, subnet := range subnets {
		az := aws.StringValue(subnet.AvailabilityZone)
		id := aws.StringValue(subnet.SubnetId)
		if az == "" || id == "" {
			glog.Warningf("Ignoring subnet with empty az/id: %v", subnet)
			continue
		}

		isPublic, err := isSubnetPublic(rt, id)
		if err != nil {
			return nil, err
		}
		if !internalELB && !isPublic {
			glog.V(2).Infof("Ignoring private subnet for public ELB %q", id)
			continue
		}

		existing := subnetsByAZ[az]
		if existing == nil {
			subnetsByAZ[az] = subnet
			continue
		}

		// Try to break the tie using a tag
		var tagName string
		if internalELB {
			tagName = TagNameSubnetInternalELB
		} else {
			tagName = TagNameSubnetPublicELB
		}

		_, existingHasTag := findTag(existing.Tags, tagName)
		_, subnetHasTag := findTag(subnet.Tags, tagName)

		if existingHasTag != subnetHasTag {
			if subnetHasTag {
				subnetsByAZ[az] = subnet
			}
			continue
		}

		// TODO: Should this be an error?
		glog.Warningf("Found multiple subnets in AZ %q; making arbitrary choice between subnets %q and %q", az, *existing.SubnetId, *subnet.SubnetId)
		continue
	}

	var subnetIDs []string
	for _, subnet := range subnetsByAZ {
		subnetIDs = append(subnetIDs, aws.StringValue(subnet.SubnetId))
	}

	return subnetIDs, nil
}
// isSubnetPublic reports whether the given subnet is public, i.e. whether
// its effective route table contains a route through an internet gateway.
func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) {
	// First look for a route table explicitly associated with the subnet.
	var subnetTable *ec2.RouteTable
	for _, table := range rt {
		for _, assoc := range table.Associations {
			if aws.StringValue(assoc.SubnetId) == subnetID {
				subnetTable = table
				break
			}
		}
	}

	if subnetTable == nil {
		// If there is no explicit association, the subnet will be implicitly
		// associated with the VPC's main routing table.
		for _, table := range rt {
			for _, assoc := range table.Associations {
				if aws.BoolValue(assoc.Main) == true {
					glog.V(4).Infof("Assuming implicit use of main routing table %s for %s",
						aws.StringValue(table.RouteTableId), subnetID)
					subnetTable = table
					break
				}
			}
		}
	}

	if subnetTable == nil {
		return false, fmt.Errorf("Could not locate routing table for subnet %s", subnetID)
	}

	for _, route := range subnetTable.Routes {
		// There is no direct way in the AWS API to determine if a subnet is public or private.
		// A public subnet is one which has an internet gateway route
		// we look for the gatewayId and make sure it has the prefix of igw to differentiate
		// from the default in-subnet route which is called "local"
		// or other virtual gateway (starting with vgw)
		// or vpc peering connections (starting with pcx).
		if strings.HasPrefix(aws.StringValue(route.GatewayId), "igw") {
			return true, nil
		}
	}

	return false, nil
}
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
//
// Validates the service spec (TCP only, no affinity, no fixed IP), picks
// ELB subnets, creates/updates the ELB security group, builds listeners,
// then creates or reconciles the ELB itself, its health check, instance
// security-group rules, and registered instances.
func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string, annotations map[string]string) (*api.LoadBalancerStatus, error) {
	glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)",
		apiService.Namespace, apiService.Name, s.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, annotations)

	if apiService.Spec.SessionAffinity != api.ServiceAffinityNone {
		// ELB supports sticky sessions, but only when configured for HTTP/HTTPS
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", apiService.Spec.SessionAffinity)
	}

	if len(apiService.Spec.Ports) == 0 {
		return nil, fmt.Errorf("requested load balancer with no ports")
	}

	for _, port := range apiService.Spec.Ports {
		if port.Protocol != api.ProtocolTCP {
			return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB")
		}
	}

	if apiService.Spec.LoadBalancerIP != "" {
		return nil, fmt.Errorf("LoadBalancerIP cannot be specified for AWS ELB")
	}

	instances, err := s.getInstancesByNodeNames(hosts)
	if err != nil {
		return nil, err
	}

	sourceRanges, err := service.GetLoadBalancerSourceRanges(annotations)
	if err != nil {
		return nil, err
	}

	// Determine if this is tagged as an Internal ELB
	internalELB := false
	internalAnnotation := annotations[ServiceAnnotationLoadBalancerInternal]
	if internalAnnotation != "" {
		if internalAnnotation != "0.0.0.0/0" {
			return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is 0.0.0.0/0", ServiceAnnotationLoadBalancerInternal, internalAnnotation)
		}
		if !service.IsAllowAll(sourceRanges) {
			// TODO: Unify the two annotations
			return nil, fmt.Errorf("source-range annotation cannot be combined with the internal-elb annotation")
		}
		internalELB = true
	}

	// Find the subnets that the ELB will live in
	subnetIDs, err := s.findELBSubnets(internalELB)
	if err != nil {
		glog.Error("Error listing subnets in VPC: ", err)
		return nil, err
	}

	// Bail out early if there are no subnets
	if len(subnetIDs) == 0 {
		return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
	}

	loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
	serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}

	// Create a security group for the load balancer
	var securityGroupID string
	{
		sgName := "k8s-elb-" + loadBalancerName
		sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
		securityGroupID, err = s.ensureSecurityGroup(sgName, sgDescription)
		if err != nil {
			glog.Error("Error creating load balancer security group: ", err)
			return nil, err
		}

		ec2SourceRanges := []*ec2.IpRange{}
		for _, sourceRange := range sourceRanges.StringSlice() {
			ec2SourceRanges = append(ec2SourceRanges, &ec2.IpRange{CidrIp: aws.String(sourceRange)})
		}

		// Open each service port to the allowed source ranges.
		permissions := NewIPPermissionSet()
		for _, port := range apiService.Spec.Ports {
			portInt64 := int64(port.Port)
			protocol := strings.ToLower(string(port.Protocol))

			permission := &ec2.IpPermission{}
			permission.FromPort = &portInt64
			permission.ToPort = &portInt64
			permission.IpRanges = ec2SourceRanges
			permission.IpProtocol = &protocol

			permissions.Insert(permission)
		}
		_, err = s.setSecurityGroupIngress(securityGroupID, permissions)
		if err != nil {
			return nil, err
		}
	}
	securityGroupIDs := []string{securityGroupID}

	// Figure out what mappings we want on the load balancer
	listeners := []*elb.Listener{}
	for _, port := range apiService.Spec.Ports {
		if port.NodePort == 0 {
			glog.Errorf("Ignoring port without NodePort defined: %v", port)
			continue
		}
		instancePort := int64(port.NodePort)
		loadBalancerPort := int64(port.Port)
		protocol := strings.ToLower(string(port.Protocol))

		listener := &elb.Listener{}
		listener.InstancePort = &instancePort
		listener.LoadBalancerPort = &loadBalancerPort
		listener.Protocol = &protocol
		listener.InstanceProtocol = &protocol

		listeners = append(listeners, listener)
	}

	// Build the load balancer itself
	loadBalancer, err := s.ensureLoadBalancer(serviceName, loadBalancerName, listeners, subnetIDs, securityGroupIDs, internalELB)
	if err != nil {
		return nil, err
	}

	err = s.ensureLoadBalancerHealthCheck(loadBalancer, listeners)
	if err != nil {
		return nil, err
	}

	err = s.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances)
	if err != nil {
		glog.Warningf("Error opening ingress rules for the load balancer to the instances: %v", err)
		return nil, err
	}

	err = s.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances)
	if err != nil {
		glog.Warningf("Error registering instances with the load balancer: %v", err)
		return nil, err
	}

	glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, orEmpty(loadBalancer.DNSName))

	// TODO: Wait for creation?

	status := toStatus(loadBalancer)
	return status, nil
}
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer.
// It reports (status, true, nil) when the service's ELB exists, and
// (nil, false, nil) when no ELB with the derived name is found.
func (s *AWSCloud) GetLoadBalancer(service *api.Service) (*api.LoadBalancerStatus, bool, error) {
    name := cloudprovider.GetLoadBalancerName(service)

    description, err := s.describeLoadBalancer(name)
    if err != nil {
        return nil, false, err
    }
    if description == nil {
        // Not found is not an error for this interface.
        return nil, false, nil
    }
    return toStatus(description), true, nil
}
// toStatus converts an ELB description into a Kubernetes LoadBalancerStatus,
// exposing the ELB's DNS name (when present) as a single ingress hostname.
func toStatus(lb *elb.LoadBalancerDescription) *api.LoadBalancerStatus {
    status := &api.LoadBalancerStatus{}
    if !isNilOrEmpty(lb.DNSName) {
        status.Ingress = []api.LoadBalancerIngress{
            {Hostname: orEmpty(lb.DNSName)},
        }
    }
    return status
}
// findSecurityGroupForInstance returns the security group to use for an instance, or nil.
// We only create instances with one security group, so we don't expect multiple security groups.
// However, if there are multiple security groups, we will choose the one tagged with our cluster filter.
// Otherwise we will return an error.
func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error) {
    instanceID := aws.StringValue(instance.InstanceId)

    // Partition the instance's groups into cluster-tagged and untagged.
    var tagged, untagged []*ec2.GroupIdentifier
    for _, group := range instance.SecurityGroups {
        groupID := aws.StringValue(group.GroupId)
        if groupID == "" {
            glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group)
            continue
        }
        if _, ok := taggedSecurityGroups[groupID]; ok {
            tagged = append(tagged, group)
        } else {
            untagged = append(untagged, group)
        }
    }

    switch {
    case len(tagged) == 1:
        return tagged[0], nil
    case len(tagged) > 1:
        // We create instances with one SG
        // If users create multiple SGs, they must tag one of them as being k8s owned
        return nil, fmt.Errorf("Multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged", instanceID)
    case len(untagged) == 1:
        // For back-compat, we will allow a single untagged SG
        return untagged[0], nil
    case len(untagged) > 1:
        return nil, fmt.Errorf("Multiple untagged security groups found for instance %s; ensure the k8s security group is tagged", instanceID)
    }

    glog.Warningf("No security group found for instance %q", instanceID)
    return nil, nil
}
// getTaggedSecurityGroups returns all the security groups that are tagged as
// being part of our cluster, keyed by group ID.
func (s *AWSCloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) {
    request := &ec2.DescribeSecurityGroupsInput{Filters: s.addFilters(nil)}

    groups, err := s.ec2.DescribeSecurityGroups(request)
    if err != nil {
        return nil, fmt.Errorf("error querying security groups: %v", err)
    }

    byID := make(map[string]*ec2.SecurityGroup)
    for _, sg := range groups {
        id := aws.StringValue(sg.GroupId)
        if id == "" {
            glog.Warningf("Ignoring group without id: %v", sg)
            continue
        }
        byID[id] = sg
    }
    return byID, nil
}
// Open security group ingress rules on the instances so that the load balancer can talk to them.
// Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances.
// The algorithm is a diff: desired = groups of allInstances, actual = groups currently
// allowing ingress from the LB's security group; rules are added/revoked to reconcile.
func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error {
    if s.cfg.Global.DisableSecurityGroupIngress {
        // Operator disabled automatic ingress management via config; nothing to do.
        return nil
    }

    // Determine the load balancer security group id
    loadBalancerSecurityGroupId := ""
    for _, securityGroup := range lb.SecurityGroups {
        if isNilOrEmpty(securityGroup) {
            continue
        }
        if loadBalancerSecurityGroupId != "" {
            // We create LBs with one SG
            glog.Warningf("Multiple security groups for load balancer: %q", orEmpty(lb.LoadBalancerName))
        }
        // Keep the last non-empty group id seen.
        loadBalancerSecurityGroupId = *securityGroup
    }
    if loadBalancerSecurityGroupId == "" {
        return fmt.Errorf("Could not determine security group for load balancer: %s", orEmpty(lb.LoadBalancerName))
    }

    // Get the actual list of groups that allow ingress from the load-balancer
    describeRequest := &ec2.DescribeSecurityGroupsInput{}
    filters := []*ec2.Filter{}
    filters = append(filters, newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupId))
    describeRequest.Filters = s.addFilters(filters)
    actualGroups, err := s.ec2.DescribeSecurityGroups(describeRequest)
    if err != nil {
        return fmt.Errorf("error querying security groups for ELB: %v", err)
    }

    taggedSecurityGroups, err := s.getTaggedSecurityGroups()
    if err != nil {
        return fmt.Errorf("error querying for tagged security groups: %v", err)
    }

    // Open the firewall from the load balancer to the instance
    // We don't actually have a trivial way to know in advance which security group the instance is in
    // (it is probably the minion security group, but we don't easily have that).
    // However, we _do_ have the list of security groups on the instance records.

    // Map containing the changes we want to make; true to add, false to remove
    instanceSecurityGroupIds := map[string]bool{}

    // Scan instances for groups we want open
    for _, instance := range allInstances {
        securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups)
        if err != nil {
            return err
        }
        if securityGroup == nil {
            glog.Warning("Ignoring instance without security group: ", orEmpty(instance.InstanceId))
            continue
        }
        id := aws.StringValue(securityGroup.GroupId)
        if id == "" {
            glog.Warningf("found security group without id: %v", securityGroup)
            continue
        }
        instanceSecurityGroupIds[id] = true
    }

    // Compare to actual groups
    for _, actualGroup := range actualGroups {
        actualGroupID := aws.StringValue(actualGroup.GroupId)
        if actualGroupID == "" {
            glog.Warning("Ignoring group without ID: ", actualGroup)
            continue
        }

        adding, found := instanceSecurityGroupIds[actualGroupID]
        if found && adding {
            // We don't need to make a change; the permission is already in place
            delete(instanceSecurityGroupIds, actualGroupID)
        } else {
            // This group is not needed by allInstances; delete it
            instanceSecurityGroupIds[actualGroupID] = false
        }
    }

    // Apply the computed diff: add missing rules, revoke stale ones.
    for instanceSecurityGroupId, add := range instanceSecurityGroupIds {
        if add {
            glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId)
        } else {
            glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId)
        }
        sourceGroupId := &ec2.UserIdGroupPair{}
        sourceGroupId.GroupId = &loadBalancerSecurityGroupId

        // "-1" is the EC2 wildcard meaning "all protocols".
        allProtocols := "-1"

        permission := &ec2.IpPermission{}
        permission.IpProtocol = &allProtocols
        permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupId}

        permissions := []*ec2.IpPermission{permission}

        if add {
            changed, err := s.addSecurityGroupIngress(instanceSecurityGroupId, permissions)
            if err != nil {
                return err
            }
            if !changed {
                glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId)
            }
        } else {
            changed, err := s.removeSecurityGroupIngress(instanceSecurityGroupId, permissions)
            if err != nil {
                return err
            }
            if !changed {
                glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId)
            }
        }
    }

    return nil
}
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
// Deletion proceeds in three phases: revoke instance ingress rules, delete the
// ELB itself, then delete the ELB's security groups (retrying while AWS still
// reports DependencyViolation because the ELB is deleting in the background).
func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error {
    loadBalancerName := cloudprovider.GetLoadBalancerName(service)

    lb, err := s.describeLoadBalancer(loadBalancerName)
    if err != nil {
        return err
    }

    if lb == nil {
        // Idempotent: already gone means success.
        glog.Info("Load balancer already deleted: ", loadBalancerName)
        return nil
    }

    {
        // De-authorize the load balancer security group from the instances security group
        // (passing nil instances makes every existing rule stale, so all are revoked).
        err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
        if err != nil {
            glog.Error("Error deregistering load balancer from instance security groups: ", err)
            return err
        }
    }

    {
        // Delete the load balancer itself
        request := &elb.DeleteLoadBalancerInput{}
        request.LoadBalancerName = lb.LoadBalancerName

        _, err = s.elb.DeleteLoadBalancer(request)
        if err != nil {
            // TODO: Check if error was because load balancer was concurrently deleted
            glog.Error("Error deleting load balancer: ", err)
            return err
        }
    }

    {
        // Delete the security group(s) for the load balancer
        // Note that this is annoying: the load balancer disappears from the API immediately, but it is still
        // deleting in the background.  We get a DependencyViolation until the load balancer has deleted itself

        // Collect the security groups to delete
        securityGroupIDs := map[string]struct{}{}
        for _, securityGroupID := range lb.SecurityGroups {
            if isNilOrEmpty(securityGroupID) {
                glog.Warning("Ignoring empty security group in ", service.Name)
                continue
            }
            securityGroupIDs[*securityGroupID] = struct{}{}
        }

        // Loop through and try to delete them
        timeoutAt := time.Now().Add(time.Second * 600)
        for {
            for securityGroupID := range securityGroupIDs {
                request := &ec2.DeleteSecurityGroupInput{}
                // Taking the address of the loop variable is safe here: the pointer
                // is only dereferenced during the DeleteSecurityGroup call below.
                request.GroupId = &securityGroupID
                _, err := s.ec2.DeleteSecurityGroup(request)
                if err == nil {
                    delete(securityGroupIDs, securityGroupID)
                } else {
                    ignore := false
                    if awsError, ok := err.(awserr.Error); ok {
                        if awsError.Code() == "DependencyViolation" {
                            glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID)
                            ignore = true
                        }
                    }
                    if !ignore {
                        return fmt.Errorf("error while deleting load balancer security group (%s): %v", securityGroupID, err)
                    }
                }
            }

            if len(securityGroupIDs) == 0 {
                glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name)
                break
            }

            if time.Now().After(timeoutAt) {
                ids := []string{}
                for id := range securityGroupIDs {
                    ids = append(ids, id)
                }
                return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ","))
            }

            glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name)

            // Poll every 10s until the groups become deletable or we time out.
            time.Sleep(10 * time.Second)
        }
    }

    return nil
}
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer.
// It looks up the service's ELB, reconciles the set of registered instances to
// match hosts, and refreshes the security-group rules that let the ELB reach
// those instances. Returns an error if the ELB does not exist.
func (s *AWSCloud) UpdateLoadBalancer(service *api.Service, hosts []string) error {
    instances, err := s.getInstancesByNodeNames(hosts)
    if err != nil {
        return err
    }

    loadBalancerName := cloudprovider.GetLoadBalancerName(service)
    lb, err := s.describeLoadBalancer(loadBalancerName)
    if err != nil {
        return err
    }

    if lb == nil {
        return fmt.Errorf("Load balancer not found")
    }

    err = s.ensureLoadBalancerInstances(orEmpty(lb.LoadBalancerName), lb.Instances, instances)
    if err != nil {
        // BUG FIX: this previously returned nil, silently swallowing
        // instance-registration failures; propagate the error instead.
        return err
    }

    err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, instances)
    if err != nil {
        return err
    }

    return nil
}
// getInstanceByID returns the instance with the specified ID.
// Errors if zero or more than one instance matches.
func (a *AWSCloud) getInstanceByID(instanceID string) (*ec2.Instance, error) {
    byID, err := a.getInstancesByIDs([]*string{&instanceID})
    if err != nil {
        return nil, err
    }
    switch len(byID) {
    case 0:
        return nil, fmt.Errorf("no instances found for instance: %s", instanceID)
    case 1:
        return byID[instanceID], nil
    default:
        return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
    }
}
// getInstancesByIDs fetches the given instances and returns them keyed by
// instance ID. Instances without an ID are silently dropped.
func (a *AWSCloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) {
    byID := make(map[string]*ec2.Instance)
    if len(instanceIDs) == 0 {
        // Nothing requested; avoid an empty DescribeInstances call.
        return byID, nil
    }

    instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
        InstanceIds: instanceIDs,
    })
    if err != nil {
        return nil, err
    }

    for _, instance := range instances {
        if id := orEmpty(instance.InstanceId); id != "" {
            byID[id] = instance
        }
    }
    return byID, nil
}
// getInstancesByNodeNames fetches running instances whose private DNS names
// match the given node names, using one multi-value-filtered query.
// Returns (nil, nil) when nothing matches.
func (a *AWSCloud) getInstancesByNodeNames(nodeNames []string) ([]*ec2.Instance, error) {
    filters := a.addFilters([]*ec2.Filter{
        {
            Name:   aws.String("private-dns-name"),
            Values: aws.StringSlice(nodeNames),
        },
        newEc2Filter("instance-state-name", "running"),
    })

    instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
        Filters: filters,
    })
    if err != nil {
        glog.V(2).Infof("Failed to describe instances %v", nodeNames)
        return nil, err
    }
    if len(instances) == 0 {
        glog.V(3).Infof("Failed to find any instances %v", nodeNames)
        return nil, nil
    }
    return instances, nil
}
// findInstanceByNodeName returns the running instance whose private DNS name
// matches nodeName, nil if none exists, and errors on ambiguity.
func (a *AWSCloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
    request := &ec2.DescribeInstancesInput{
        Filters: a.addFilters([]*ec2.Filter{
            newEc2Filter("private-dns-name", nodeName),
            newEc2Filter("instance-state-name", "running"),
        }),
    }

    instances, err := a.ec2.DescribeInstances(request)
    if err != nil {
        return nil, err
    }
    switch len(instances) {
    case 0:
        return nil, nil
    case 1:
        return instances[0], nil
    default:
        return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
    }
}
// getInstanceByNodeName is like findInstanceByNodeName, but a missing node is
// reported as an error instead of a nil instance.
func (a *AWSCloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
    instance, err := a.findInstanceByNodeName(nodeName)
    if err != nil {
        return nil, err
    }
    if instance == nil {
        return nil, fmt.Errorf("no instances found for name: %s", nodeName)
    }
    return instance, nil
}
// addFilters appends one tag filter per cluster tag to the given filters.
// This lets us run multiple k8s clusters in a single EC2 AZ.
// AWS rejects a zero-length filter list, so an empty result becomes nil.
func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter {
    for key, value := range s.filterTags {
        filters = append(filters, newEc2Filter("tag:"+key, value))
    }
    if len(filters) > 0 {
        return filters
    }
    return nil
}
// getClusterName returns the cluster name from the KubernetesCluster filter
// tag, or an empty string if the tag is not set.
func (s *AWSCloud) getClusterName() string {
    return s.filterTags[TagNameKubernetesCluster]
}
Added AWS Seoul region
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"errors"
"fmt"
"io"
"net"
"net/url"
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"gopkg.in/gcfg.v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/credentialprovider/aws"
"k8s.io/kubernetes/pkg/types"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api/service"
"k8s.io/kubernetes/pkg/api/unversioned"
)
// ProviderName is the name under which this cloud provider registers itself
// (see the RegisterCloudProvider call in init).
const ProviderName = "aws"

// The tag name we use to differentiate multiple logically independent clusters running in the same AZ
const TagNameKubernetesCluster = "KubernetesCluster"

// The tag name we use to differentiate multiple services. Used currently for ELBs only.
const TagNameKubernetesService = "kubernetes.io/service-name"

// The tag name used on a subnet to designate that it should be used for internal ELBs
const TagNameSubnetInternalELB = "kubernetes.io/role/internal-elb"

// The tag name used on a subnet to designate that it should be used for internet ELBs
const TagNameSubnetPublicELB = "kubernetes.io/role/elb"

// Annotation used on the service to indicate that we want an internal ELB.
// Currently we accept only the value "0.0.0.0/0" - other values are an error.
// This lets us define more advanced semantics in future.
const ServiceAnnotationLoadBalancerInternal = "service.beta.kubernetes.io/aws-load-balancer-internal"

// We sometimes read to see if something exists; then try to create it if we didn't find it
// This can fail once in a consistent system if done in parallel
// In an eventually consistent system, it could fail unboundedly
// MaxReadThenCreateRetries sets the maximum number of attempts we will make
const MaxReadThenCreateRetries = 30

// Default volume type for newly created Volumes
// TODO: Remove when user/admin can configure volume types and thus we don't
// need hardcoded defaults.
const DefaultVolumeType = "gp2"

// Amazon recommends having no more that 40 volumes attached to an instance,
// and at least one of those is for the system root volume.
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits
const DefaultMaxEBSVolumes = 39

// Used to call aws_credentials.Init() just once
var once sync.Once
// Abstraction over AWS, to allow mocking/other implementations.
// Compute/LoadBalancing/Autoscaling return region-scoped service clients;
// Metadata returns the instance metadata client.
type AWSServices interface {
    Compute(region string) (EC2, error)
    LoadBalancing(region string) (ELB, error)
    Autoscaling(region string) (ASG, error)
    Metadata() (EC2Metadata, error)
}
// TODO: Should we rename this to AWS (EBS & ELB are not technically part of EC2)
// Abstraction over EC2, to allow mocking/other implementations
// Note that the DescribeX functions return a list, so callers don't need to deal with paging
type EC2 interface {
    // Query EC2 for instances matching the filter
    DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error)

    // Attach a volume to an instance
    AttachVolume(*ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error)
    // Detach a volume from an instance it is attached to
    DetachVolume(request *ec2.DetachVolumeInput) (resp *ec2.VolumeAttachment, err error)
    // Lists volumes
    DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error)
    // Create an EBS volume
    CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error)
    // Delete an EBS volume
    DeleteVolume(*ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error)

    // Security group query and mutation operations
    DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error)

    CreateSecurityGroup(*ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error)
    DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error)

    AuthorizeSecurityGroupIngress(*ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error)
    RevokeSecurityGroupIngress(*ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error)

    // Subnet and tag operations
    DescribeSubnets(*ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error)

    CreateTags(*ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error)

    // Route table operations (used for pod networking routes)
    DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error)
    CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error)
    DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error)

    ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error)
}
// This is a simple pass-through of the ELB client interface, which allows for testing.
// Method signatures mirror the aws-sdk-go elb client.
type ELB interface {
    CreateLoadBalancer(*elb.CreateLoadBalancerInput) (*elb.CreateLoadBalancerOutput, error)
    DeleteLoadBalancer(*elb.DeleteLoadBalancerInput) (*elb.DeleteLoadBalancerOutput, error)
    DescribeLoadBalancers(*elb.DescribeLoadBalancersInput) (*elb.DescribeLoadBalancersOutput, error)
    RegisterInstancesWithLoadBalancer(*elb.RegisterInstancesWithLoadBalancerInput) (*elb.RegisterInstancesWithLoadBalancerOutput, error)
    DeregisterInstancesFromLoadBalancer(*elb.DeregisterInstancesFromLoadBalancerInput) (*elb.DeregisterInstancesFromLoadBalancerOutput, error)
    DetachLoadBalancerFromSubnets(*elb.DetachLoadBalancerFromSubnetsInput) (*elb.DetachLoadBalancerFromSubnetsOutput, error)
    AttachLoadBalancerToSubnets(*elb.AttachLoadBalancerToSubnetsInput) (*elb.AttachLoadBalancerToSubnetsOutput, error)
    CreateLoadBalancerListeners(*elb.CreateLoadBalancerListenersInput) (*elb.CreateLoadBalancerListenersOutput, error)
    DeleteLoadBalancerListeners(*elb.DeleteLoadBalancerListenersInput) (*elb.DeleteLoadBalancerListenersOutput, error)
    ApplySecurityGroupsToLoadBalancer(*elb.ApplySecurityGroupsToLoadBalancerInput) (*elb.ApplySecurityGroupsToLoadBalancerOutput, error)

    ConfigureHealthCheck(*elb.ConfigureHealthCheckInput) (*elb.ConfigureHealthCheckOutput, error)
}
// This is a simple pass-through of the Autoscaling client interface, which allows for testing.
// Method signatures mirror the aws-sdk-go autoscaling client.
type ASG interface {
    UpdateAutoScalingGroup(*autoscaling.UpdateAutoScalingGroupInput) (*autoscaling.UpdateAutoScalingGroupOutput, error)
    DescribeAutoScalingGroups(*autoscaling.DescribeAutoScalingGroupsInput) (*autoscaling.DescribeAutoScalingGroupsOutput, error)
}
// Abstraction over the AWS metadata service.
type EC2Metadata interface {
    // Query the EC2 metadata service (used to discover instance-id etc).
    // path is the metadata path, e.g. "placement/availability-zone".
    GetMetadata(path string) (string, error)
}
// VolumeOptions holds the parameters for creating a new EBS volume.
type VolumeOptions struct {
    // CapacityGB is the requested volume size, in GB (as named).
    CapacityGB int
    // Tags to apply to the created volume.
    Tags map[string]string
}
// Volumes is an interface for managing cloud-provisioned volumes
// TODO: Allow other clouds to implement this
type Volumes interface {
    // Attach the disk to the specified instance
    // instanceName can be empty to mean "the instance on which we are running"
    // Returns the device (e.g. /dev/xvdf) where we attached the volume
    AttachDisk(diskName string, instanceName string, readOnly bool) (string, error)
    // Detach the disk from the specified instance
    // instanceName can be empty to mean "the instance on which we are running"
    // Returns the device where the volume was attached
    DetachDisk(diskName string, instanceName string) (string, error)

    // Create a volume with the specified options
    CreateDisk(volumeOptions *VolumeOptions) (volumeName string, err error)
    // Delete the specified volume
    // Returns true iff the volume was deleted
    // If the volume was not found, returns (false, nil)
    DeleteDisk(volumeName string) (bool, error)

    // Get labels to apply to volume on creation
    GetVolumeLabels(volumeName string) (map[string]string, error)
}
// InstanceGroups is an interface for managing cloud-managed instance groups / autoscaling instance groups
// TODO: Allow other clouds to implement this
type InstanceGroups interface {
    // Set the size to the fixed size
    ResizeInstanceGroup(instanceGroupName string, size int) error
    // Queries the cloud provider for information about the specified instance group
    DescribeInstanceGroup(instanceGroupName string) (InstanceGroupInfo, error)
}
// InstanceGroupInfo is returned by InstanceGroups.Describe, and exposes information about the group.
type InstanceGroupInfo interface {
    // The number of instances currently running under control of this group
    CurrentSize() (int, error)
}
// AWSCloud is an implementation of Interface, LoadBalancer and Instances for Amazon Web Services.
type AWSCloud struct {
    ec2      EC2              // EC2 API client (instances, volumes, security groups, routes)
    elb      ELB              // classic load balancer API client
    asg      ASG              // autoscaling-group API client
    metadata EC2Metadata      // instance metadata service client
    cfg      *AWSCloudConfig  // parsed provider configuration
    region   string
    vpcID    string

    // Tags used to scope EC2 queries to this cluster (see addFilters).
    filterTags map[string]string

    // The AWS instance that we are running on
    // Note that we cache some state in awsInstance (mountpoints), so we must preserve the instance
    selfAWSInstance *awsInstance

    mutex sync.Mutex
}

// Compile-time assertion that AWSCloud satisfies the Volumes interface.
var _ Volumes = &AWSCloud{}
// AWSCloudConfig is the gcfg-parsed configuration for this provider
// (see readAWSCloudConfig).
type AWSCloudConfig struct {
    Global struct {
        // TODO: Is there any use for this?  We can get it from the instance metadata service
        // Maybe if we're not running on AWS, e.g. bootstrap; for now it is not very useful
        Zone string

        KubernetesClusterTag string

        //The aws provider creates an inbound rule per load balancer on the node security
        //group. However, this can run into the AWS security group rule limit of 50 if
        //many LoadBalancers are created.
        //
        //This flag disables the automatic ingress creation. It requires that the user
        //has setup a rule that allows inbound traffic on kubelet ports from the
        //local VPC subnet (so load balancers can access it). E.g. 10.82.0.0/16 30000-32000.
        DisableSecurityGroupIngress bool
    }
}
// awsSdkEC2 is an implementation of the EC2 interface, backed by aws-sdk-go.
type awsSdkEC2 struct {
    ec2 *ec2.EC2
}
// awsSDKProvider implements AWSServices using aws-sdk-go clients that share
// one credential chain and per-region retry delayers.
type awsSDKProvider struct {
    creds *credentials.Credentials

    mutex sync.Mutex
    // regionDelayers is guarded by mutex (see getCrossRequestRetryDelay).
    regionDelayers map[string]*CrossRequestRetryDelay
}
// newAWSSDKProvider builds an awsSDKProvider around the given credentials,
// with an empty per-region delayer table.
func newAWSSDKProvider(creds *credentials.Credentials) *awsSDKProvider {
    p := &awsSDKProvider{creds: creds}
    p.regionDelayers = make(map[string]*CrossRequestRetryDelay)
    return p
}
// addHandlers installs the shared request logger and, when a delayer exists
// for the region, the cross-request retry-delay hooks into the SDK handler chain.
func (p *awsSDKProvider) addHandlers(regionName string, h *request.Handlers) {
    h.Sign.PushFrontNamed(request.NamedHandler{
        Name: "k8s/logger",
        Fn:   awsHandlerLogger,
    })

    delayer := p.getCrossRequestRetryDelay(regionName)
    if delayer == nil {
        return
    }
    h.Sign.PushFrontNamed(request.NamedHandler{
        Name: "k8s/delay-presign",
        Fn:   delayer.BeforeSign,
    })
    h.AfterRetry.PushFrontNamed(request.NamedHandler{
        Name: "k8s/delay-afterretry",
        Fn:   delayer.AfterRetry,
    })
}
// Get a CrossRequestRetryDelay, scoped to the region, not to the request.
// This means that when we hit a limit on a call, we will delay _all_ calls to the API.
// We do this to protect the AWS account from becoming overloaded and effectively locked.
// We also log when we hit request limits.
// Note that this delays the current goroutine; this is bad behaviour and will
// likely cause k8s to become slow or unresponsive for cloud operations.
// However, this throttle is intended only as a last resort.  When we observe
// this throttling, we need to address the root cause (e.g. add a delay to a
// controller retry loop)
func (p *awsSDKProvider) getCrossRequestRetryDelay(regionName string) *CrossRequestRetryDelay {
    p.mutex.Lock()
    defer p.mutex.Unlock()

    if existing, ok := p.regionDelayers[regionName]; ok {
        return existing
    }
    // Lazily create the delayer for a region on first use.
    created := NewCrossRequestRetryDelay()
    p.regionDelayers[regionName] = created
    return created
}
func (p *awsSDKProvider) Compute(regionName string) (EC2, error) {
service := ec2.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &service.Handlers)
ec2 := &awsSdkEC2{
ec2: service,
}
return ec2, nil
}
func (p *awsSDKProvider) LoadBalancing(regionName string) (ELB, error) {
elbClient := elb.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &elbClient.Handlers)
return elbClient, nil
}
func (p *awsSDKProvider) Autoscaling(regionName string) (ASG, error) {
client := autoscaling.New(session.New(&aws.Config{
Region: ®ionName,
Credentials: p.creds,
}))
p.addHandlers(regionName, &client.Handlers)
return client, nil
}
// Metadata returns a client for the EC2 instance metadata service.
func (p *awsSDKProvider) Metadata() (EC2Metadata, error) {
    return ec2metadata.New(session.New(&aws.Config{})), nil
}
// stringPointerArray converts a string slice into a slice of pointers to the
// original elements (no copies are made). A nil input yields a nil result.
func stringPointerArray(orig []string) []*string {
    if orig == nil {
        return nil
    }
    out := make([]*string, len(orig))
    for i := range orig {
        out[i] = &orig[i]
    }
    return out
}
// isNilOrEmpty reports whether the string pointer is nil or points at "".
func isNilOrEmpty(s *string) bool {
    if s == nil {
        return true
    }
    return *s == ""
}
// orEmpty dereferences a string pointer, treating nil as the empty string.
func orEmpty(s *string) string {
    if s != nil {
        return *s
    }
    return ""
}
// newEc2Filter builds a single-value EC2 filter for the given name.
func newEc2Filter(name string, value string) *ec2.Filter {
    return &ec2.Filter{
        Name:   aws.String(name),
        Values: []*string{aws.String(value)},
    }
}
// AddSSHKeyToAllInstances is not implemented for AWS; it always returns an error.
func (self *AWSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
    return errors.New("unimplemented")
}
// CurrentNodeName returns the node name cached on the self instance;
// the hostname argument is ignored.
func (c *AWSCloud) CurrentNodeName(hostname string) (string, error) {
    return c.selfAWSInstance.nodeName, nil
}
// Implementation of EC2.Instances
func (self *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
// Instances are paged
results := []*ec2.Instance{}
var nextToken *string
for {
response, err := self.ec2.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
}
for _, reservation := range response.Reservations {
results = append(results, reservation.Instances...)
}
nextToken = response.NextToken
if isNilOrEmpty(nextToken) {
break
}
request.NextToken = nextToken
}
return results, nil
}
// Implements EC2.DescribeSecurityGroups.
// Security groups are not paged, so one call retrieves everything.
func (s *awsSdkEC2) DescribeSecurityGroups(request *ec2.DescribeSecurityGroupsInput) ([]*ec2.SecurityGroup, error) {
    response, err := s.ec2.DescribeSecurityGroups(request)
    if err != nil {
        return nil, fmt.Errorf("error listing AWS security groups: %v", err)
    }
    return response.SecurityGroups, nil
}
// AttachVolume is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) AttachVolume(request *ec2.AttachVolumeInput) (*ec2.VolumeAttachment, error) {
    return s.ec2.AttachVolume(request)
}
// DetachVolume is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) DetachVolume(request *ec2.DetachVolumeInput) (*ec2.VolumeAttachment, error) {
    return s.ec2.DetachVolume(request)
}
// DescribeVolumes lists EBS volumes, following NextToken across pages and
// accumulating every page's volumes into one slice.
func (s *awsSdkEC2) DescribeVolumes(request *ec2.DescribeVolumesInput) ([]*ec2.Volume, error) {
    results := []*ec2.Volume{}
    for {
        response, err := s.ec2.DescribeVolumes(request)
        if err != nil {
            return nil, fmt.Errorf("error listing AWS volumes: %v", err)
        }

        results = append(results, response.Volumes...)

        if isNilOrEmpty(response.NextToken) {
            return results, nil
        }
        request.NextToken = response.NextToken
    }
}
// CreateVolume is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) CreateVolume(request *ec2.CreateVolumeInput) (resp *ec2.Volume, err error) {
    return s.ec2.CreateVolume(request)
}
// DeleteVolume is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) DeleteVolume(request *ec2.DeleteVolumeInput) (*ec2.DeleteVolumeOutput, error) {
    return s.ec2.DeleteVolume(request)
}
// DescribeSubnets lists subnets. The subnets API is not paged, so a single
// call returns the full result set.
func (s *awsSdkEC2) DescribeSubnets(request *ec2.DescribeSubnetsInput) ([]*ec2.Subnet, error) {
    response, err := s.ec2.DescribeSubnets(request)
    if err != nil {
        return nil, fmt.Errorf("error listing AWS subnets: %v", err)
    }
    return response.Subnets, nil
}
// CreateSecurityGroup is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) CreateSecurityGroup(request *ec2.CreateSecurityGroupInput) (*ec2.CreateSecurityGroupOutput, error) {
    return s.ec2.CreateSecurityGroup(request)
}
// DeleteSecurityGroup is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) DeleteSecurityGroup(request *ec2.DeleteSecurityGroupInput) (*ec2.DeleteSecurityGroupOutput, error) {
    return s.ec2.DeleteSecurityGroup(request)
}
// AuthorizeSecurityGroupIngress is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) AuthorizeSecurityGroupIngress(request *ec2.AuthorizeSecurityGroupIngressInput) (*ec2.AuthorizeSecurityGroupIngressOutput, error) {
    return s.ec2.AuthorizeSecurityGroupIngress(request)
}
// RevokeSecurityGroupIngress is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) RevokeSecurityGroupIngress(request *ec2.RevokeSecurityGroupIngressInput) (*ec2.RevokeSecurityGroupIngressOutput, error) {
    return s.ec2.RevokeSecurityGroupIngress(request)
}
// CreateTags is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) CreateTags(request *ec2.CreateTagsInput) (*ec2.CreateTagsOutput, error) {
    return s.ec2.CreateTags(request)
}
// DescribeRouteTables lists route tables. This API is not paged, so one call
// returns the full result set.
func (s *awsSdkEC2) DescribeRouteTables(request *ec2.DescribeRouteTablesInput) ([]*ec2.RouteTable, error) {
    response, err := s.ec2.DescribeRouteTables(request)
    if err != nil {
        return nil, fmt.Errorf("error listing AWS route tables: %v", err)
    }
    return response.RouteTables, nil
}
// CreateRoute is a direct pass-through to the AWS SDK EC2 client.
func (s *awsSdkEC2) CreateRoute(request *ec2.CreateRouteInput) (*ec2.CreateRouteOutput, error) {
    return s.ec2.CreateRoute(request)
}
// DeleteRoute is a direct pass-through to the SDK's DeleteRoute call.
func (s *awsSdkEC2) DeleteRoute(request *ec2.DeleteRouteInput) (*ec2.DeleteRouteOutput, error) {
	output, err := s.ec2.DeleteRoute(request)
	return output, err
}
// ModifyInstanceAttribute is a direct pass-through to the SDK call.
func (s *awsSdkEC2) ModifyInstanceAttribute(request *ec2.ModifyInstanceAttributeInput) (*ec2.ModifyInstanceAttributeOutput, error) {
	output, err := s.ec2.ModifyInstanceAttribute(request)
	return output, err
}
// init registers this package as the cloud provider named ProviderName.
// The factory builds an AWS credential chain whose order is significant:
// environment variables first, then the EC2 instance-role metadata
// service, then the shared credentials file.
func init() {
	cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) {
		creds := credentials.NewChainCredentials(
			[]credentials.Provider{
				&credentials.EnvProvider{},
				&ec2rolecreds.EC2RoleProvider{
					Client: ec2metadata.New(session.New(&aws.Config{})),
				},
				&credentials.SharedCredentialsProvider{},
			})
		aws := newAWSSDKProvider(creds)
		return newAWSCloud(config, aws)
	})
}
// readAWSCloudConfig reads an instance of AWSCloudConfig from config reader.
//
// config may be nil, in which case an empty configuration is assumed.
// If the config does not specify a zone, the zone is looked up via the
// EC2 metadata service (when metadata is non-nil); an empty zone after
// both attempts is an error, because the zone determines the region.
func readAWSCloudConfig(config io.Reader, metadata EC2Metadata) (*AWSCloudConfig, error) {
	var cfg AWSCloudConfig
	var err error
	if config != nil {
		err = gcfg.ReadInto(&cfg, config)
		if err != nil {
			return nil, err
		}
	}
	if cfg.Global.Zone == "" {
		// Fallback: ask the metadata service for our own AZ.
		if metadata != nil {
			glog.Info("Zone not specified in configuration file; querying AWS metadata service")
			cfg.Global.Zone, err = getAvailabilityZone(metadata)
			if err != nil {
				return nil, err
			}
		}
		if cfg.Global.Zone == "" {
			return nil, fmt.Errorf("no zone specified in configuration file")
		}
	}
	return &cfg, nil
}
// getInstanceType fetches this instance's type (e.g. "m4.large") from the
// EC2 metadata service.
func getInstanceType(metadata EC2Metadata) (string, error) {
	instanceType, err := metadata.GetMetadata("instance-type")
	return instanceType, err
}
// getAvailabilityZone fetches this instance's availability zone (e.g.
// "us-east-1a") from the EC2 metadata service.
func getAvailabilityZone(metadata EC2Metadata) (string, error) {
	az, err := metadata.GetMetadata("placement/availability-zone")
	return az, err
}
// isRegionValid reports whether region is one of the AWS regions known to
// this provider at build time.
func isRegionValid(region string) bool {
	switch region {
	case "us-east-1",
		"us-west-1",
		"us-west-2",
		"eu-west-1",
		"eu-central-1",
		"ap-southeast-1",
		"ap-southeast-2",
		"ap-northeast-1",
		"ap-northeast-2",
		"cn-north-1",
		"us-gov-west-1",
		"sa-east-1":
		return true
	}
	return false
}
// azToRegion derives the region from a valid AZ name by dropping the
// trailing zone letter (e.g. "us-east-1a" -> "us-east-1").
// It returns an error for a known-invalid (empty) AZ.
func azToRegion(az string) (string, error) {
	if az == "" {
		return "", fmt.Errorf("invalid (empty) AZ")
	}
	return az[:len(az)-1], nil
}
// newAWSCloud creates a new instance of AWSCloud.
// AWSProvider and instanceId are primarily for tests.
//
// It reads the cloud config, derives and validates the region from the
// configured zone, constructs the EC2/ELB/autoscaling clients, resolves
// the instance we are running on, and sets up cluster tag filtering.
// Fix: corrected the "Kuberentes" typo in the missing-tag log message.
func newAWSCloud(config io.Reader, awsServices AWSServices) (*AWSCloud, error) {
	metadata, err := awsServices.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error creating AWS metadata client: %v", err)
	}
	cfg, err := readAWSCloudConfig(config, metadata)
	if err != nil {
		return nil, fmt.Errorf("unable to read AWS cloud provider config file: %v", err)
	}
	// The zone must be at least a region name plus a one-letter AZ suffix.
	zone := cfg.Global.Zone
	if len(zone) <= 1 {
		return nil, fmt.Errorf("invalid AWS zone in config file: %s", zone)
	}
	regionName, err := azToRegion(zone)
	if err != nil {
		return nil, err
	}
	valid := isRegionValid(regionName)
	if !valid {
		return nil, fmt.Errorf("not a valid AWS zone (unknown region): %s", zone)
	}
	ec2, err := awsServices.Compute(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS EC2 client: %v", err)
	}
	elb, err := awsServices.LoadBalancing(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS ELB client: %v", err)
	}
	asg, err := awsServices.Autoscaling(regionName)
	if err != nil {
		return nil, fmt.Errorf("error creating AWS autoscaling client: %v", err)
	}
	awsCloud := &AWSCloud{
		ec2:      ec2,
		elb:      elb,
		asg:      asg,
		metadata: metadata,
		cfg:      cfg,
		region:   regionName,
	}
	selfAWSInstance, err := awsCloud.buildSelfAWSInstance()
	if err != nil {
		return nil, err
	}
	awsCloud.selfAWSInstance = selfAWSInstance
	awsCloud.vpcID = selfAWSInstance.vpcID
	// Cluster tag filter: taken from config when set, otherwise discovered
	// from the tags on the instance we are running on.
	filterTags := map[string]string{}
	if cfg.Global.KubernetesClusterTag != "" {
		filterTags[TagNameKubernetesCluster] = cfg.Global.KubernetesClusterTag
	} else {
		// TODO: Clean up double-API query
		info, err := selfAWSInstance.describeInstance()
		if err != nil {
			return nil, err
		}
		for _, tag := range info.Tags {
			if orEmpty(tag.Key) == TagNameKubernetesCluster {
				filterTags[TagNameKubernetesCluster] = orEmpty(tag.Value)
			}
		}
	}
	if filterTags[TagNameKubernetesCluster] == "" {
		glog.Errorf("Tag %q not found; Kubernetes may behave unexpectedly.", TagNameKubernetesCluster)
	}
	awsCloud.filterTags = filterTags
	if len(filterTags) > 0 {
		glog.Infof("AWS cloud filtering on tags: %v", filterTags)
	} else {
		glog.Infof("AWS cloud - no tag filtering")
	}
	// Register handler for ECR credentials (exactly once per process).
	once.Do(func() {
		aws_credentials.Init()
	})
	return awsCloud, nil
}
// Clusters returns the list of clusters; not supported by the AWS provider.
func (aws *AWSCloud) Clusters() (cloudprovider.Clusters, bool) {
	return nil, false
}
// ProviderName returns the cloud provider ID.
func (aws *AWSCloud) ProviderName() string {
	return ProviderName
}
// ScrubDNS filters DNS settings for pods. AWS performs no filtering and
// returns the inputs unchanged.
func (aws *AWSCloud) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}
// LoadBalancer returns an implementation of LoadBalancer for Amazon Web Services.
func (s *AWSCloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
	return s, true
}
// Instances returns an implementation of Instances for Amazon Web Services.
func (aws *AWSCloud) Instances() (cloudprovider.Instances, bool) {
	return aws, true
}
// Zones returns an implementation of Zones for Amazon Web Services.
func (aws *AWSCloud) Zones() (cloudprovider.Zones, bool) {
	return aws, true
}
// Routes returns an implementation of Routes for Amazon Web Services.
func (aws *AWSCloud) Routes() (cloudprovider.Routes, bool) {
	return aws, true
}
// NodeAddresses is an implementation of Instances.NodeAddresses.
//
// For the local instance (name matches self, or name is empty) the
// addresses come from the EC2 metadata service; a missing public IP is
// tolerated there. For any other node the addresses come from a
// DescribeInstances lookup, and malformed IPs are treated as errors.
func (c *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
	if c.selfAWSInstance.nodeName == name || len(name) == 0 {
		addresses := []api.NodeAddress{}
		internalIP, err := c.metadata.GetMetadata("local-ipv4")
		if err != nil {
			return nil, err
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeInternalIP, Address: internalIP})
		// Legacy compatibility: the private ip was the legacy host ip
		addresses = append(addresses, api.NodeAddress{Type: api.NodeLegacyHostIP, Address: internalIP})
		externalIP, err := c.metadata.GetMetadata("public-ipv4")
		if err != nil {
			//TODO: It would be nice to be able to determine the reason for the failure,
			// but the AWS client masks all failures with the same error description.
			glog.V(2).Info("Could not determine public IP from AWS metadata.")
		} else {
			addresses = append(addresses, api.NodeAddress{Type: api.NodeExternalIP, Address: externalIP})
		}
		return addresses, nil
	}
	instance, err := c.getInstanceByNodeName(name)
	if err != nil {
		return nil, err
	}
	addresses := []api.NodeAddress{}
	if !isNilOrEmpty(instance.PrivateIpAddress) {
		ipAddress := *instance.PrivateIpAddress
		// Validate the address before reporting it.
		ip := net.ParseIP(ipAddress)
		if ip == nil {
			return nil, fmt.Errorf("EC2 instance had invalid private address: %s (%s)", orEmpty(instance.InstanceId), ipAddress)
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeInternalIP, Address: ip.String()})
		// Legacy compatibility: the private ip was the legacy host ip
		addresses = append(addresses, api.NodeAddress{Type: api.NodeLegacyHostIP, Address: ip.String()})
	}
	// TODO: Other IP addresses (multiple ips)?
	if !isNilOrEmpty(instance.PublicIpAddress) {
		ipAddress := *instance.PublicIpAddress
		ip := net.ParseIP(ipAddress)
		if ip == nil {
			return nil, fmt.Errorf("EC2 instance had invalid public address: %s (%s)", orEmpty(instance.InstanceId), ipAddress)
		}
		addresses = append(addresses, api.NodeAddress{Type: api.NodeExternalIP, Address: ip.String()})
	}
	return addresses, nil
}
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (c *AWSCloud) ExternalID(name string) (string, error) {
	// We assume that if this is run on the instance itself, the instance
	// exists and is alive.
	if c.selfAWSInstance.nodeName == name {
		return c.selfAWSInstance.awsID, nil
	}
	// Otherwise verify the instance still exists; a missing or terminated
	// instance must be reported as cloudprovider.InstanceNotFound.
	instance, err := c.findInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	if instance == nil {
		return "", cloudprovider.InstanceNotFound
	}
	return orEmpty(instance.InstanceId), nil
}
// InstanceID returns the cloud provider ID of the specified instance.
func (c *AWSCloud) InstanceID(name string) (string, error) {
	// In the future it is possible to also return an endpoint as:
	// <endpoint>/<zone>/<instanceid>
	if c.selfAWSInstance.nodeName == name {
		return "/" + c.selfAWSInstance.availabilityZone + "/" + c.selfAWSInstance.awsID, nil
	}
	inst, err := c.getInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceId), nil
}
// InstanceType returns the type of the specified instance.
func (c *AWSCloud) InstanceType(name string) (string, error) {
	// The local instance's type is cached; anything else needs an API lookup.
	if c.selfAWSInstance.nodeName == name {
		return c.selfAWSInstance.instanceType, nil
	}
	inst, err := c.getInstanceByNodeName(name)
	if err != nil {
		return "", err
	}
	return orEmpty(inst.InstanceType), nil
}
// Return a list of instances matching regex string.
//
// Only running, fully-started instances with a PrivateDnsName are
// considered; the regex is matched against the "Name" tag, and the
// returned values are the instances' private DNS names.
func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) {
	filters := []*ec2.Filter{newEc2Filter("instance-state-name", "running")}
	filters = s.addFilters(filters)
	request := &ec2.DescribeInstancesInput{
		Filters: filters,
	}
	instances, err := s.ec2.DescribeInstances(request)
	if err != nil {
		return []string{}, err
	}
	if len(instances) == 0 {
		return []string{}, fmt.Errorf("no instances returned")
	}
	// Tolerate a single-quoted regex (e.g. from shell-style input).
	if strings.HasPrefix(regex, "'") && strings.HasSuffix(regex, "'") {
		glog.Infof("Stripping quotes around regex (%s)", regex)
		regex = regex[1 : len(regex)-1]
	}
	re, err := regexp.Compile(regex)
	if err != nil {
		return []string{}, err
	}
	matchingInstances := []string{}
	for _, instance := range instances {
		// Only return fully-ready instances when listing instances
		// (vs a query by name, where we will return it if we find it)
		if orEmpty(instance.State.Name) == "pending" {
			glog.V(2).Infof("Skipping EC2 instance (pending): %s", *instance.InstanceId)
			continue
		}
		privateDNSName := orEmpty(instance.PrivateDnsName)
		if privateDNSName == "" {
			glog.V(2).Infof("Skipping EC2 instance (no PrivateDNSName): %s",
				orEmpty(instance.InstanceId))
			continue
		}
		for _, tag := range instance.Tags {
			if orEmpty(tag.Key) == "Name" && re.MatchString(orEmpty(tag.Value)) {
				matchingInstances = append(matchingInstances, privateDNSName)
				break
			}
		}
	}
	glog.V(2).Infof("Matched EC2 instances: %s", matchingInstances)
	return matchingInstances, nil
}
// List is an implementation of Instances.List. The filter is interpreted
// as a regular expression against the instances' "Name" tags.
func (aws *AWSCloud) List(filter string) ([]string, error) {
	// TODO: Should really use tag query. No need to go regexp.
	return aws.getInstancesByRegex(filter)
}
// GetZone implements Zones.GetZone. The answer is served from the cached
// placement of the local instance; no AWS API call is made.
func (c *AWSCloud) GetZone() (cloudprovider.Zone, error) {
	return cloudprovider.Zone{
		FailureDomain: c.selfAWSInstance.availabilityZone,
		Region:        c.region,
	}, nil
}
// Abstraction around AWS Instance Types
// There isn't an API to get information for a particular instance type (that I know of)
// NOTE(review): currently an empty placeholder; all instance types behave
// identically (see getEBSMountDevices).
type awsInstanceType struct {
}
// Used to represent a mount device for attaching an EBS volume
// This should be stored as a single letter (i.e. c, not sdc or /dev/sdc)
// NOTE(review): in practice two letters are also produced by
// getEBSMountDevices (e.g. "ba"); callers prepend "/dev/xvd" or "/dev/sd".
type mountDevice string
// TODO: Also return number of mounts allowed?
// getEBSMountDevices returns the candidate EBS mount-device suffixes in
// order: "ba", "bb", ..., "bz", "ca", ..., up to DefaultMaxEBSVolumes.
// See: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
func (self *awsInstanceType) getEBSMountDevices() []mountDevice {
	devices := []mountDevice{}
	first, second := 'b', 'a'
	for len(devices) < DefaultMaxEBSVolumes {
		devices = append(devices, mountDevice(fmt.Sprintf("%c%c", first, second)))
		if second == 'z' {
			second = 'a'
			first++
		} else {
			second++
		}
	}
	return devices
}
// awsInstance wraps a single EC2 instance together with the local
// bookkeeping needed to assign EBS mount devices to volumes.
type awsInstance struct {
	ec2 EC2
	// id in AWS
	awsID string
	// node name in k8s
	nodeName string
	// availability zone the instance resides in
	availabilityZone string
	// ID of VPC the instance resides in
	vpcID string
	// ID of subnet the instance resides in
	subnetID string
	// instance type
	instanceType string
	// mutex guards the attaching map below; hold it for all reads/writes.
	mutex sync.Mutex
	// We keep an active list of devices we have assigned but not yet
	// attached, to avoid a race condition where we assign a device mapping
	// and then get a second request before we attach the volume
	attaching map[mountDevice]string
}
// newAWSInstance creates a new awsInstance object from an EC2 instance
// description, copying out the fields this package cares about.
func newAWSInstance(ec2Service EC2, instance *ec2.Instance) *awsInstance {
	var az string
	if instance.Placement != nil {
		az = aws.StringValue(instance.Placement.AvailabilityZone)
	}
	return &awsInstance{
		ec2:              ec2Service,
		awsID:            aws.StringValue(instance.InstanceId),
		nodeName:         aws.StringValue(instance.PrivateDnsName),
		availabilityZone: az,
		instanceType:     aws.StringValue(instance.InstanceType),
		vpcID:            aws.StringValue(instance.VpcId),
		subnetID:         aws.StringValue(instance.SubnetId),
		attaching:        make(map[mountDevice]string),
	}
}
// getInstanceType returns the awsInstanceType that models the instance
// type of this instance.
func (self *awsInstance) getInstanceType() *awsInstanceType {
	// TODO: Make this real
	return &awsInstanceType{}
}
// describeInstance fetches the full EC2 API description of this instance.
// Exactly one matching instance is required; zero or multiple matches are
// reported as errors.
func (self *awsInstance) describeInstance() (*ec2.Instance, error) {
	instanceID := self.awsID
	request := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{&instanceID},
	}
	instances, err := self.ec2.DescribeInstances(request)
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, fmt.Errorf("no instances found for instance: %s", self.awsID)
	case 1:
		return instances[0], nil
	default:
		return nil, fmt.Errorf("multiple instances found for instance: %s", self.awsID)
	}
}
// Gets the mountDevice already assigned to the volume, or assigns an unused mountDevice.
// If the volume is already assigned, this will return the existing mountDevice with alreadyAttached=true.
// Otherwise the mountDevice is assigned by finding the first available mountDevice, and it is returned with alreadyAttached=false.
//
// With assign=false this is a pure query: it never reserves a device and
// returns "" when the volume has no assignment.
func (self *awsInstance) getMountDevice(volumeID string, assign bool) (assigned mountDevice, alreadyAttached bool, err error) {
	instanceType := self.getInstanceType()
	if instanceType == nil {
		return "", false, fmt.Errorf("could not get instance type for instance: %s", self.awsID)
	}
	// We lock to prevent concurrent mounts from conflicting
	// We may still conflict if someone calls the API concurrently,
	// but the AWS API will then fail one of the two attach operations
	self.mutex.Lock()
	defer self.mutex.Unlock()
	info, err := self.describeInstance()
	if err != nil {
		return "", false, err
	}
	// Build the current device->volume map from the live block-device
	// mappings, normalizing "/dev/sdX" and "/dev/xvdX" to the bare suffix.
	deviceMappings := map[mountDevice]string{}
	for _, blockDevice := range info.BlockDeviceMappings {
		name := aws.StringValue(blockDevice.DeviceName)
		if strings.HasPrefix(name, "/dev/sd") {
			name = name[7:]
		}
		if strings.HasPrefix(name, "/dev/xvd") {
			name = name[8:]
		}
		if len(name) < 1 || len(name) > 2 {
			glog.Warningf("Unexpected EBS DeviceName: %q", aws.StringValue(blockDevice.DeviceName))
		}
		deviceMappings[mountDevice(name)] = aws.StringValue(blockDevice.Ebs.VolumeId)
	}
	// Overlay in-flight assignments that have not appeared in the API yet.
	for mountDevice, volume := range self.attaching {
		deviceMappings[mountDevice] = volume
	}
	// Check to see if this volume is already assigned a device on this machine
	for mountDevice, mappingVolumeID := range deviceMappings {
		if volumeID == mappingVolumeID {
			if assign {
				glog.Warningf("Got assignment call for already-assigned volume: %s@%s", mountDevice, mappingVolumeID)
			}
			return mountDevice, true, nil
		}
	}
	if !assign {
		return mountDevice(""), false, nil
	}
	// Check all the valid mountpoints to see if any of them are free
	valid := instanceType.getEBSMountDevices()
	chosen := mountDevice("")
	for _, mountDevice := range valid {
		_, found := deviceMappings[mountDevice]
		if !found {
			chosen = mountDevice
			break
		}
	}
	if chosen == "" {
		glog.Warningf("Could not assign a mount device (all in use?). mappings=%v, valid=%v", deviceMappings, valid)
		return "", false, fmt.Errorf("Too many EBS volumes attached to node %s.", self.nodeName)
	}
	// Reserve the device until endAttaching releases it.
	self.attaching[chosen] = volumeID
	glog.V(2).Infof("Assigned mount device %s -> volume %s", chosen, volumeID)
	return chosen, false, nil
}
// endAttaching releases the in-flight reservation made by getMountDevice
// once the attach operation has completed (successfully or not).
// Mismatched device/volume pairs are logged and ignored rather than
// corrupting the reservation table.
func (self *awsInstance) endAttaching(volumeID string, mountDevice mountDevice) {
	self.mutex.Lock()
	defer self.mutex.Unlock()
	existingVolumeID, found := self.attaching[mountDevice]
	if !found {
		glog.Errorf("endAttaching on non-allocated device")
		return
	}
	if volumeID != existingVolumeID {
		glog.Errorf("endAttaching on device assigned to different volume")
		return
	}
	glog.V(2).Infof("Releasing mount device mapping: %s -> volume %s", mountDevice, volumeID)
	delete(self.attaching, mountDevice)
}
// awsDisk identifies a single EBS volume, pairing the Kubernetes volume
// name with the underlying AWS volume ID.
type awsDisk struct {
	ec2 EC2
	// Name in k8s
	name string
	// id in AWS
	awsID string
}
// newAWSDisk parses a Kubernetes volume name into an awsDisk.
// Accepts either a bare volume ID ("vol-123") or the URL form
// "aws://<az>/<vol-id>"; the ID must start with "vol-" and contain no "/".
func newAWSDisk(aws *AWSCloud, name string) (*awsDisk, error) {
	// name looks like aws://availability-zone/id
	// The original idea of the URL-style name was to put the AZ into the
	// host, so we could find the AZ immediately from the name without
	// querying the API. But it turns out we don't actually need it for
	// Ubernetes-Lite, as we put the AZ into the labels on the PV instead.
	// However, if in future we want to support Ubernetes-Lite
	// volume-awareness without using PersistentVolumes, we likely will
	// want the AZ in the host.
	if !strings.HasPrefix(name, "aws://") {
		name = "aws://" + "" + "/" + name
	}
	url, err := url.Parse(name)
	if err != nil {
		// TODO: Maybe we should pass a URL into the Volume functions
		return nil, fmt.Errorf("Invalid disk name (%s): %v", name, err)
	}
	if url.Scheme != "aws" {
		return nil, fmt.Errorf("Invalid scheme for AWS volume (%s)", name)
	}
	awsID := url.Path
	// Strip the leading "/" that url.Path carries for aws://az/vol-... names.
	if len(awsID) > 1 && awsID[0] == '/' {
		awsID = awsID[1:]
	}
	// TODO: Regex match?
	if strings.Contains(awsID, "/") || !strings.HasPrefix(awsID, "vol-") {
		return nil, fmt.Errorf("Invalid format for AWS volume (%s)", name)
	}
	disk := &awsDisk{ec2: aws.ec2, name: name, awsID: awsID}
	return disk, nil
}
// describeVolume fetches the full EC2 API description of this volume.
// Exactly one matching volume is required; zero or multiple matches are
// reported as errors.
func (self *awsDisk) describeVolume() (*ec2.Volume, error) {
	volumeID := self.awsID
	request := &ec2.DescribeVolumesInput{
		VolumeIds: []*string{&volumeID},
	}
	volumes, err := self.ec2.DescribeVolumes(request)
	if err != nil {
		return nil, fmt.Errorf("error querying ec2 for volume info: %v", err)
	}
	switch len(volumes) {
	case 0:
		return nil, fmt.Errorf("no volumes found for volume: %s", self.awsID)
	case 1:
		return volumes[0], nil
	default:
		return nil, fmt.Errorf("multiple volumes found for volume: %s", self.awsID)
	}
}
// waitForAttachmentStatus polls until the attachment status is the expected value
// TODO(justinsb): return (bool, error)
//
// Polls once per second, up to 60 attempts; no attachments at all is
// treated as status "detached". Returns nil on success and an error on
// timeout or API failure.
func (self *awsDisk) waitForAttachmentStatus(status string) error {
	// TODO: There may be a faster way to get this when we're attaching locally
	attempt := 0
	maxAttempts := 60
	for {
		info, err := self.describeVolume()
		if err != nil {
			return err
		}
		if len(info.Attachments) > 1 {
			glog.Warningf("Found multiple attachments for volume: %v", info)
		}
		attachmentStatus := ""
		for _, attachment := range info.Attachments {
			if attachmentStatus != "" {
				glog.Warning("Found multiple attachments: ", info)
			}
			if attachment.State != nil {
				attachmentStatus = *attachment.State
			} else {
				// Shouldn't happen, but don't panic...
				glog.Warning("Ignoring nil attachment state: ", attachment)
			}
		}
		if attachmentStatus == "" {
			attachmentStatus = "detached"
		}
		if attachmentStatus == status {
			return nil
		}
		glog.V(2).Infof("Waiting for volume state: actual=%s, desired=%s", attachmentStatus, status)
		attempt++
		if attempt > maxAttempts {
			glog.Warningf("Timeout waiting for volume state: actual=%s, desired=%s", attachmentStatus, status)
			return errors.New("Timeout waiting for volume state")
		}
		time.Sleep(1 * time.Second)
	}
}
// deleteVolume deletes the EBS disk. It returns true when the volume was
// deleted, and (false, nil) when the volume no longer exists.
func (self *awsDisk) deleteVolume() (bool, error) {
	request := &ec2.DeleteVolumeInput{VolumeId: aws.String(self.awsID)}
	_, err := self.ec2.DeleteVolume(request)
	if err == nil {
		return true, nil
	}
	// A missing volume is not an error: deletion is idempotent.
	if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
		return false, nil
	}
	return false, fmt.Errorf("error deleting EBS volumes: %v", err)
}
// Builds the awsInstance for the EC2 instance on which we are running.
// This is called when the AWSCloud is initialized, and should not be called otherwise (because the awsInstance for the local instance is a singleton with drive mapping state)
func (c *AWSCloud) buildSelfAWSInstance() (*awsInstance, error) {
	// Guard the singleton invariant: calling this twice would create a
	// second instance with its own (empty) device-mapping state.
	if c.selfAWSInstance != nil {
		panic("do not call buildSelfAWSInstance directly")
	}
	instanceId, err := c.metadata.GetMetadata("instance-id")
	if err != nil {
		return nil, fmt.Errorf("error fetching instance-id from ec2 metadata service: %v", err)
	}
	// We want to fetch the hostname via the EC2 metadata service
	// (`GetMetadata("local-hostname")`): But see #11543 - we need to use
	// the EC2 API to get the privateDnsName in case of a private DNS zone
	// e.g. mydomain.io, because the metadata service returns the wrong
	// hostname. Once we're doing that, we might as well get all our
	// information from the instance returned by the EC2 API - it is a
	// single API call to get all the information, and it means we don't
	// have two code paths.
	instance, err := c.getInstanceByID(instanceId)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %v", instanceId, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// getAwsInstance returns the awsInstance with node-name nodeName, or the
// 'self' instance when nodeName is "".
func (c *AWSCloud) getAwsInstance(nodeName string) (*awsInstance, error) {
	if nodeName == "" {
		return c.selfAWSInstance, nil
	}
	instance, err := c.getInstanceByNodeName(nodeName)
	if err != nil {
		return nil, fmt.Errorf("error finding instance %s: %v", nodeName, err)
	}
	return newAWSInstance(c.ec2, instance), nil
}
// Implements Volumes.AttachDisk
//
// Returns the in-instance device path ("/dev/xvdX") of the attached
// volume. Read-only attachment is rejected. The mount-device reservation
// taken by getMountDevice is released on failure (and once the attach has
// completed) via the attachEnded/defer pair.
func (c *AWSCloud) AttachDisk(diskName string, instanceName string, readOnly bool) (string, error) {
	disk, err := newAWSDisk(c, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, err := c.getAwsInstance(instanceName)
	if err != nil {
		return "", err
	}
	if readOnly {
		// TODO: We could enforce this when we mount the volume (?)
		// TODO: We could also snapshot the volume and attach copies of it
		return "", errors.New("AWS volumes cannot be mounted read-only")
	}
	mountDevice, alreadyAttached, err := awsInstance.getMountDevice(disk.awsID, true)
	if err != nil {
		return "", err
	}
	// Inside the instance, the mountpoint always looks like /dev/xvdX (?)
	hostDevice := "/dev/xvd" + string(mountDevice)
	// In the EC2 API, it is sometimes is /dev/sdX and sometimes /dev/xvdX
	// We are running on the node here, so we check if /dev/xvda exists to determine this
	ec2Device := "/dev/xvd" + string(mountDevice)
	if _, err := os.Stat("/dev/xvda"); os.IsNotExist(err) {
		ec2Device = "/dev/sd" + string(mountDevice)
	}
	// attachEnded is set to true if the attach operation completed
	// (successfully or not)
	attachEnded := false
	defer func() {
		if attachEnded {
			awsInstance.endAttaching(disk.awsID, mountDevice)
		}
	}()
	if !alreadyAttached {
		request := &ec2.AttachVolumeInput{
			Device:     aws.String(ec2Device),
			InstanceId: aws.String(awsInstance.awsID),
			VolumeId:   aws.String(disk.awsID),
		}
		attachResponse, err := c.ec2.AttachVolume(request)
		if err != nil {
			attachEnded = true
			// TODO: Check if the volume was concurrently attached?
			return "", fmt.Errorf("Error attaching EBS volume: %v", err)
		}
		glog.V(2).Infof("AttachVolume request returned %v", attachResponse)
	}
	err = disk.waitForAttachmentStatus("attached")
	if err != nil {
		return "", err
	}
	attachEnded = true
	return hostDevice, nil
}
// Implements Volumes.DetachDisk
//
// Issues DetachVolume, waits for the "detached" state, releases any
// device reservation, and returns the host device path that was in use.
// A detach request for a non-attached disk is logged but still issued.
func (aws *AWSCloud) DetachDisk(diskName string, instanceName string) (string, error) {
	disk, err := newAWSDisk(aws, diskName)
	if err != nil {
		return "", err
	}
	awsInstance, err := aws.getAwsInstance(instanceName)
	if err != nil {
		return "", err
	}
	// Query-only lookup (assign=false): find the device currently in use.
	mountDevice, alreadyAttached, err := awsInstance.getMountDevice(disk.awsID, false)
	if err != nil {
		return "", err
	}
	if !alreadyAttached {
		glog.Warning("DetachDisk called on non-attached disk: ", diskName)
		// TODO: Continue? Tolerate non-attached error in DetachVolume?
	}
	request := ec2.DetachVolumeInput{
		InstanceId: &awsInstance.awsID,
		VolumeId:   &disk.awsID,
	}
	response, err := aws.ec2.DetachVolume(&request)
	if err != nil {
		return "", fmt.Errorf("error detaching EBS volume: %v", err)
	}
	if response == nil {
		return "", errors.New("no response from DetachVolume")
	}
	err = disk.waitForAttachmentStatus("detached")
	if err != nil {
		return "", err
	}
	if mountDevice != "" {
		awsInstance.endAttaching(disk.awsID, mountDevice)
	}
	hostDevicePath := "/dev/xvd" + string(mountDevice)
	return hostDevicePath, err
}
// Implements Volumes.CreateVolume
//
// Creates an EBS volume in the local instance's AZ and returns its name
// in the "aws://<az>/<vol-id>" form. Requested tags (plus the cluster tag
// when configured) are applied after creation; if tagging fails the
// volume is deleted again so no untagged volume is left behind.
func (s *AWSCloud) CreateDisk(volumeOptions *VolumeOptions) (string, error) {
	// Default to creating in the current zone
	// TODO: Spread across zones?
	createAZ := s.selfAWSInstance.availabilityZone
	// TODO: Should we tag this with the cluster id (so it gets deleted when the cluster does?)
	request := &ec2.CreateVolumeInput{}
	request.AvailabilityZone = &createAZ
	volSize := int64(volumeOptions.CapacityGB)
	request.Size = &volSize
	request.VolumeType = aws.String(DefaultVolumeType)
	response, err := s.ec2.CreateVolume(request)
	if err != nil {
		return "", err
	}
	az := orEmpty(response.AvailabilityZone)
	awsID := orEmpty(response.VolumeId)
	volumeName := "aws://" + az + "/" + awsID
	// apply tags
	tags := make(map[string]string)
	for k, v := range volumeOptions.Tags {
		tags[k] = v
	}
	if s.getClusterName() != "" {
		tags[TagNameKubernetesCluster] = s.getClusterName()
	}
	if len(tags) != 0 {
		if err := s.createTags(awsID, tags); err != nil {
			// delete the volume and hope it succeeds
			_, delerr := s.DeleteDisk(volumeName)
			if delerr != nil {
				// delete did not succeed, we have a stray volume!
				return "", fmt.Errorf("error tagging volume %s, could not delete the volume: %v", volumeName, delerr)
			}
			return "", fmt.Errorf("error tagging volume %s: %v", volumeName, err)
		}
	}
	return volumeName, nil
}
// DeleteDisk implements Volumes.DeleteDisk: it parses the volume name and
// deletes the underlying EBS volume. The boolean reports whether a volume
// was actually deleted (false when it no longer existed).
func (c *AWSCloud) DeleteDisk(volumeName string) (bool, error) {
	awsDisk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return false, err
	}
	deleted, err := awsDisk.deleteVolume()
	return deleted, err
}
// Implements Volumes.GetVolumeLabels
//
// Returns the failure-domain (AZ) and region labels for the volume,
// derived from the volume's AvailabilityZone as reported by EC2.
func (c *AWSCloud) GetVolumeLabels(volumeName string) (map[string]string, error) {
	awsDisk, err := newAWSDisk(c, volumeName)
	if err != nil {
		return nil, err
	}
	info, err := awsDisk.describeVolume()
	if err != nil {
		return nil, err
	}
	labels := make(map[string]string)
	az := aws.StringValue(info.AvailabilityZone)
	if az == "" {
		return nil, fmt.Errorf("volume did not have AZ information: %q", info.VolumeId)
	}
	labels[unversioned.LabelZoneFailureDomain] = az
	region, err := azToRegion(az)
	if err != nil {
		return nil, err
	}
	labels[unversioned.LabelZoneRegion] = region
	return labels, nil
}
// Gets the current load balancer state
//
// Returns (nil, nil) when the load balancer does not exist
// ("LoadBalancerNotFound"); if multiple balancers share the name, the
// last one is returned and the duplicates are logged as errors.
func (s *AWSCloud) describeLoadBalancer(name string) (*elb.LoadBalancerDescription, error) {
	request := &elb.DescribeLoadBalancersInput{}
	request.LoadBalancerNames = []*string{&name}
	response, err := s.elb.DescribeLoadBalancers(request)
	if err != nil {
		if awsError, ok := err.(awserr.Error); ok {
			if awsError.Code() == "LoadBalancerNotFound" {
				return nil, nil
			}
		}
		return nil, err
	}
	var ret *elb.LoadBalancerDescription
	for _, loadBalancer := range response.LoadBalancerDescriptions {
		if ret != nil {
			glog.Errorf("Found multiple load balancers with name: %s", name)
		}
		ret = loadBalancer
	}
	return ret, nil
}
// findVPCID retrieves the instance's VPC ID from the metadata service by
// walking the instance's network interfaces; the first interface that
// reports a vpc-id wins.
func (self *AWSCloud) findVPCID() (string, error) {
	macs, err := self.metadata.GetMetadata("network/interfaces/macs/")
	if err != nil {
		return "", fmt.Errorf("Could not list interfaces of the instance: %v", err)
	}
	// loop over interfaces, first vpc id returned wins
	for _, macPath := range strings.Split(macs, "\n") {
		if macPath == "" {
			continue
		}
		url := fmt.Sprintf("network/interfaces/macs/%svpc-id", macPath)
		vpcID, err := self.metadata.GetMetadata(url)
		if err == nil {
			return vpcID, nil
		}
	}
	return "", fmt.Errorf("Could not find VPC ID in instance metadata")
}
// findSecurityGroup retrieves the specified security group from the AWS
// API, or returns nil if not found. Lookup is by ID, so no tag filters
// are applied; more than one match indicates an API anomaly and is an error.
func (s *AWSCloud) findSecurityGroup(securityGroupId string) (*ec2.SecurityGroup, error) {
	describeSecurityGroupsRequest := &ec2.DescribeSecurityGroupsInput{
		GroupIds: []*string{&securityGroupId},
	}
	// We don't apply our tag filters because we are retrieving by ID
	groups, err := s.ec2.DescribeSecurityGroups(describeSecurityGroupsRequest)
	if err != nil {
		glog.Warningf("Error retrieving security group: %q", err)
		return nil, err
	}
	switch len(groups) {
	case 0:
		return nil, nil
	case 1:
		return groups[0], nil
	default:
		// This should not be possible - ids should be unique
		return nil, fmt.Errorf("multiple security groups found with same id %q", securityGroupId)
	}
}
// isEqualIntPointer reports whether two *int64 values are equal: both nil,
// or both non-nil and pointing at equal values.
func isEqualIntPointer(l, r *int64) bool {
	if l == nil || r == nil {
		return l == nil && r == nil
	}
	return *l == *r
}
// isEqualStringPointer reports whether two *string values are equal: both
// nil, or both non-nil and pointing at equal strings.
func isEqualStringPointer(l, r *string) bool {
	if l == nil || r == nil {
		return l == nil && r == nil
	}
	return *l == *r
}
// ipPermissionExists reports whether newPermission is covered by existing:
// same ports/protocol, every IP range of newPermission present in existing,
// and every user/group pair of newPermission present in existing.
//
// Fix: the original returned from inside the UserIdGroupPairs loop on the
// FIRST left pair (true on any match, false otherwise), so any pairs after
// the first were never checked. Now every pair in newPermission must be
// matched in existing.
func ipPermissionExists(newPermission, existing *ec2.IpPermission, compareGroupUserIDs bool) bool {
	if !isEqualIntPointer(newPermission.FromPort, existing.FromPort) {
		return false
	}
	if !isEqualIntPointer(newPermission.ToPort, existing.ToPort) {
		return false
	}
	if !isEqualStringPointer(newPermission.IpProtocol, existing.IpProtocol) {
		return false
	}
	// Check only if newPermission is a subset of existing. Usually it has zero or one elements.
	// Not doing actual CIDR math yet; not clear it's needed, either.
	glog.V(4).Infof("Comparing %v to %v", newPermission, existing)
	if len(newPermission.IpRanges) > len(existing.IpRanges) {
		return false
	}
	for j := range newPermission.IpRanges {
		found := false
		for k := range existing.IpRanges {
			if isEqualStringPointer(newPermission.IpRanges[j].CidrIp, existing.IpRanges[k].CidrIp) {
				found = true
				break
			}
		}
		if found == false {
			return false
		}
	}
	// Every user/group pair in newPermission must appear in existing.
	for _, leftPair := range newPermission.UserIdGroupPairs {
		found := false
		for _, rightPair := range existing.UserIdGroupPairs {
			if isEqualUserGroupPair(leftPair, rightPair, compareGroupUserIDs) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// isEqualUserGroupPair reports whether two user/group pairs refer to the
// same security group, additionally requiring matching user IDs when
// compareGroupUserIDs is set.
//
// Fix: the debug log dereferenced l.GroupId/r.GroupId unconditionally,
// which panics when either is nil; aws.StringValue handles nil safely.
func isEqualUserGroupPair(l, r *ec2.UserIdGroupPair, compareGroupUserIDs bool) bool {
	glog.V(2).Infof("Comparing %v to %v", aws.StringValue(l.GroupId), aws.StringValue(r.GroupId))
	if isEqualStringPointer(l.GroupId, r.GroupId) {
		if compareGroupUserIDs {
			if isEqualStringPointer(l.UserId, r.UserId) {
				return true
			}
		} else {
			return true
		}
	}
	return false
}
// Makes sure the security group ingress is exactly the specified permissions
// Returns true if and only if changes were made
// The security group must already exist
//
// Both the desired and actual permission sets are ungrouped before
// diffing; additions are authorized and removals revoked via the EC2 API.
func (s *AWSCloud) setSecurityGroupIngress(securityGroupId string, permissions IPPermissionSet) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warning("Error retrieving security group", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupId)
	}
	glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions)
	actual := NewIPPermissionSet(group.IpPermissions...)
	// EC2 groups rules together, for example combining:
	//
	// { Port=80, Range=[A] } and { Port=80, Range=[B] }
	//
	// into { Port=80, Range=[A,B] }
	//
	// We have to ungroup them, because otherwise the logic becomes really
	// complicated, and also because if we have Range=[A,B] and we try to
	// add Range=[A] then EC2 complains about a duplicate rule.
	permissions = permissions.Ungroup()
	actual = actual.Ungroup()
	remove := actual.Difference(permissions)
	add := permissions.Difference(actual)
	if add.Len() == 0 && remove.Len() == 0 {
		return false, nil
	}
	// TODO: There is a limit in VPC of 100 rules per security group, so we
	// probably should try grouping or combining to fit under this limit.
	// But this is only used on the ELB security group currently, so it
	// would require (ports * CIDRS) > 100. Also, it isn't obvious exactly
	// how removing single permissions from compound rules works, and we
	// don't want to accidentally open more than intended while we're
	// applying changes.
	if add.Len() != 0 {
		glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, add.List())
		request := &ec2.AuthorizeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupId
		request.IpPermissions = add.List()
		_, err = s.ec2.AuthorizeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error authorizing security group ingress: %v", err)
		}
	}
	if remove.Len() != 0 {
		glog.V(2).Infof("Remove security group ingress: %s %v", securityGroupId, remove.List())
		request := &ec2.RevokeSecurityGroupIngressInput{}
		request.GroupId = &securityGroupId
		request.IpPermissions = remove.List()
		_, err = s.ec2.RevokeSecurityGroupIngress(request)
		if err != nil {
			return false, fmt.Errorf("error revoking security group ingress: %v", err)
		}
	}
	return true, nil
}
// Makes sure the security group includes the specified permissions.
// Returns true if and only if changes were made.
// The security group must already exist.
func (s *AWSCloud) addSecurityGroupIngress(securityGroupId string, addPermissions []*ec2.IpPermission) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warningf("Error retrieving security group: %v", err)
		return false, err
	}
	if group == nil {
		return false, fmt.Errorf("security group not found: %s", securityGroupId)
	}
	glog.V(2).Infof("Existing security group ingress: %s %v", securityGroupId, group.IpPermissions)
	changes := []*ec2.IpPermission{}
	for _, addPermission := range addPermissions {
		// If any pair carries a UserId, the comparison must also match
		// user ids; otherwise group-id equality is sufficient.
		hasUserID := false
		for i := range addPermission.UserIdGroupPairs {
			if addPermission.UserIdGroupPairs[i].UserId != nil {
				hasUserID = true
			}
		}
		found := false
		for _, groupPermission := range group.IpPermissions {
			if ipPermissionExists(addPermission, groupPermission, hasUserID) {
				found = true
				break
			}
		}
		// Only authorize permissions that are not already present.
		if !found {
			changes = append(changes, addPermission)
		}
	}
	if len(changes) == 0 {
		return false, nil
	}
	glog.V(2).Infof("Adding security group ingress: %s %v", securityGroupId, changes)
	request := &ec2.AuthorizeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupId
	request.IpPermissions = changes
	_, err = s.ec2.AuthorizeSecurityGroupIngress(request)
	if err != nil {
		// Warningf (was Warning) so the error is formatted consistently with
		// the retrieval warning above.
		glog.Warningf("Error authorizing security group ingress: %v", err)
		return false, fmt.Errorf("error authorizing security group ingress: %v", err)
	}
	return true, nil
}
// Makes sure the security group no longer includes the specified permissions.
// Returns true if and only if changes were made.
// If the security group no longer exists, will return (false, nil).
func (s *AWSCloud) removeSecurityGroupIngress(securityGroupId string, removePermissions []*ec2.IpPermission) (bool, error) {
	group, err := s.findSecurityGroup(securityGroupId)
	if err != nil {
		glog.Warningf("Error retrieving security group: %v", err)
		return false, err
	}
	if group == nil {
		glog.Warning("Security group not found: ", securityGroupId)
		return false, nil
	}
	// Collect the permissions that are actually present on the group.
	changes := []*ec2.IpPermission{}
	for _, candidate := range removePermissions {
		// Match on user ids too when any pair carries one.
		hasUserID := false
		for _, pair := range candidate.UserIdGroupPairs {
			if pair.UserId != nil {
				hasUserID = true
			}
		}
		matched := false
		for _, existing := range group.IpPermissions {
			if ipPermissionExists(candidate, existing, hasUserID) {
				matched = true
				break
			}
		}
		if matched {
			changes = append(changes, candidate)
		}
	}
	if len(changes) == 0 {
		return false, nil
	}
	glog.V(2).Infof("Removing security group ingress: %s %v", securityGroupId, changes)
	request := &ec2.RevokeSecurityGroupIngressInput{}
	request.GroupId = &securityGroupId
	request.IpPermissions = changes
	if _, err := s.ec2.RevokeSecurityGroupIngress(request); err != nil {
		glog.Warningf("Error revoking security group ingress: %v", err)
		return false, err
	}
	return true, nil
}
// Ensure that a resource has the correct tags.
// If it has no tags, we assume that this was a problem caused by an error in
// between creation and tagging, and we add the tags. If it has a different
// cluster's tags, that is an error.
func (s *AWSCloud) ensureClusterTags(resourceID string, tags []*ec2.Tag) error {
	current := map[string]string{}
	for _, t := range tags {
		current[aws.StringValue(t.Key)] = aws.StringValue(t.Value)
	}
	missing := map[string]string{}
	for key, want := range s.filterTags {
		switch got := current[key]; {
		case got == want:
			// Tag already correct; nothing to do.
		case got == "":
			glog.Warningf("Resource %q was missing expected cluster tag %q. Will add (with value %q)", resourceID, key, want)
			missing[key] = want
		default:
			return fmt.Errorf("resource %q has tag belonging to another cluster: %q=%q (expected %q)", resourceID, key, got, want)
		}
	}
	// createTags is a no-op when there is nothing to add.
	if err := s.createTags(resourceID, missing); err != nil {
		return fmt.Errorf("error adding missing tags to resource %q: %v", resourceID, err)
	}
	return nil
}
// Makes sure the security group exists.
// For multi-cluster isolation, name must be globally unique, for example derived from the service UUID.
// Returns the security group id or error
//
// Implemented as a read-then-create loop: describe by (group-name, vpc-id);
// if found, verify/repair cluster tags and return its id; otherwise create
// it, retrying on the InvalidGroup.Duplicate race up to
// MaxReadThenCreateRetries attempts.
func (s *AWSCloud) ensureSecurityGroup(name string, description string) (string, error) {
	groupID := ""
	attempt := 0
	for {
		attempt++
		request := &ec2.DescribeSecurityGroupsInput{}
		filters := []*ec2.Filter{
			newEc2Filter("group-name", name),
			newEc2Filter("vpc-id", s.vpcID),
		}
		// Note that we do _not_ add our tag filters; group-name + vpc-id is the EC2 primary key.
		// However, we do check that it matches our tags.
		// If it doesn't have any tags, we tag it; this is how we recover if we failed to tag before.
		// If it has a different cluster's tags, that is an error.
		// This shouldn't happen because name is expected to be globally unique (UUID derived)
		request.Filters = filters
		securityGroups, err := s.ec2.DescribeSecurityGroups(request)
		if err != nil {
			return "", err
		}
		if len(securityGroups) >= 1 {
			if len(securityGroups) > 1 {
				glog.Warningf("Found multiple security groups with name: %q", name)
			}
			// Repair missing tags (or fail if the group belongs to another
			// cluster) before handing back the id.
			err := s.ensureClusterTags(aws.StringValue(securityGroups[0].GroupId), securityGroups[0].Tags)
			if err != nil {
				return "", err
			}
			return aws.StringValue(securityGroups[0].GroupId), nil
		}
		// Not found: try to create it.
		createRequest := &ec2.CreateSecurityGroupInput{}
		createRequest.VpcId = &s.vpcID
		createRequest.GroupName = &name
		createRequest.Description = &description
		createResponse, err := s.ec2.CreateSecurityGroup(createRequest)
		if err != nil {
			ignore := false
			switch err := err.(type) {
			case awserr.Error:
				// Another caller may have created the group between our
				// describe and create; loop back and re-read it.
				if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries {
					glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry")
					ignore = true
				}
			}
			if !ignore {
				glog.Error("Error creating security group: ", err)
				return "", err
			}
			time.Sleep(1 * time.Second)
		} else {
			groupID = orEmpty(createResponse.GroupId)
			break
		}
	}
	if groupID == "" {
		return "", fmt.Errorf("created security group, but id was not returned: %s", name)
	}
	// Tag the newly created group with our cluster tags.
	err := s.createTags(groupID, s.filterTags)
	if err != nil {
		// If we retry, ensureClusterTags will recover from this - it
		// will add the missing tags.  We could delete the security
		// group here, but that doesn't feel like the right thing, as
		// the caller is likely to retry the create
		return "", fmt.Errorf("error tagging security group: %v", err)
	}
	return groupID, nil
}
// createTags calls EC2 CreateTags, but adds retry-on-failure logic
// We retry mainly because if we create an object, we cannot tag it until it is "fully created" (eventual consistency)
// The error code varies though (depending on what we are tagging), so we simply retry on all errors
func (s *AWSCloud) createTags(resourceID string, tags map[string]string) error {
	// Nothing to do for an empty tag set; len() of a nil map is 0, so the
	// explicit nil check the previous code made was redundant.
	if len(tags) == 0 {
		return nil
	}
	var awsTags []*ec2.Tag
	for k, v := range tags {
		tag := &ec2.Tag{
			Key:   aws.String(k),
			Value: aws.String(v),
		}
		awsTags = append(awsTags, tag)
	}
	request := &ec2.CreateTagsInput{}
	request.Resources = []*string{&resourceID}
	request.Tags = awsTags
	// TODO: We really should do exponential backoff here
	attempt := 0
	maxAttempts := 60
	for {
		_, err := s.ec2.CreateTags(request)
		if err == nil {
			return nil
		}
		// We could check that the error is retryable, but the error code changes based on what we are tagging
		// SecurityGroup: InvalidGroup.NotFound
		attempt++
		if attempt > maxAttempts {
			glog.Warningf("Failed to create tags (too many attempts): %v", err)
			return err
		}
		glog.V(2).Infof("Failed to create tags; will retry.  Error was %v", err)
		time.Sleep(1 * time.Second)
	}
}
// findTag looks up key in tags, returning its value and whether it was found.
func findTag(tags []*ec2.Tag, key string) (string, bool) {
	for _, t := range tags {
		if key == aws.StringValue(t.Key) {
			return aws.StringValue(t.Value), true
		}
	}
	return "", false
}
// Finds the subnets associated with the cluster, by matching tags.
// For maximal backwards compatability, if no subnets are tagged, it will fall-back to the current subnet.
// However, in future this will likely be treated as an error.
func (c *AWSCloud) findSubnets() ([]*ec2.Subnet, error) {
	// First, look for subnets in our VPC that carry the cluster tags.
	tagged := &ec2.DescribeSubnetsInput{
		Filters: c.addFilters([]*ec2.Filter{newEc2Filter("vpc-id", c.vpcID)}),
	}
	subnets, err := c.ec2.DescribeSubnets(tagged)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %v", err)
	}
	if len(subnets) > 0 {
		return subnets, nil
	}
	// Fall back to the current instance subnets, if nothing is tagged
	glog.Warningf("No tagged subnets found; will fall-back to the current subnet only. This is likely to be an error in a future version of k8s.")
	fallback := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{newEc2Filter("subnet-id", c.selfAWSInstance.subnetID)},
	}
	subnets, err = c.ec2.DescribeSubnets(fallback)
	if err != nil {
		return nil, fmt.Errorf("error describing subnets: %v", err)
	}
	return subnets, nil
}
// Finds the subnets to use for an ELB we are creating.
// Normal (Internet-facing) ELBs must use public subnets, so we skip private subnets.
// Internal ELBs can use public or private subnets, but if we have a private subnet we should prefer that.
//
// Returns at most one subnet id per availability zone; ties within a zone are
// broken by the internal/public ELB role tag, otherwise arbitrarily.
func (s *AWSCloud) findELBSubnets(internalELB bool) ([]string, error) {
	vpcIDFilter := newEc2Filter("vpc-id", s.vpcID)
	subnets, err := s.findSubnets()
	if err != nil {
		return nil, err
	}
	// Route tables are needed to classify each subnet as public or private.
	rRequest := &ec2.DescribeRouteTablesInput{}
	rRequest.Filters = []*ec2.Filter{vpcIDFilter}
	rt, err := s.ec2.DescribeRouteTables(rRequest)
	if err != nil {
		return nil, fmt.Errorf("error describe route table: %v", err)
	}
	// Pick at most one candidate subnet per availability zone.
	subnetsByAZ := make(map[string]*ec2.Subnet)
	for _, subnet := range subnets {
		az := aws.StringValue(subnet.AvailabilityZone)
		id := aws.StringValue(subnet.SubnetId)
		if az == "" || id == "" {
			glog.Warningf("Ignoring subnet with empty az/id: %v", subnet)
			continue
		}
		isPublic, err := isSubnetPublic(rt, id)
		if err != nil {
			return nil, err
		}
		// Internet-facing ELBs cannot be placed in private subnets.
		if !internalELB && !isPublic {
			glog.V(2).Infof("Ignoring private subnet for public ELB %q", id)
			continue
		}
		existing := subnetsByAZ[az]
		if existing == nil {
			subnetsByAZ[az] = subnet
			continue
		}
		// Try to break the tie using a tag
		var tagName string
		if internalELB {
			tagName = TagNameSubnetInternalELB
		} else {
			tagName = TagNameSubnetPublicELB
		}
		_, existingHasTag := findTag(existing.Tags, tagName)
		_, subnetHasTag := findTag(subnet.Tags, tagName)
		// When exactly one of the pair carries the role tag, the tagged
		// subnet wins.
		if existingHasTag != subnetHasTag {
			if subnetHasTag {
				subnetsByAZ[az] = subnet
			}
			continue
		}
		// TODO: Should this be an error?
		glog.Warningf("Found multiple subnets in AZ %q; making arbitrary choice between subnets %q and %q", az, *existing.SubnetId, *subnet.SubnetId)
		continue
	}
	// Return just the ids of the chosen subnets.
	var subnetIDs []string
	for _, subnet := range subnetsByAZ {
		subnetIDs = append(subnetIDs, aws.StringValue(subnet.SubnetId))
	}
	return subnetIDs, nil
}
// isSubnetPublic reports whether the given subnet has a route to an internet
// gateway, consulting the supplied route tables. A subnet with no explicit
// route-table association falls back to the VPC's main route table.
func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) {
	// Find the route table explicitly associated with this subnet.
	var subnetTable *ec2.RouteTable
	for _, table := range rt {
		for _, assoc := range table.Associations {
			if aws.StringValue(assoc.SubnetId) == subnetID {
				subnetTable = table
				break
			}
		}
	}
	if subnetTable == nil {
		// If there is no explicit association, the subnet will be implicitly
		// associated with the VPC's main routing table.
		for _, table := range rt {
			for _, assoc := range table.Associations {
				// Idiomatic boolean test (was `== true`).
				if aws.BoolValue(assoc.Main) {
					glog.V(4).Infof("Assuming implicit use of main routing table %s for %s",
						aws.StringValue(table.RouteTableId), subnetID)
					subnetTable = table
					break
				}
			}
		}
	}
	if subnetTable == nil {
		// Lowercase error string per Go convention (was "Could not ...").
		return false, fmt.Errorf("could not locate routing table for subnet %s", subnetID)
	}
	for _, route := range subnetTable.Routes {
		// There is no direct way in the AWS API to determine if a subnet is public or private.
		// A public subnet is one which has an internet gateway route
		// we look for the gatewayId and make sure it has the prefix of igw to differentiate
		// from the default in-subnet route which is called "local"
		// or other virtual gateway (starting with vgv)
		// or vpc peering connections (starting with pcx).
		if strings.HasPrefix(aws.StringValue(route.GatewayId), "igw") {
			return true, nil
		}
	}
	return false, nil
}
// EnsureLoadBalancer implements LoadBalancer.EnsureLoadBalancer
//
// Validates the service spec (TCP-only, no LoadBalancerIP, at least one
// port), locates suitable subnets, creates/reconciles the ELB's security
// group and ingress rules, builds the ELB with its listeners, configures the
// health check, opens instance security groups to the ELB, and registers the
// instances. Returns the resulting load-balancer status.
func (s *AWSCloud) EnsureLoadBalancer(apiService *api.Service, hosts []string, annotations map[string]string) (*api.LoadBalancerStatus, error) {
	glog.V(2).Infof("EnsureLoadBalancer(%v, %v, %v, %v, %v, %v, %v)",
		apiService.Namespace, apiService.Name, s.region, apiService.Spec.LoadBalancerIP, apiService.Spec.Ports, hosts, annotations)
	if apiService.Spec.SessionAffinity != api.ServiceAffinityNone {
		// ELB supports sticky sessions, but only when configured for HTTP/HTTPS
		return nil, fmt.Errorf("unsupported load balancer affinity: %v", apiService.Spec.SessionAffinity)
	}
	if len(apiService.Spec.Ports) == 0 {
		return nil, fmt.Errorf("requested load balancer with no ports")
	}
	// Only TCP service ports are supported by this implementation.
	for _, port := range apiService.Spec.Ports {
		if port.Protocol != api.ProtocolTCP {
			return nil, fmt.Errorf("Only TCP LoadBalancer is supported for AWS ELB")
		}
	}
	if apiService.Spec.LoadBalancerIP != "" {
		return nil, fmt.Errorf("LoadBalancerIP cannot be specified for AWS ELB")
	}
	instances, err := s.getInstancesByNodeNames(hosts)
	if err != nil {
		return nil, err
	}
	sourceRanges, err := service.GetLoadBalancerSourceRanges(annotations)
	if err != nil {
		return nil, err
	}
	// Determine if this is tagged as an Internal ELB
	internalELB := false
	internalAnnotation := annotations[ServiceAnnotationLoadBalancerInternal]
	if internalAnnotation != "" {
		if internalAnnotation != "0.0.0.0/0" {
			return nil, fmt.Errorf("annotation %q=%q detected, but the only value supported currently is 0.0.0.0/0", ServiceAnnotationLoadBalancerInternal, internalAnnotation)
		}
		if !service.IsAllowAll(sourceRanges) {
			// TODO: Unify the two annotations
			return nil, fmt.Errorf("source-range annotation cannot be combined with the internal-elb annotation")
		}
		internalELB = true
	}
	// Find the subnets that the ELB will live in
	subnetIDs, err := s.findELBSubnets(internalELB)
	if err != nil {
		glog.Error("Error listing subnets in VPC: ", err)
		return nil, err
	}
	// Bail out early if there are no subnets
	if len(subnetIDs) == 0 {
		return nil, fmt.Errorf("could not find any suitable subnets for creating the ELB")
	}
	loadBalancerName := cloudprovider.GetLoadBalancerName(apiService)
	serviceName := types.NamespacedName{Namespace: apiService.Namespace, Name: apiService.Name}
	// Create a security group for the load balancer
	var securityGroupID string
	{
		sgName := "k8s-elb-" + loadBalancerName
		sgDescription := fmt.Sprintf("Security group for Kubernetes ELB %s (%v)", loadBalancerName, serviceName)
		securityGroupID, err = s.ensureSecurityGroup(sgName, sgDescription)
		if err != nil {
			glog.Error("Error creating load balancer security group: ", err)
			return nil, err
		}
		ec2SourceRanges := []*ec2.IpRange{}
		for _, sourceRange := range sourceRanges.StringSlice() {
			ec2SourceRanges = append(ec2SourceRanges, &ec2.IpRange{CidrIp: aws.String(sourceRange)})
		}
		// Desired ingress: one rule per service port, each allowing all of
		// the configured source ranges.
		permissions := NewIPPermissionSet()
		for _, port := range apiService.Spec.Ports {
			portInt64 := int64(port.Port)
			protocol := strings.ToLower(string(port.Protocol))
			permission := &ec2.IpPermission{}
			permission.FromPort = &portInt64
			permission.ToPort = &portInt64
			permission.IpRanges = ec2SourceRanges
			permission.IpProtocol = &protocol
			permissions.Insert(permission)
		}
		// Reconcile the group's ingress to exactly these permissions.
		_, err = s.setSecurityGroupIngress(securityGroupID, permissions)
		if err != nil {
			return nil, err
		}
	}
	securityGroupIDs := []string{securityGroupID}
	// Figure out what mappings we want on the load balancer
	listeners := []*elb.Listener{}
	for _, port := range apiService.Spec.Ports {
		if port.NodePort == 0 {
			glog.Errorf("Ignoring port without NodePort defined: %v", port)
			continue
		}
		instancePort := int64(port.NodePort)
		loadBalancerPort := int64(port.Port)
		protocol := strings.ToLower(string(port.Protocol))
		listener := &elb.Listener{}
		listener.InstancePort = &instancePort
		listener.LoadBalancerPort = &loadBalancerPort
		listener.Protocol = &protocol
		listener.InstanceProtocol = &protocol
		listeners = append(listeners, listener)
	}
	// Build the load balancer itself
	loadBalancer, err := s.ensureLoadBalancer(serviceName, loadBalancerName, listeners, subnetIDs, securityGroupIDs, internalELB)
	if err != nil {
		return nil, err
	}
	err = s.ensureLoadBalancerHealthCheck(loadBalancer, listeners)
	if err != nil {
		return nil, err
	}
	// Open the instances' security groups to traffic from the ELB.
	err = s.updateInstanceSecurityGroupsForLoadBalancer(loadBalancer, instances)
	if err != nil {
		glog.Warningf("Error opening ingress rules for the load balancer to the instances: %v", err)
		return nil, err
	}
	err = s.ensureLoadBalancerInstances(orEmpty(loadBalancer.LoadBalancerName), loadBalancer.Instances, instances)
	if err != nil {
		glog.Warningf("Error registering instances with the load balancer: %v", err)
		return nil, err
	}
	glog.V(1).Infof("Loadbalancer %s (%v) has DNS name %s", loadBalancerName, serviceName, orEmpty(loadBalancer.DNSName))
	// TODO: Wait for creation?
	status := toStatus(loadBalancer)
	return status, nil
}
// GetLoadBalancer is an implementation of LoadBalancer.GetLoadBalancer
func (s *AWSCloud) GetLoadBalancer(service *api.Service) (*api.LoadBalancerStatus, bool, error) {
	name := cloudprovider.GetLoadBalancerName(service)
	lb, err := s.describeLoadBalancer(name)
	switch {
	case err != nil:
		return nil, false, err
	case lb == nil:
		// No ELB with this name: report "does not exist" without error.
		return nil, false, nil
	}
	return toStatus(lb), true, nil
}
// toStatus converts an ELB description into a LoadBalancerStatus, exposing
// the ELB's DNS name (when present) as a hostname-based ingress point.
func toStatus(lb *elb.LoadBalancerDescription) *api.LoadBalancerStatus {
	status := &api.LoadBalancerStatus{}
	if isNilOrEmpty(lb.DNSName) {
		return status
	}
	status.Ingress = []api.LoadBalancerIngress{
		{Hostname: orEmpty(lb.DNSName)},
	}
	return status
}
// Returns the first security group for an instance, or nil.
// We only create instances with one security group, so we don't expect multiple security groups.
// However, if there are multiple security groups, we will choose the one tagged with our cluster filter.
// Otherwise we will return an error.
func findSecurityGroupForInstance(instance *ec2.Instance, taggedSecurityGroups map[string]*ec2.SecurityGroup) (*ec2.GroupIdentifier, error) {
	instanceID := aws.StringValue(instance.InstanceId)
	// Partition the instance's groups into cluster-tagged and untagged.
	var tagged []*ec2.GroupIdentifier
	var untagged []*ec2.GroupIdentifier
	for _, group := range instance.SecurityGroups {
		groupID := aws.StringValue(group.GroupId)
		if groupID == "" {
			glog.Warningf("Ignoring security group without id for instance %q: %v", instanceID, group)
			continue
		}
		_, isTagged := taggedSecurityGroups[groupID]
		if isTagged {
			tagged = append(tagged, group)
		} else {
			untagged = append(untagged, group)
		}
	}
	if len(tagged) > 0 {
		// We create instances with one SG
		// If users create multiple SGs, they must tag one of them as being k8s owned
		if len(tagged) != 1 {
			// Error strings are lowercase per Go convention.
			return nil, fmt.Errorf("multiple tagged security groups found for instance %s; ensure only the k8s security group is tagged", instanceID)
		}
		return tagged[0], nil
	}
	if len(untagged) > 0 {
		// For back-compat, we will allow a single untagged SG
		if len(untagged) != 1 {
			return nil, fmt.Errorf("multiple untagged security groups found for instance %s; ensure the k8s security group is tagged", instanceID)
		}
		return untagged[0], nil
	}
	glog.Warningf("No security group found for instance %q", instanceID)
	return nil, nil
}
// Return all the security groups that are tagged as being part of our
// cluster, keyed by group id.
func (s *AWSCloud) getTaggedSecurityGroups() (map[string]*ec2.SecurityGroup, error) {
	request := &ec2.DescribeSecurityGroupsInput{Filters: s.addFilters(nil)}
	groups, err := s.ec2.DescribeSecurityGroups(request)
	if err != nil {
		return nil, fmt.Errorf("error querying security groups: %v", err)
	}
	byID := make(map[string]*ec2.SecurityGroup, len(groups))
	for _, g := range groups {
		id := aws.StringValue(g.GroupId)
		if id == "" {
			glog.Warningf("Ignoring group without id: %v", g)
			continue
		}
		byID[id] = g
	}
	return byID, nil
}
// Open security group ingress rules on the instances so that the load balancer can talk to them
// Will also remove any security groups ingress rules for the load balancer that are _not_ needed for allInstances
//
// Passing allInstances as nil/empty therefore revokes all load-balancer
// ingress rules (used on deletion). No-op when security-group ingress is
// globally disabled in the configuration.
func (s *AWSCloud) updateInstanceSecurityGroupsForLoadBalancer(lb *elb.LoadBalancerDescription, allInstances []*ec2.Instance) error {
	if s.cfg.Global.DisableSecurityGroupIngress {
		return nil
	}
	// Determine the load balancer security group id
	loadBalancerSecurityGroupId := ""
	for _, securityGroup := range lb.SecurityGroups {
		if isNilOrEmpty(securityGroup) {
			continue
		}
		if loadBalancerSecurityGroupId != "" {
			// We create LBs with one SG
			glog.Warningf("Multiple security groups for load balancer: %q", orEmpty(lb.LoadBalancerName))
		}
		loadBalancerSecurityGroupId = *securityGroup
	}
	if loadBalancerSecurityGroupId == "" {
		return fmt.Errorf("Could not determine security group for load balancer: %s", orEmpty(lb.LoadBalancerName))
	}
	// Get the actual list of groups that allow ingress from the load-balancer
	describeRequest := &ec2.DescribeSecurityGroupsInput{}
	filters := []*ec2.Filter{}
	filters = append(filters, newEc2Filter("ip-permission.group-id", loadBalancerSecurityGroupId))
	describeRequest.Filters = s.addFilters(filters)
	actualGroups, err := s.ec2.DescribeSecurityGroups(describeRequest)
	if err != nil {
		return fmt.Errorf("error querying security groups for ELB: %v", err)
	}
	taggedSecurityGroups, err := s.getTaggedSecurityGroups()
	if err != nil {
		return fmt.Errorf("error querying for tagged security groups: %v", err)
	}
	// Open the firewall from the load balancer to the instance
	// We don't actually have a trivial way to know in advance which security group the instance is in
	// (it is probably the minion security group, but we don't easily have that).
	// However, we _do_ have the list of security groups on the instance records.
	// Map containing the changes we want to make; true to add, false to remove
	instanceSecurityGroupIds := map[string]bool{}
	// Scan instances for groups we want open
	for _, instance := range allInstances {
		securityGroup, err := findSecurityGroupForInstance(instance, taggedSecurityGroups)
		if err != nil {
			return err
		}
		if securityGroup == nil {
			glog.Warning("Ignoring instance without security group: ", orEmpty(instance.InstanceId))
			continue
		}
		id := aws.StringValue(securityGroup.GroupId)
		if id == "" {
			glog.Warningf("found security group without id: %v", securityGroup)
			continue
		}
		instanceSecurityGroupIds[id] = true
	}
	// Compare to actual groups
	for _, actualGroup := range actualGroups {
		actualGroupID := aws.StringValue(actualGroup.GroupId)
		if actualGroupID == "" {
			glog.Warning("Ignoring group without ID: ", actualGroup)
			continue
		}
		adding, found := instanceSecurityGroupIds[actualGroupID]
		if found && adding {
			// We don't need to make a change; the permission is already in place
			delete(instanceSecurityGroupIds, actualGroupID)
		} else {
			// This group is not needed by allInstances; delete it
			instanceSecurityGroupIds[actualGroupID] = false
		}
	}
	// Apply the computed additions and removals: an all-protocol rule whose
	// source is the load balancer's security group.
	for instanceSecurityGroupId, add := range instanceSecurityGroupIds {
		if add {
			glog.V(2).Infof("Adding rule for traffic from the load balancer (%s) to instances (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId)
		} else {
			glog.V(2).Infof("Removing rule for traffic from the load balancer (%s) to instance (%s)", loadBalancerSecurityGroupId, instanceSecurityGroupId)
		}
		sourceGroupId := &ec2.UserIdGroupPair{}
		sourceGroupId.GroupId = &loadBalancerSecurityGroupId
		allProtocols := "-1"
		permission := &ec2.IpPermission{}
		permission.IpProtocol = &allProtocols
		permission.UserIdGroupPairs = []*ec2.UserIdGroupPair{sourceGroupId}
		permissions := []*ec2.IpPermission{permission}
		if add {
			changed, err := s.addSecurityGroupIngress(instanceSecurityGroupId, permissions)
			if err != nil {
				return err
			}
			if !changed {
				glog.Warning("Allowing ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId)
			}
		} else {
			changed, err := s.removeSecurityGroupIngress(instanceSecurityGroupId, permissions)
			if err != nil {
				return err
			}
			if !changed {
				glog.Warning("Revoking ingress was not needed; concurrent change? groupId=", instanceSecurityGroupId)
			}
		}
	}
	return nil
}
// EnsureLoadBalancerDeleted implements LoadBalancer.EnsureLoadBalancerDeleted.
//
// Revokes the ELB's ingress into the instance security groups, deletes the
// load balancer itself, then deletes its security group(s) — polling for up
// to 10 minutes, because the security group cannot be removed until the ELB
// has finished deleting in the background.
func (s *AWSCloud) EnsureLoadBalancerDeleted(service *api.Service) error {
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	lb, err := s.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		glog.Info("Load balancer already deleted: ", loadBalancerName)
		return nil
	}
	{
		// De-authorize the load balancer security group from the instances security group
		// (nil instance list => all ELB ingress rules are revoked).
		err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, nil)
		if err != nil {
			glog.Error("Error deregistering load balancer from instance security groups: ", err)
			return err
		}
	}
	{
		// Delete the load balancer itself
		request := &elb.DeleteLoadBalancerInput{}
		request.LoadBalancerName = lb.LoadBalancerName
		_, err = s.elb.DeleteLoadBalancer(request)
		if err != nil {
			// TODO: Check if error was because load balancer was concurrently deleted
			glog.Error("Error deleting load balancer: ", err)
			return err
		}
	}
	{
		// Delete the security group(s) for the load balancer
		// Note that this is annoying: the load balancer disappears from the API immediately, but it is still
		// deleting in the background.  We get a DependencyViolation until the load balancer has deleted itself
		// Collect the security groups to delete
		securityGroupIDs := map[string]struct{}{}
		for _, securityGroupID := range lb.SecurityGroups {
			if isNilOrEmpty(securityGroupID) {
				glog.Warning("Ignoring empty security group in ", service.Name)
				continue
			}
			securityGroupIDs[*securityGroupID] = struct{}{}
		}
		// Loop through and try to delete them
		timeoutAt := time.Now().Add(time.Second * 600)
		for {
			for securityGroupID := range securityGroupIDs {
				request := &ec2.DeleteSecurityGroupInput{}
				// Taking the address of the range variable is safe here: the
				// pointer is consumed before the next iteration.
				request.GroupId = &securityGroupID
				_, err := s.ec2.DeleteSecurityGroup(request)
				if err == nil {
					delete(securityGroupIDs, securityGroupID)
				} else {
					ignore := false
					if awsError, ok := err.(awserr.Error); ok {
						if awsError.Code() == "DependencyViolation" {
							glog.V(2).Infof("Ignoring DependencyViolation while deleting load-balancer security group (%s), assuming because LB is in process of deleting", securityGroupID)
							ignore = true
						}
					}
					if !ignore {
						return fmt.Errorf("error while deleting load balancer security group (%s): %v", securityGroupID, err)
					}
				}
			}
			if len(securityGroupIDs) == 0 {
				glog.V(2).Info("Deleted all security groups for load balancer: ", service.Name)
				break
			}
			if time.Now().After(timeoutAt) {
				ids := []string{}
				for id := range securityGroupIDs {
					ids = append(ids, id)
				}
				return fmt.Errorf("timed out deleting ELB: %s. Could not delete security groups %v", service.Name, strings.Join(ids, ","))
			}
			glog.V(2).Info("Waiting for load-balancer to delete so we can delete security groups: ", service.Name)
			time.Sleep(10 * time.Second)
		}
	}
	return nil
}
// UpdateLoadBalancer implements LoadBalancer.UpdateLoadBalancer
//
// Re-registers the given hosts with the service's ELB and reconciles the
// instance security-group ingress rules accordingly.
func (s *AWSCloud) UpdateLoadBalancer(service *api.Service, hosts []string) error {
	instances, err := s.getInstancesByNodeNames(hosts)
	if err != nil {
		return err
	}
	loadBalancerName := cloudprovider.GetLoadBalancerName(service)
	lb, err := s.describeLoadBalancer(loadBalancerName)
	if err != nil {
		return err
	}
	if lb == nil {
		return fmt.Errorf("Load balancer not found")
	}
	err = s.ensureLoadBalancerInstances(orEmpty(lb.LoadBalancerName), lb.Instances, instances)
	if err != nil {
		// Bug fix: this previously returned nil, silently swallowing the
		// registration error and skipping no further work anyway.
		return err
	}
	err = s.updateInstanceSecurityGroupsForLoadBalancer(lb, instances)
	if err != nil {
		return err
	}
	return nil
}
// Returns the instance with the specified ID.
// Errors when zero or more than one instance matches.
func (a *AWSCloud) getInstanceByID(instanceID string) (*ec2.Instance, error) {
	instances, err := a.getInstancesByIDs([]*string{&instanceID})
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, fmt.Errorf("no instances found for instance: %s", instanceID)
	case 1:
		return instances[instanceID], nil
	default:
		return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
	}
}
// getInstancesByIDs describes the given instance ids and returns the results
// keyed by instance id; instances without an id are skipped.
func (a *AWSCloud) getInstancesByIDs(instanceIDs []*string) (map[string]*ec2.Instance, error) {
	byID := make(map[string]*ec2.Instance)
	if len(instanceIDs) == 0 {
		return byID, nil
	}
	instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: instanceIDs,
	})
	if err != nil {
		return nil, err
	}
	for _, instance := range instances {
		if id := orEmpty(instance.InstanceId); id != "" {
			byID[id] = instance
		}
	}
	return byID, nil
}
// Fetches running instances by node names with a single multi-valued
// private-dns-name filter (plus the cluster tag filters).
// Note: returns (nil, nil) when no instances match.
func (a *AWSCloud) getInstancesByNodeNames(nodeNames []string) ([]*ec2.Instance, error) {
	filters := a.addFilters([]*ec2.Filter{
		{
			Name:   aws.String("private-dns-name"),
			Values: aws.StringSlice(nodeNames),
		},
		newEc2Filter("instance-state-name", "running"),
	})
	instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: filters,
	})
	if err != nil {
		glog.V(2).Infof("Failed to describe instances %v", nodeNames)
		return nil, err
	}
	if len(instances) == 0 {
		glog.V(3).Infof("Failed to find any instances %v", nodeNames)
		return nil, nil
	}
	return instances, nil
}
// Returns the running instance with the specified node name, or nil if it
// does not exist. Errors when more than one instance matches.
func (a *AWSCloud) findInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
	filters := a.addFilters([]*ec2.Filter{
		newEc2Filter("private-dns-name", nodeName),
		newEc2Filter("instance-state-name", "running"),
	})
	instances, err := a.ec2.DescribeInstances(&ec2.DescribeInstancesInput{
		Filters: filters,
	})
	if err != nil {
		return nil, err
	}
	switch len(instances) {
	case 0:
		return nil, nil
	case 1:
		return instances[0], nil
	default:
		return nil, fmt.Errorf("multiple instances found for name: %s", nodeName)
	}
}
// Returns the instance with the specified node name.
// Like findInstanceByNodeName, but "not found" is an error here.
func (a *AWSCloud) getInstanceByNodeName(nodeName string) (*ec2.Instance, error) {
	instance, err := a.findInstanceByNodeName(nodeName)
	if err != nil {
		return nil, err
	}
	if instance == nil {
		return nil, fmt.Errorf("no instances found for name: %s", nodeName)
	}
	return instance, nil
}
// Add additional filters, to match on our cluster tags.
// This lets us run multiple k8s clusters in a single EC2 AZ.
func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter {
	for key, value := range s.filterTags {
		filters = append(filters, newEc2Filter("tag:"+key, value))
	}
	if len(filters) > 0 {
		return filters
	}
	// AWS rejects a zero-length Filters list, so return nil instead.
	return nil
}
// getClusterName reports the cluster name from the filter tags, or "" when
// the cluster tag is not set.
func (s *AWSCloud) getClusterName() string {
	name := s.filterTags[TagNameKubernetesCluster]
	return name
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"reflect"
"strings"
"sync"
"time"
kubeerr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/request"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
storeerr "k8s.io/apiserver/pkg/storage/errors"
"github.com/golang/glog"
)
// ObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. An ObjectFunc may transform the provided
// object. It is the type of Store's Decorator and AfterCreate/AfterUpdate/
// AfterDelete hooks.
type ObjectFunc func(obj runtime.Object) error
// GenericStore interface can be used for type assertions when we need to access the underlying strategies.
type GenericStore interface {
	// GetCreateStrategy returns the strategy applied during object creation.
	GetCreateStrategy() rest.RESTCreateStrategy
	// GetUpdateStrategy returns the strategy applied during object updates.
	GetUpdateStrategy() rest.RESTUpdateStrategy
	// GetDeleteStrategy returns the strategy applied during object deletion.
	GetDeleteStrategy() rest.RESTDeleteStrategy
	// GetExportStrategy returns the strategy applied during object export.
	GetExportStrategy() rest.RESTExportStrategy
}
// Store implements pkg/api/rest.StandardStorage. It's intended to be
// embeddable and allows the consumer to implement any non-generic functions
// that are required. This object is intended to be copyable so that it can be
// used in different ways but share the same underlying behavior.
//
// All fields are required unless specified.
//
// The intended use of this type is embedding within a Kind specific
// RESTStorage implementation. This type provides CRUD semantics on a Kubelike
// resource, handling details like conflict detection with ResourceVersion and
// semantics. The RESTCreateStrategy, RESTUpdateStrategy, and
// RESTDeleteStrategy are generic across all backends, and encapsulate logic
// specific to the API.
//
// TODO: make the default exposed methods exactly match a generic RESTStorage
type Store struct {
	// Copier is used to make some storage caching decorators work
	Copier runtime.ObjectCopier
	// NewFunc returns a new instance of the type this registry returns for a
	// GET of a single object, e.g.:
	//
	// curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object
	NewFunc func() runtime.Object
	// NewListFunc returns a new list of the type this registry; it is the
	// type returned when the resource is listed, e.g.:
	//
	// curl GET /apis/group/version/namespaces/my-ns/myresource
	NewListFunc func() runtime.Object
	// DefaultQualifiedResource is the pluralized name of the resource.
	// This field is used if there is no request info present in the context.
	// See qualifiedResourceFromContext for details.
	DefaultQualifiedResource schema.GroupResource
	// KeyRootFunc returns the root etcd key for this resource; should not
	// include trailing "/". This is used for operations that work on the
	// entire collection (listing and watching).
	//
	// KeyRootFunc and KeyFunc must be supplied together or not at all.
	KeyRootFunc func(ctx genericapirequest.Context) string
	// KeyFunc returns the key for a specific object in the collection.
	// KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace'
	// can be gotten from ctx.
	//
	// KeyFunc and KeyRootFunc must be supplied together or not at all.
	KeyFunc func(ctx genericapirequest.Context, name string) (string, error)
	// ObjectNameFunc returns the name of an object or an error.
	ObjectNameFunc func(obj runtime.Object) (string, error)
	// TTLFunc returns the TTL (time to live) that objects should be persisted
	// with. The existing parameter is the current TTL or the default for this
	// operation. The update parameter indicates whether this is an operation
	// against an existing object.
	//
	// Objects that are persisted with a TTL are evicted once the TTL expires.
	TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)
	// PredicateFunc returns a matcher corresponding to the provided labels
	// and fields. The SelectionPredicate returned should return true if the
	// object matches the given field and label selectors.
	PredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate
	// EnableGarbageCollection affects the handling of Update and Delete
	// requests. Enabling garbage collection allows finalizers to do work to
	// finalize this object before the store deletes it.
	//
	// If any store has garbage collection enabled, it must also be enabled in
	// the kube-controller-manager.
	EnableGarbageCollection bool
	// DeleteCollectionWorkers is the maximum number of workers in a single
	// DeleteCollection call. Delete requests for the items in a collection
	// are issued in parallel.
	DeleteCollectionWorkers int
	// Decorator is an optional exit hook on an object returned from the
	// underlying storage. The returned object could be an individual object
	// (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for
	// integrations that are above storage and should only be used for
	// specific cases where storage of the value is not appropriate, since
	// they cannot be watched.
	Decorator ObjectFunc
	// CreateStrategy implements resource-specific behavior during creation.
	CreateStrategy rest.RESTCreateStrategy
	// AfterCreate implements a further operation to run after a resource is
	// created and before it is decorated, optional.
	AfterCreate ObjectFunc
	// UpdateStrategy implements resource-specific behavior during updates.
	UpdateStrategy rest.RESTUpdateStrategy
	// AfterUpdate implements a further operation to run after a resource is
	// updated and before it is decorated, optional.
	AfterUpdate ObjectFunc
	// DeleteStrategy implements resource-specific behavior during deletion.
	DeleteStrategy rest.RESTDeleteStrategy
	// AfterDelete implements a further operation to run after a resource is
	// deleted and before it is decorated, optional.
	AfterDelete ObjectFunc
	// ReturnDeletedObject determines whether the Store returns the object
	// that was deleted. Otherwise, return a generic success status response.
	ReturnDeletedObject bool
	// ExportStrategy implements resource-specific behavior during export,
	// optional. Exported objects are not decorated.
	ExportStrategy rest.RESTExportStrategy
	// TableConvertor is an optional interface for transforming items or lists
	// of items into tabular output. If unset, the default will be used.
	TableConvertor rest.TableConvertor
	// Storage is the interface for the underlying storage for the resource.
	Storage storage.Interface
	// DestroyFunc is called to cleanup clients used by the underlying
	// Storage; optional.
	DestroyFunc func()
	// WatchCacheSize is the maximum size of the watch history cached in
	// memory, in number of entries. This value is ignored if Storage is
	// non-nil. Nil is replaced with a default value. A zero integer will
	// disable caching.
	WatchCacheSize *int
}
// Note: the rest.StandardStorage interface aggregates the common REST verbs
// Compile-time assertions that *Store satisfies the interfaces it claims.
var _ rest.StandardStorage = &Store{}
var _ rest.Exporter = &Store{}
var _ rest.TableConvertor = &Store{}
var _ GenericStore = &Store{}
// OptimisticLockErrorMsg is the message wrapped in the Conflict error that
// Update returns when the client's resourceVersion is stale.
const OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
// NamespaceKeyRootFunc is the default function for constructing storage paths
// to resource directories enforcing namespace rules. When the context carries
// a non-empty namespace it is appended to prefix; otherwise prefix alone is
// returned.
func NamespaceKeyRootFunc(ctx genericapirequest.Context, prefix string) string {
	if ns, ok := genericapirequest.NamespaceFrom(ctx); ok && len(ns) > 0 {
		return prefix + "/" + ns
	}
	return prefix
}
// NamespaceKeyFunc is the default function for constructing storage paths to
// a resource relative to the given prefix enforcing namespace rules. It
// returns a BadRequest error when the context has no namespace, the name is
// empty, or the name is not a valid path segment.
func NamespaceKeyFunc(ctx genericapirequest.Context, prefix string, name string) (string, error) {
	key := NamespaceKeyRootFunc(ctx, prefix)
	ns, ok := genericapirequest.NamespaceFrom(ctx)
	switch {
	case !ok || len(ns) == 0:
		return "", kubeerr.NewBadRequest("Namespace parameter required.")
	case len(name) == 0:
		return "", kubeerr.NewBadRequest("Name parameter required.")
	}
	if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
		return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
	}
	return key + "/" + name, nil
}
// NoNamespaceKeyFunc is the default function for constructing storage paths
// to a resource relative to the given prefix without a namespace. It returns
// a BadRequest error when name is empty or not a valid path segment.
func NoNamespaceKeyFunc(ctx genericapirequest.Context, prefix string, name string) (string, error) {
	if name == "" {
		return "", kubeerr.NewBadRequest("Name parameter required.")
	}
	msgs := path.IsValidPathSegmentName(name)
	if len(msgs) != 0 {
		return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
	}
	return prefix + "/" + name, nil
}
// New implements RESTStorage.New by delegating to NewFunc.
func (e *Store) New() runtime.Object { return e.NewFunc() }
// NewList implements rest.Lister by delegating to NewListFunc.
func (e *Store) NewList() runtime.Object { return e.NewListFunc() }
// GetCreateStrategy implements GenericStore; it exposes the store's creation strategy.
func (e *Store) GetCreateStrategy() rest.RESTCreateStrategy { return e.CreateStrategy }
// GetUpdateStrategy implements GenericStore; it exposes the store's update strategy.
func (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy { return e.UpdateStrategy }
// GetDeleteStrategy implements GenericStore; it exposes the store's deletion strategy.
func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy { return e.DeleteStrategy }
// GetExportStrategy implements GenericStore; it exposes the store's export strategy.
func (e *Store) GetExportStrategy() rest.RESTExportStrategy { return e.ExportStrategy }
// List returns a list of items matching labels and field according to the
// store's PredicateFunc, with the Decorator (if any) applied to the result.
func (e *Store) List(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {
	label, field := labels.Everything(), fields.Everything()
	if options != nil {
		if options.LabelSelector != nil {
			label = options.LabelSelector
		}
		if options.FieldSelector != nil {
			field = options.FieldSelector
		}
	}
	result, err := e.ListPredicate(ctx, e.PredicateFunc(label, field), options)
	if err != nil {
		return nil, err
	}
	if e.Decorator != nil {
		if decErr := e.Decorator(result); decErr != nil {
			return nil, decErr
		}
	}
	return result, nil
}
// ListPredicate returns a list of all the items matching the given
// SelectionPredicate. When the predicate matches exactly one name and a key
// can be derived from the context, a single-item GetToList is used instead
// of a full List.
func (e *Store) ListPredicate(ctx genericapirequest.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) {
	if options == nil {
		// By default we should serve the request from etcd.
		options = &metainternalversion.ListOptions{ResourceVersion: ""}
	}
	p.IncludeUninitialized = options.IncludeUninitialized
	list := e.NewListFunc()
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	name, isSingle := p.MatchesSingle()
	if isSingle {
		key, keyErr := e.KeyFunc(ctx, name)
		if keyErr == nil {
			err := e.Storage.GetToList(ctx, key, options.ResourceVersion, p, list)
			return list, storeerr.InterpretListError(err, qualifiedResource)
		}
		// if we cannot extract a key based on the current context, the optimization is skipped
	}
	err := e.Storage.List(ctx, e.KeyRootFunc(ctx), options.ResourceVersion, p, list)
	return list, storeerr.InterpretListError(err, qualifiedResource)
}
// Create inserts a new item according to the unique key from the object.
// The object is validated by the CreateStrategy, persisted, passed through
// the AfterCreate hook and the Decorator, and — unless includeUninitialized
// is set — held until its initializers complete.
func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, qualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		// The object already exists; if it is mid-deletion, enrich the
		// error message so clients understand why the create failed.
		if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		// BUG FIX: decorate the stored result (out), which is what this
		// method returns, instead of the input obj. Previously the input
		// was decorated and the returned object was left undecorated —
		// inconsistent with List/Get/Update, which decorate the returned
		// object.
		if err := e.Decorator(out); err != nil {
			return nil, err
		}
	}
	if !includeUninitialized {
		return e.WaitForInitialized(ctx, out)
	}
	return out, nil
}
// WaitForInitialized holds until the object is initialized, or returns an error if the default limit expires.
// This method is exposed publicly for consumers of generic rest tooling.
func (e *Store) WaitForInitialized(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	// return early if we don't have initializers, or if they've completed already
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return obj, nil
	}
	initializers := accessor.GetInitializers()
	if initializers == nil {
		return obj, nil
	}
	if result := initializers.Result; result != nil {
		return nil, kubeerr.FromObject(result)
	}
	key, err := e.KeyFunc(ctx, accessor.GetName())
	if err != nil {
		return nil, err
	}
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	// Watch the object (including its uninitialized state) until the
	// initializers list is cleared or records a failure.
	w, err := e.Storage.Watch(ctx, key, accessor.GetResourceVersion(), storage.SelectionPredicate{
		Label: labels.Everything(),
		Field: fields.Everything(),
		IncludeUninitialized: true,
	})
	if err != nil {
		return nil, err
	}
	defer w.Stop()
	latest := obj
	ch := w.ResultChan()
	for {
		select {
		case event, ok := <-ch:
			if !ok {
				msg := fmt.Sprintf("server has timed out waiting for the initialization of %s %s",
					qualifiedResource.String(), accessor.GetName())
				return nil, kubeerr.NewTimeoutError(msg, 0)
			}
			switch event.Type {
			case watch.Deleted:
				if latest = event.Object; latest != nil {
					if accessor, err := meta.Accessor(latest); err == nil {
						if initializers := accessor.GetInitializers(); initializers != nil && initializers.Result != nil {
							// initialization failed, but we missed the modification event
							return nil, kubeerr.FromObject(initializers.Result)
						}
					}
				}
				return nil, kubeerr.NewInternalError(fmt.Errorf("object deleted while waiting for creation"))
			case watch.Error:
				if status, ok := event.Object.(*metav1.Status); ok {
					return nil, &kubeerr.StatusError{ErrStatus: *status}
				}
				return nil, kubeerr.NewInternalError(fmt.Errorf("unexpected object in watch stream, can't complete initialization %T", event.Object))
			case watch.Modified:
				latest = event.Object
				accessor, err = meta.Accessor(latest)
				if err != nil {
					return nil, kubeerr.NewInternalError(fmt.Errorf("object no longer has access to metadata %T: %v", latest, err))
				}
				initializers := accessor.GetInitializers()
				if initializers == nil {
					// completed initialization
					return latest, nil
				}
				if result := initializers.Result; result != nil {
					// initialization failed
					return nil, kubeerr.FromObject(result)
				}
			}
		case <-ctx.Done():
			// BUG FIX: this case previously had an empty body, so once the
			// context was cancelled the select fired immediately on every
			// iteration (ctx.Done() stays closed), busy-spinning without
			// ever returning to the caller. Surface the cancellation as a
			// timeout error instead.
			msg := fmt.Sprintf("server has timed out waiting for the initialization of %s %s",
				qualifiedResource.String(), accessor.GetName())
			return nil, kubeerr.NewTimeoutError(msg, 0)
		}
	}
}
// shouldDeleteDuringUpdate reports whether an update that strips every
// finalizer from an object whose DeletionGracePeriodSeconds is zero should
// instead be carried out as a deletion.
func (e *Store) shouldDeleteDuringUpdate(ctx genericapirequest.Context, key string, obj, existing runtime.Object) bool {
	newMeta, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	oldMeta, err := meta.Accessor(existing)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	if len(newMeta.GetFinalizers()) != 0 {
		// Finalizers remain; the object must not be deleted yet.
		return false
	}
	gracePeriod := oldMeta.GetDeletionGracePeriodSeconds()
	return gracePeriod != nil && *gracePeriod == 0
}
// shouldDeleteForFailedInitialization reports whether the provided object is
// still initializing and has a recorded initialization failure.
func (e *Store) shouldDeleteForFailedInitialization(ctx genericapirequest.Context, obj runtime.Object) bool {
	m, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	initializers := m.GetInitializers()
	return initializers != nil && initializers.Result != nil
}
// deleteWithoutFinalizers deletes an object from storage while ignoring its
// finalizer list. It is used for objects that have either been finalized or
// never finished initializing. The object from the request (not the deletion
// status) is returned, because clients of a successful PUT expect an updated
// object while finalizeDelete produces a metav1.Status.
func (e *Store) deleteWithoutFinalizers(ctx genericapirequest.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions) (runtime.Object, bool, error) {
	out := e.NewFunc()
	glog.V(6).Infof("going to delete %s from registry, triggered by update", name)
	err := e.Storage.Delete(ctx, key, out, preconditions)
	if err == nil {
		_, ferr := e.finalizeDelete(ctx, out, true)
		return obj, false, ferr
	}
	// Deletion is racy: several update requests may race to strip the last
	// finalizers from the same object, so a NotFound here is tolerated.
	if storage.IsNotFound(err) {
		_, ferr := e.finalizeDelete(ctx, obj, true)
		return obj, false, ferr
	}
	return nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name)
}
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	var (
		// creating/creatingObj record whether the guarded update below took
		// the create-on-update path, so errors can be interpreted correctly.
		creatingObj runtime.Object
		creating = false
	)
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	storagePreconditions := &storage.Preconditions{}
	if preconditions := objInfo.Preconditions(); preconditions != nil {
		storagePreconditions.UID = preconditions.UID
	}
	out := e.NewFunc()
	// deleteObj is only used in case a deletion is carried out
	var deleteObj runtime.Object
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Given the existing object, get the new object
		obj, err := objInfo.UpdatedObject(ctx, existing)
		if err != nil {
			return nil, nil, err
		}
		// If AllowUnconditionalUpdate() is true and the object specified by
		// the user does not have a resource version, then we populate it with
		// the latest version. Else, we check that the version specified by
		// the user matches the version of latest storage object.
		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
		if err != nil {
			return nil, nil, err
		}
		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		// version == 0 means no existing object: either create it (when the
		// strategy allows create-on-update) or report NotFound.
		if version == 0 {
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(qualifiedResource, name)
			}
			creating = true
			creatingObj = obj
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}
		creating = false
		creatingObj = nil
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest
			// storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest
			// resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		// Removing the last finalizers from a terminating object turns the
		// update into an immediate deletion, signalled via a sentinel error.
		if e.shouldDeleteDuringUpdate(ctx, key, obj, existing) {
			deleteObj = obj
			return nil, nil, errEmptiedFinalizers
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})
	if err != nil {
		// delete the object
		if err == errEmptiedFinalizers {
			return e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions)
		}
		if creating {
			err = storeerr.InterpretCreateError(err, qualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)
		} else {
			err = storeerr.InterpretUpdateError(err, qualifiedResource, name)
		}
		return nil, false, err
	}
	// An object whose initialization failed is deleted instead of returned.
	if e.shouldDeleteForFailedInitialization(ctx, out) {
		return e.deleteWithoutFinalizers(ctx, name, key, out, storagePreconditions)
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
// Get retrieves the item from storage by name, applying the Decorator to the
// result when one is configured.
func (e *Store) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	obj := e.NewFunc()
	if err := e.Storage.Get(ctx, key, options.ResourceVersion, obj, false); err != nil {
		return nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name)
	}
	if e.Decorator != nil {
		if decErr := e.Decorator(obj); decErr != nil {
			return nil, decErr
		}
	}
	return obj, nil
}
// qualifiedResourceFromContext derives a GroupResource from the context's
// request info, falling back to DefaultQualifiedResource when none exists.
func (e *Store) qualifiedResourceFromContext(ctx genericapirequest.Context) schema.GroupResource {
	info, ok := request.RequestInfoFrom(ctx)
	if !ok {
		// some implementations access storage directly and thus the context has no RequestInfo
		return e.DefaultQualifiedResource
	}
	return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}
}
var (
	// errAlreadyDeleting signals (inside updateForGracefulDeletionAndFinalizers)
	// that a graceful deletion is already pending and the delete should abort.
	errAlreadyDeleting = fmt.Errorf("abort delete")
	// errDeleteNow signals that the grace period is exhausted (or zero) and
	// the object should be truly deleted.
	errDeleteNow = fmt.Errorf("delete now")
	// errEmptiedFinalizers signals (from Update's guarded-update closure)
	// that the update removed the last finalizers and must become a delete.
	errEmptiedFinalizers = fmt.Errorf("emptied finalizers")
)
// shouldOrphanDependents returns true if the FinalizerOrphanDependents
// finalizer should be set. In order of highest to lowest priority, three
// factors decide: the delete options, the object's existing finalizers, and
// e.DeleteStrategy.DefaultGarbageCollectionPolicy.
func shouldOrphanDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {
	// A strategy that declares garbage collection unsupported never orphans.
	if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy() == rest.Unsupported {
		return false
	}
	if options != nil {
		// An explicit policy set at deletion time overrides everything.
		if options.OrphanDependents != nil {
			return *options.OrphanDependents
		}
		if options.PropagationPolicy != nil {
			switch *options.PropagationPolicy {
			case metav1.DeletePropagationOrphan:
				return true
			case metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:
				return false
			}
		}
	}
	// A finalizer already on the object overrides the type default;
	// validation should make sure the two cases won't be true at the same time.
	for _, f := range accessor.GetFinalizers() {
		switch f {
		case metav1.FinalizerOrphanDependents:
			return true
		case metav1.FinalizerDeleteDependents:
			return false
		}
	}
	// Lowest priority: the REST type's default garbage collection policy.
	gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)
	return ok && gcStrategy.DefaultGarbageCollectionPolicy() == rest.OrphanDependents
}
// shouldDeleteDependents returns true if the FinalizerDeleteDependents
// finalizer (foreground deletion) should be set. In order of highest to
// lowest priority, three factors decide: the delete options, the object's
// existing finalizers, and e.DeleteStrategy.DefaultGarbageCollectionPolicy.
func shouldDeleteDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {
	// A strategy that declares garbage collection unsupported never deletes
	// in the foreground.
	if gcStrategy, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gcStrategy.DefaultGarbageCollectionPolicy() == rest.Unsupported {
		return false
	}
	if options != nil {
		// An explicit OrphanDependents option always wins over foreground deletion.
		if options.OrphanDependents != nil {
			return false
		}
		if options.PropagationPolicy != nil {
			switch *options.PropagationPolicy {
			case metav1.DeletePropagationForeground:
				return true
			case metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan:
				return false
			}
		}
	}
	// A finalizer already on the object overrides the default; validation
	// has made sure the two cases won't be true at the same time.
	for _, f := range accessor.GetFinalizers() {
		switch f {
		case metav1.FinalizerDeleteDependents:
			return true
		case metav1.FinalizerOrphanDependents:
			return false
		}
	}
	return false
}
// deletionFinalizers computes the deletion finalizers that should be set on
// the object, returning whether they differ from the current set plus the
// new list. Because these finalizers are only cleared by the garbage
// collector, it always reports "no change" when garbage collection is
// disabled for the Store.
func deletionFinalizers(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {
	if !e.EnableGarbageCollection {
		return false, []string{}
	}
	shouldOrphan := shouldOrphanDependents(e, accessor, options)
	shouldDeleteDependentInForeground := shouldDeleteDependents(e, accessor, options)
	// Start from the current finalizers with both GC finalizers stripped,
	// then re-add whichever one the policy selects.
	newFinalizers := []string{}
	for _, f := range accessor.GetFinalizers() {
		switch f {
		case metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents:
			// dropped; re-added below if still wanted
		default:
			newFinalizers = append(newFinalizers, f)
		}
	}
	if shouldOrphan {
		newFinalizers = append(newFinalizers, metav1.FinalizerOrphanDependents)
	}
	if shouldDeleteDependentInForeground {
		newFinalizers = append(newFinalizers, metav1.FinalizerDeleteDependents)
	}
	if sets.NewString(accessor.GetFinalizers()...).Equal(sets.NewString(newFinalizers...)) {
		return false, accessor.GetFinalizers()
	}
	return true, newFinalizers
}
// markAsDeleting sets the object's DeletionGracePeriodSeconds to 0 and its
// DeletionTimestamp to "now". Finalizers are watching for such updates and
// will finalize the object if their IDs are present in its Finalizers list.
func markAsDeleting(obj runtime.Object) error {
	objectMeta, accErr := meta.Accessor(obj)
	if accErr != nil {
		return accErr
	}
	// Bump Generation for resources without graceful-deletion support; for
	// resources that do support it this happens in pkg/api/rest/delete.go.
	if objectMeta.GetDeletionTimestamp() == nil && objectMeta.GetGeneration() > 0 {
		objectMeta.SetGeneration(objectMeta.GetGeneration() + 1)
	}
	now := metav1.NewTime(time.Now())
	objectMeta.SetDeletionTimestamp(&now)
	gracePeriod := int64(0)
	objectMeta.SetDeletionGracePeriodSeconds(&gracePeriod)
	return nil
}
// updateForGracefulDeletionAndFinalizers updates the given object for
// graceful deletion and finalization by setting the deletion timestamp and
// grace period seconds (graceful deletion) and updating the list of
// finalizers (finalization); it returns:
//
// 1. an error
// 2. a boolean indicating that the object was not found, but it should be
//    ignored
// 3. a boolean indicating that the object's grace period is exhausted and it
//    should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
	// lastGraceful records the grace period applied by the guarded update;
	// pendingFinalizers records whether finalizers remained after the update.
	lastGraceful := int64(0)
	var pendingFinalizers bool
	out = e.NewFunc()
	err = e.Storage.GuaranteedUpdate(
		ctx,
		key,
		out,
		false, /* ignoreNotFound */
		&preconditions,
		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
			if err != nil {
				return nil, err
			}
			if pendingGraceful {
				return nil, errAlreadyDeleting
			}
			// Add/remove the orphan finalizer as the options dictates.
			// Note that this occurs after checking pendingGraceful, so
			// finalizers cannot be updated via DeleteOptions if deletion has
			// started.
			existingAccessor, err := meta.Accessor(existing)
			if err != nil {
				return nil, err
			}
			needsUpdate, newFinalizers := deletionFinalizers(e, existingAccessor, options)
			if needsUpdate {
				existingAccessor.SetFinalizers(newFinalizers)
			}
			pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
			if !graceful {
				// set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion
				if pendingFinalizers {
					glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name)
					err = markAsDeleting(existing)
					if err != nil {
						return nil, err
					}
					return existing, nil
				}
				return nil, errDeleteNow
			}
			lastGraceful = *options.GracePeriodSeconds
			lastExisting = existing
			return existing, nil
		}),
	)
	// Translate the sentinel errors from the closure into the multi-value
	// contract documented above.
	switch err {
	case nil:
		// If there are pending finalizers, we never delete the object immediately.
		if pendingFinalizers {
			return nil, false, false, out, lastExisting
		}
		if lastGraceful > 0 {
			return nil, false, false, out, lastExisting
		}
		// If we are here, the registry supports grace period mechanism and
		// we are intentionally delete gracelessly. In this case, we may
		// enter a race with other k8s components. If other component wins
		// the race, the object will not be found, and we should tolerate
		// the NotFound error. See
		// https://github.com/kubernetes/kubernetes/issues/19403 for
		// details.
		return nil, true, true, out, lastExisting
	case errDeleteNow:
		// we've updated the object to have a zero grace period, or it's already at 0, so
		// we should fall through and truly delete the object.
		return nil, false, true, out, lastExisting
	case errAlreadyDeleting:
		out, err = e.finalizeDelete(ctx, in, true)
		return err, false, false, out, lastExisting
	default:
		return storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting
	}
}
// Delete removes the item from storage, honoring graceful deletion and
// finalizers. It returns the deleted object (or a status object — see
// finalizeDelete), whether the object was deleted immediately, and any error.
func (e *Store) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	obj := e.NewFunc()
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	// Fetch the current object so graceful-deletion/finalizer decisions can
	// be made from its state.
	if err := e.Storage.Get(ctx, key, "", obj, false); err != nil {
		return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)
	}
	// support older consumers of delete by treating "nil" as delete immediately
	if options == nil {
		options = metav1.NewDeleteOptions(0)
	}
	var preconditions storage.Preconditions
	if options.Preconditions != nil {
		preconditions.UID = options.Preconditions.UID
	}
	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
	if err != nil {
		return nil, false, err
	}
	// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending
	if pendingGraceful {
		out, err := e.finalizeDelete(ctx, obj, false)
		return out, false, err
	}
	// check if obj has pending finalizers
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, false, kubeerr.NewInternalError(err)
	}
	pendingFinalizers := len(accessor.GetFinalizers()) != 0
	var ignoreNotFound bool
	var deleteImmediately bool = true
	var lastExisting, out runtime.Object
	// Handle combinations of graceful deletion and finalization by issuing
	// the correct updates.
	shouldUpdateFinalizers, _ := deletionFinalizers(e, accessor, options)
	// TODO: remove the check, because we support no-op updates now.
	if graceful || pendingFinalizers || shouldUpdateFinalizers {
		err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, obj)
	}
	// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.
	if !deleteImmediately || err != nil {
		return out, false, err
	}
	// delete immediately, or no graceful deletion supported
	glog.V(6).Infof("going to delete %s from registry: ", name)
	out = e.NewFunc()
	if err := e.Storage.Delete(ctx, key, out, &preconditions); err != nil {
		// Please refer to the place where we set ignoreNotFound for the reason
		// why we ignore the NotFound error .
		if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {
			// The lastExisting object may not be the last state of the object
			// before its deletion, but it's the best approximation.
			out, err := e.finalizeDelete(ctx, lastExisting, true)
			return out, true, err
		}
		return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)
	}
	out, err = e.finalizeDelete(ctx, out, true)
	return out, true, err
}
// DeleteCollection removes all items returned by List with a given ListOptions from storage.
//
// DeleteCollection is currently NOT atomic. It can happen that only subset of objects
// will be deleted from storage, and then an error will be returned.
// In case of success, the list of deleted objects will be returned.
//
// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we
// are removing all objects of a given type) with the current API (it's technically
// possibly with storage API, but watch is not delivered correctly then).
// It will be possible to fix it with v3 etcd API.
func (e *Store) DeleteCollection(ctx genericapirequest.Context, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
	if listOptions == nil {
		listOptions = &metainternalversion.ListOptions{}
	} else {
		// Copy before mutating so the caller's options are not modified.
		listOptions = listOptions.DeepCopy()
	}
	// DeleteCollection must remain backwards compatible with old clients that expect it to
	// remove all resources, initialized or not, within the type. It is also consistent with
	// Delete which does not require IncludeUninitialized
	listOptions.IncludeUninitialized = true
	listObj, err := e.List(ctx, listOptions)
	if err != nil {
		return nil, err
	}
	items, err := meta.ExtractList(listObj)
	if err != nil {
		return nil, err
	}
	// Spawn a number of goroutines, so that we can issue requests to storage
	// in parallel to speed up deletion.
	// TODO: Make this proportional to the number of items to delete, up to
	// DeleteCollectionWorkers (it doesn't make much sense to spawn 16
	// workers to delete 10 items).
	workersNumber := e.DeleteCollectionWorkers
	if workersNumber < 1 {
		workersNumber = 1
	}
	wg := sync.WaitGroup{}
	toProcess := make(chan int, 2*workersNumber)
	// Sized so that every worker plus the distributor can report one error
	// without blocking.
	errs := make(chan error, workersNumber+1)
	go func() {
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason)
		})
		for i := 0; i < len(items); i++ {
			toProcess <- i
		}
		close(toProcess)
	}()
	wg.Add(workersNumber)
	for i := 0; i < workersNumber; i++ {
		go func() {
			// panics don't cross goroutine boundaries
			defer utilruntime.HandleCrash(func(panicReason interface{}) {
				errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason)
			})
			defer wg.Done()
			for {
				index, ok := <-toProcess
				if !ok {
					return
				}
				accessor, err := meta.Accessor(items[index])
				if err != nil {
					errs <- err
					return
				}
				// NotFound is tolerated: another worker or client may have
				// already deleted the item.
				if _, _, err := e.Delete(ctx, accessor.GetName(), options); err != nil && !kubeerr.IsNotFound(err) {
					glog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err)
					errs <- err
					return
				}
			}
		}()
	}
	wg.Wait()
	// Non-blocking receive: report the first error, if any worker sent one.
	select {
	case err := <-errs:
		return nil, err
	default:
		return listObj, nil
	}
}
// finalizeDelete runs the Store's AfterDelete hook if runHooks is set and
// returns the decorated deleted object if appropriate.
//
// When ReturnDeletedObject is false, it instead returns a metav1.Status
// describing the deletion, so clients can verify the object was actually
// deleted rather than waiting on finalizers.
func (e *Store) finalizeDelete(ctx genericapirequest.Context, obj runtime.Object, runHooks bool) (runtime.Object, error) {
	if runHooks && e.AfterDelete != nil {
		if err := e.AfterDelete(obj); err != nil {
			return nil, err
		}
	}
	if e.ReturnDeletedObject {
		if e.Decorator != nil {
			if err := e.Decorator(obj); err != nil {
				return nil, err
			}
		}
		return obj, nil
	}
	// Return information about the deleted object, which enables clients to
	// verify that the object was actually deleted and not waiting for finalizers.
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, err
	}
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	details := &metav1.StatusDetails{
		Name:  accessor.GetName(),
		Group: qualifiedResource.Group,
		Kind:  qualifiedResource.Resource, // Yes we set Kind field to resource.
		UID:   accessor.GetUID(),
	}
	status := &metav1.Status{Status: metav1.StatusSuccess, Details: details}
	return status, nil
}
// Watch builds a SelectionPredicate from the label and field selectors in
// options and delegates to WatchPredicate. If possible, you should customize
// PredicateFunc to produce a matcher that matches by key; SelectionPredicate
// does this for you automatically.
func (e *Store) Watch(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
	labelSel := labels.Everything()
	fieldSel := fields.Everything()
	if options != nil {
		if options.LabelSelector != nil {
			labelSel = options.LabelSelector
		}
		if options.FieldSelector != nil {
			fieldSel = options.FieldSelector
		}
	}
	pred := e.PredicateFunc(labelSel, fieldSel)
	rv := ""
	if options != nil {
		rv = options.ResourceVersion
		pred.IncludeUninitialized = options.IncludeUninitialized
	}
	return e.WatchPredicate(ctx, pred, rv)
}
// WatchPredicate starts a watch for the items that match p. When p matches a
// single object by name and a key can be derived from the context, a
// single-key watch is used as an optimization; otherwise the whole collection
// is watched.
func (e *Store) WatchPredicate(ctx genericapirequest.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {
	// decorate wraps the raw watch with the store's Decorator, if configured.
	decorate := func(w watch.Interface) watch.Interface {
		if e.Decorator != nil {
			return newDecoratedWatcher(w, e.Decorator)
		}
		return w
	}
	if name, ok := p.MatchesSingle(); ok {
		if key, err := e.KeyFunc(ctx, name); err == nil {
			w, err := e.Storage.Watch(ctx, key, resourceVersion, p)
			if err != nil {
				return nil, err
			}
			return decorate(w), nil
		}
		// if we cannot extract a key based on the current context, the
		// optimization is skipped
	}
	w, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), resourceVersion, p)
	if err != nil {
		return nil, err
	}
	return decorate(w), nil
}
// calculateTTL is a helper for retrieving the updated TTL for an object or
// returning an error if the TTL cannot be calculated. The defaultTTL is
// changed to 1 if less than zero. Zero means no TTL, not expire immediately.
func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (uint64, error) {
	// TODO: validate this is assertion is still valid.
	// etcd may return a negative TTL for a node if the expiration has not
	// occurred due to server lag - we will ensure that the value is at least
	// set.
	if defaultTTL < 0 {
		defaultTTL = 1
	}
	ttl := uint64(defaultTTL)
	if e.TTLFunc == nil {
		return ttl, nil
	}
	// Let the resource-specific hook adjust the TTL.
	return e.TTLFunc(obj, ttl, update)
}
// exportObjectMeta unsets the fields on the given object that should not be
// present when the object is exported. When exact is false, namespace and
// (for generate-name objects) name are also cleared.
func exportObjectMeta(accessor metav1.Object, exact bool) {
	// Server-assigned identity and bookkeeping fields never survive export.
	accessor.SetUID("")
	accessor.SetCreationTimestamp(metav1.Time{})
	accessor.SetDeletionTimestamp(nil)
	accessor.SetResourceVersion("")
	accessor.SetSelfLink("")
	if exact {
		return
	}
	accessor.SetNamespace("")
	if len(accessor.GetGenerateName()) > 0 {
		// The name was generated; drop it so a re-import regenerates one.
		accessor.SetName("")
	}
}
// Export implements the rest.Exporter interface. It fetches the named object,
// strips export-inappropriate metadata, and then runs either the resource's
// ExportStrategy or, absent one, its CreateStrategy's PrepareForCreate.
func (e *Store) Export(ctx genericapirequest.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) {
	obj, err := e.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if accessor, err := meta.Accessor(obj); err == nil {
		exportObjectMeta(accessor, opts.Exact)
	} else {
		// Non-fatal: objects without ObjectMeta are exported as-is.
		glog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err)
	}
	if e.ExportStrategy != nil {
		if err = e.ExportStrategy.Export(ctx, obj, opts.Exact); err != nil {
			return nil, err
		}
	} else {
		e.CreateStrategy.PrepareForCreate(ctx, obj)
	}
	return obj, nil
}
// CompleteWithOptions updates the store with the provided options and
// defaults common fields. It validates required fields, then fills in
// predicate, key-generation, object-name, and storage defaults.
func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {
	// --- Validation of required configuration ---
	if e.DefaultQualifiedResource.Empty() {
		return fmt.Errorf("store %#v must have a non-empty qualified resource", e)
	}
	if e.NewFunc == nil {
		return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String())
	}
	if e.NewListFunc == nil {
		return fmt.Errorf("store for %s must have NewListFunc set", e.DefaultQualifiedResource.String())
	}
	if (e.KeyRootFunc == nil) != (e.KeyFunc == nil) {
		return fmt.Errorf("store for %s must set both KeyRootFunc and KeyFunc or neither", e.DefaultQualifiedResource.String())
	}
	// Namespace scoping is derived from whichever strategy is present.
	var isNamespaced bool
	switch {
	case e.CreateStrategy != nil:
		isNamespaced = e.CreateStrategy.NamespaceScoped()
	case e.UpdateStrategy != nil:
		isNamespaced = e.UpdateStrategy.NamespaceScoped()
	default:
		return fmt.Errorf("store for %s must have CreateStrategy or UpdateStrategy set", e.DefaultQualifiedResource.String())
	}
	if e.DeleteStrategy == nil {
		return fmt.Errorf("store for %s must have DeleteStrategy set", e.DefaultQualifiedResource.String())
	}
	if options.RESTOptions == nil {
		return fmt.Errorf("options for %s must have RESTOptions set", e.DefaultQualifiedResource.String())
	}
	// --- Defaulting ---
	attrFunc := options.AttrFunc
	if attrFunc == nil {
		if isNamespaced {
			attrFunc = storage.DefaultNamespaceScopedAttr
		} else {
			attrFunc = storage.DefaultClusterScopedAttr
		}
	}
	if e.PredicateFunc == nil {
		e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
			return storage.SelectionPredicate{
				Label:    label,
				Field:    field,
				GetAttrs: attrFunc,
			}
		}
	}
	opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)
	if err != nil {
		return err
	}
	// ResourcePrefix must come from the underlying factory
	prefix := opts.ResourcePrefix
	if !strings.HasPrefix(prefix, "/") {
		prefix = "/" + prefix
	}
	if prefix == "/" {
		return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix)
	}
	// Set the default behavior for storage key generation
	if e.KeyRootFunc == nil && e.KeyFunc == nil {
		if isNamespaced {
			e.KeyRootFunc = func(ctx genericapirequest.Context) string {
				return NamespaceKeyRootFunc(ctx, prefix)
			}
			e.KeyFunc = func(ctx genericapirequest.Context, name string) (string, error) {
				return NamespaceKeyFunc(ctx, prefix, name)
			}
		} else {
			e.KeyRootFunc = func(ctx genericapirequest.Context) string {
				return prefix
			}
			e.KeyFunc = func(ctx genericapirequest.Context, name string) (string, error) {
				return NoNamespaceKeyFunc(ctx, prefix, name)
			}
		}
	}
	// We adapt the store's keyFunc so that we can use it with the StorageDecorator
	// without making any assumptions about where objects are stored in etcd
	keyFunc := func(obj runtime.Object) (string, error) {
		accessor, err := meta.Accessor(obj)
		if err != nil {
			return "", err
		}
		if isNamespaced {
			return e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName())
		}
		return e.KeyFunc(genericapirequest.NewContext(), accessor.GetName())
	}
	triggerFunc := options.TriggerFunc
	if triggerFunc == nil {
		triggerFunc = storage.NoTriggerPublisher
	}
	if e.DeleteCollectionWorkers == 0 {
		e.DeleteCollectionWorkers = opts.DeleteCollectionWorkers
	}
	e.EnableGarbageCollection = opts.EnableGarbageCollection
	if e.ObjectNameFunc == nil {
		// Default: the object's name is read from its metadata.
		e.ObjectNameFunc = func(obj runtime.Object) (string, error) {
			accessor, err := meta.Accessor(obj)
			if err != nil {
				return "", err
			}
			return accessor.GetName(), nil
		}
	}
	if e.Storage == nil {
		// The decorator also returns the cleanup function for the storage's
		// underlying clients.
		e.Storage, e.DestroyFunc = opts.Decorator(
			e.Copier,
			opts.StorageConfig,
			e.WatchCacheSize,
			e.NewFunc(),
			prefix,
			keyFunc,
			e.NewListFunc,
			attrFunc,
			triggerFunc,
		)
	}
	return nil
}
// ConvertToTable renders the object (or list) as a Table, preferring the
// store's configured TableConvertor and falling back to the default one for
// this resource.
func (e *Store) ConvertToTable(ctx genericapirequest.Context, object runtime.Object, tableOptions runtime.Object) (*metav1alpha1.Table, error) {
	convertor := e.TableConvertor
	if convertor == nil {
		convertor = rest.NewDefaultTableConvertor(e.qualifiedResourceFromContext(ctx))
	}
	return convertor.ConvertToTable(ctx, object, tableOptions)
}
Clarify finalizer function
Kubernetes-commit: c845c444d52b81689e4555aec0e8175f687b6a44
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package registry
import (
"fmt"
"reflect"
"strings"
"sync"
"time"
kubeerr "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1alpha1 "k8s.io/apimachinery/pkg/apis/meta/v1alpha1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/endpoints/request"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/apiserver/pkg/storage"
storeerr "k8s.io/apiserver/pkg/storage/errors"
"github.com/golang/glog"
)
// ObjectFunc is a function to act on a given object. An error may be returned
// if the hook cannot be completed. An ObjectFunc may transform the provided
// object in place.
type ObjectFunc func(obj runtime.Object) error
// GenericStore interface can be used for type assertions when we need to
// access the underlying strategies of a Store without knowing its concrete
// resource type.
type GenericStore interface {
	GetCreateStrategy() rest.RESTCreateStrategy
	GetUpdateStrategy() rest.RESTUpdateStrategy
	GetDeleteStrategy() rest.RESTDeleteStrategy
	GetExportStrategy() rest.RESTExportStrategy
}
// Store implements pkg/api/rest.StandardStorage. It's intended to be
// embeddable and allows the consumer to implement any non-generic functions
// that are required. This object is intended to be copyable so that it can be
// used in different ways but share the same underlying behavior.
//
// All fields are required unless specified.
//
// The intended use of this type is embedding within a Kind specific
// RESTStorage implementation. This type provides CRUD semantics on a Kubelike
// resource, handling details like conflict detection with ResourceVersion and
// semantics. The RESTCreateStrategy, RESTUpdateStrategy, and
// RESTDeleteStrategy are generic across all backends, and encapsulate logic
// specific to the API.
//
// TODO: make the default exposed methods exactly match a generic RESTStorage
type Store struct {
	// Copier is used to make some storage caching decorators work
	Copier runtime.ObjectCopier
	// NewFunc returns a new instance of the type this registry returns for a
	// GET of a single object, e.g.:
	//
	// curl GET /apis/group/version/namespaces/my-ns/myresource/name-of-object
	NewFunc func() runtime.Object
	// NewListFunc returns a new empty list of the type this registry returns;
	// it is the type returned when the resource is listed, e.g.:
	//
	// curl GET /apis/group/version/namespaces/my-ns/myresource
	NewListFunc func() runtime.Object
	// DefaultQualifiedResource is the pluralized name of the resource.
	// This field is used if there is no request info present in the context.
	// See qualifiedResourceFromContext for details.
	DefaultQualifiedResource schema.GroupResource
	// KeyRootFunc returns the root etcd key for this resource; should not
	// include trailing "/". This is used for operations that work on the
	// entire collection (listing and watching).
	//
	// KeyRootFunc and KeyFunc must be supplied together or not at all.
	KeyRootFunc func(ctx genericapirequest.Context) string
	// KeyFunc returns the key for a specific object in the collection.
	// KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace'
	// can be gotten from ctx.
	//
	// KeyFunc and KeyRootFunc must be supplied together or not at all.
	KeyFunc func(ctx genericapirequest.Context, name string) (string, error)
	// ObjectNameFunc returns the name of an object or an error.
	ObjectNameFunc func(obj runtime.Object) (string, error)
	// TTLFunc returns the TTL (time to live) that objects should be persisted
	// with. The existing parameter is the current TTL or the default for this
	// operation. The update parameter indicates whether this is an operation
	// against an existing object.
	//
	// Objects that are persisted with a TTL are evicted once the TTL expires.
	TTLFunc func(obj runtime.Object, existing uint64, update bool) (uint64, error)
	// PredicateFunc returns a matcher corresponding to the provided labels
	// and fields. The SelectionPredicate returned should return true if the
	// object matches the given field and label selectors.
	PredicateFunc func(label labels.Selector, field fields.Selector) storage.SelectionPredicate
	// EnableGarbageCollection affects the handling of Update and Delete
	// requests. Enabling garbage collection allows finalizers to do work to
	// finalize this object before the store deletes it.
	//
	// If any store has garbage collection enabled, it must also be enabled in
	// the kube-controller-manager.
	EnableGarbageCollection bool
	// DeleteCollectionWorkers is the maximum number of workers in a single
	// DeleteCollection call. Delete requests for the items in a collection
	// are issued in parallel.
	DeleteCollectionWorkers int
	// Decorator is an optional exit hook on an object returned from the
	// underlying storage. The returned object could be an individual object
	// (e.g. Pod) or a list type (e.g. PodList). Decorator is intended for
	// integrations that are above storage and should only be used for
	// specific cases where storage of the value is not appropriate, since
	// they cannot be watched.
	Decorator ObjectFunc
	// CreateStrategy implements resource-specific behavior during creation.
	CreateStrategy rest.RESTCreateStrategy
	// AfterCreate implements a further operation to run after a resource is
	// created and before it is decorated, optional.
	AfterCreate ObjectFunc
	// UpdateStrategy implements resource-specific behavior during updates.
	UpdateStrategy rest.RESTUpdateStrategy
	// AfterUpdate implements a further operation to run after a resource is
	// updated and before it is decorated, optional.
	AfterUpdate ObjectFunc
	// DeleteStrategy implements resource-specific behavior during deletion.
	DeleteStrategy rest.RESTDeleteStrategy
	// AfterDelete implements a further operation to run after a resource is
	// deleted and before it is decorated, optional.
	AfterDelete ObjectFunc
	// ReturnDeletedObject determines whether the Store returns the object
	// that was deleted. Otherwise, return a generic success status response.
	ReturnDeletedObject bool
	// ExportStrategy implements resource-specific behavior during export,
	// optional. Exported objects are not decorated.
	ExportStrategy rest.RESTExportStrategy
	// TableConvertor is an optional interface for transforming items or lists
	// of items into tabular output. If unset, the default will be used.
	TableConvertor rest.TableConvertor
	// Storage is the interface for the underlying storage for the resource.
	Storage storage.Interface
	// Called to cleanup clients used by the underlying Storage; optional.
	DestroyFunc func()
	// Maximum size of the watch history cached in memory, in number of entries.
	// This value is ignored if Storage is non-nil. Nil is replaced with a default value.
	// A zero integer will disable caching.
	WatchCacheSize *int
}
// Note: the rest.StandardStorage interface aggregates the common REST verbs
var _ rest.StandardStorage = &Store{}
var _ rest.Exporter = &Store{}
var _ rest.TableConvertor = &Store{}
var _ GenericStore = &Store{}

// OptimisticLockErrorMsg is the message returned to clients when an update
// conflicts with a concurrent modification (resourceVersion mismatch).
const OptimisticLockErrorMsg = "the object has been modified; please apply your changes to the latest version and try again"
// NamespaceKeyRootFunc is the default function for constructing storage paths
// to resource directories enforcing namespace rules. When the context carries
// a non-empty namespace, it is appended to the prefix.
func NamespaceKeyRootFunc(ctx genericapirequest.Context, prefix string) string {
	if ns, ok := genericapirequest.NamespaceFrom(ctx); ok && len(ns) > 0 {
		return prefix + "/" + ns
	}
	return prefix
}
// NamespaceKeyFunc is the default function for constructing storage paths to
// a resource relative to the given prefix enforcing namespace rules. If the
// context does not contain a namespace, it errors.
func NamespaceKeyFunc(ctx genericapirequest.Context, prefix string, name string) (string, error) {
	ns, ok := genericapirequest.NamespaceFrom(ctx)
	if !ok || len(ns) == 0 {
		return "", kubeerr.NewBadRequest("Namespace parameter required.")
	}
	if len(name) == 0 {
		return "", kubeerr.NewBadRequest("Name parameter required.")
	}
	// The name becomes a path segment; reject anything unsafe.
	if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 {
		return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
	}
	return NamespaceKeyRootFunc(ctx, prefix) + "/" + name, nil
}
// NoNamespaceKeyFunc is the default function for constructing storage paths
// to a resource relative to the given prefix without a namespace.
func NoNamespaceKeyFunc(ctx genericapirequest.Context, prefix string, name string) (string, error) {
	if name == "" {
		return "", kubeerr.NewBadRequest("Name parameter required.")
	}
	// The name becomes a path segment; reject anything unsafe.
	msgs := path.IsValidPathSegmentName(name)
	if len(msgs) > 0 {
		return "", kubeerr.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";")))
	}
	return prefix + "/" + name, nil
}
// New implements RESTStorage.New by delegating to the configured NewFunc.
func (e *Store) New() runtime.Object {
	return e.NewFunc()
}
// NewList implements rest.Lister by delegating to the configured NewListFunc.
func (e *Store) NewList() runtime.Object {
	return e.NewListFunc()
}
// GetCreateStrategy implements GenericStore; it exposes the strategy used
// for create operations.
func (e *Store) GetCreateStrategy() rest.RESTCreateStrategy {
	return e.CreateStrategy
}
// GetUpdateStrategy implements GenericStore; it exposes the strategy used
// for update operations.
func (e *Store) GetUpdateStrategy() rest.RESTUpdateStrategy {
	return e.UpdateStrategy
}
// GetDeleteStrategy implements GenericStore; it exposes the strategy used
// for delete operations.
func (e *Store) GetDeleteStrategy() rest.RESTDeleteStrategy {
	return e.DeleteStrategy
}
// GetExportStrategy implements GenericStore; it exposes the (optional)
// strategy used for export operations.
func (e *Store) GetExportStrategy() rest.RESTExportStrategy {
	return e.ExportStrategy
}
// List returns a list of items matching the label and field selectors in
// options, according to the store's PredicateFunc. The result is passed
// through the Decorator, if one is configured.
func (e *Store) List(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (runtime.Object, error) {
	labelSel := labels.Everything()
	fieldSel := fields.Everything()
	if options != nil {
		if options.LabelSelector != nil {
			labelSel = options.LabelSelector
		}
		if options.FieldSelector != nil {
			fieldSel = options.FieldSelector
		}
	}
	out, err := e.ListPredicate(ctx, e.PredicateFunc(labelSel, fieldSel), options)
	if err != nil {
		return nil, err
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, err
		}
	}
	return out, nil
}
// ListPredicate returns a list of all the items matching the given
// SelectionPredicate. When the predicate matches a single object by name and
// a key can be derived from the context, a single-key lookup is used as an
// optimization.
func (e *Store) ListPredicate(ctx genericapirequest.Context, p storage.SelectionPredicate, options *metainternalversion.ListOptions) (runtime.Object, error) {
	if options == nil {
		// By default we should serve the request from etcd.
		options = &metainternalversion.ListOptions{ResourceVersion: ""}
	}
	p.IncludeUninitialized = options.IncludeUninitialized
	list := e.NewListFunc()
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	if name, ok := p.MatchesSingle(); ok {
		if key, err := e.KeyFunc(ctx, name); err == nil {
			err := e.Storage.GetToList(ctx, key, options.ResourceVersion, p, list)
			return list, storeerr.InterpretListError(err, qualifiedResource)
		}
		// if we cannot extract a key based on the current context, the optimization is skipped
	}
	err := e.Storage.List(ctx, e.KeyRootFunc(ctx), options.ResourceVersion, p, list)
	return list, storeerr.InterpretListError(err, qualifiedResource)
}
// Create inserts a new item according to the unique key from the object.
// The object is validated and defaulted via the CreateStrategy, persisted,
// run through the AfterCreate hook and the Decorator, and — unless
// includeUninitialized is set — held until its initializers complete.
func (e *Store) Create(ctx genericapirequest.Context, obj runtime.Object, includeUninitialized bool) (runtime.Object, error) {
	if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
		return nil, err
	}
	name, err := e.ObjectNameFunc(obj)
	if err != nil {
		return nil, err
	}
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	ttl, err := e.calculateTTL(obj, 0, false)
	if err != nil {
		return nil, err
	}
	out := e.NewFunc()
	if err := e.Storage.Create(ctx, key, obj, out, ttl); err != nil {
		err = storeerr.InterpretCreateError(err, qualifiedResource, name)
		err = rest.CheckGeneratedNameError(e.CreateStrategy, err, obj)
		if !kubeerr.IsAlreadyExists(err) {
			return nil, err
		}
		// On AlreadyExists, check whether the existing object is being
		// deleted; if so, enrich the error message. Failures while probing
		// fall back to returning the original AlreadyExists error.
		if errGet := e.Storage.Get(ctx, key, "", out, false); errGet != nil {
			return nil, err
		}
		accessor, errGetAcc := meta.Accessor(out)
		if errGetAcc != nil {
			return nil, err
		}
		if accessor.GetDeletionTimestamp() != nil {
			msg := &err.(*kubeerr.StatusError).ErrStatus.Message
			*msg = fmt.Sprintf("object is being deleted: %s", *msg)
		}
		return nil, err
	}
	if e.AfterCreate != nil {
		if err := e.AfterCreate(out); err != nil {
			return nil, err
		}
	}
	if e.Decorator != nil {
		// Decorate the object we are about to return (the stored state),
		// not the caller's input. Previously this decorated `obj`, which
		// left the returned `out` undecorated.
		if err := e.Decorator(out); err != nil {
			return nil, err
		}
	}
	if !includeUninitialized {
		return e.WaitForInitialized(ctx, out)
	}
	return out, nil
}
// WaitForInitialized holds until the object is initialized, or returns an error if the default limit expires.
// This method is exposed publicly for consumers of generic rest tooling.
func (e *Store) WaitForInitialized(ctx genericapirequest.Context, obj runtime.Object) (runtime.Object, error) {
	// return early if we don't have initializers, or if they've completed already
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return obj, nil
	}
	initializers := accessor.GetInitializers()
	if initializers == nil {
		return obj, nil
	}
	if result := initializers.Result; result != nil {
		return nil, kubeerr.FromObject(result)
	}
	key, err := e.KeyFunc(ctx, accessor.GetName())
	if err != nil {
		return nil, err
	}
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	// Watch the single object from its current resourceVersion until its
	// initializers clear, fail, or the object disappears.
	w, err := e.Storage.Watch(ctx, key, accessor.GetResourceVersion(), storage.SelectionPredicate{
		Label: labels.Everything(),
		Field: fields.Everything(),

		IncludeUninitialized: true,
	})
	if err != nil {
		return nil, err
	}
	defer w.Stop()

	latest := obj
	ch := w.ResultChan()
	for {
		select {
		case event, ok := <-ch:
			if !ok {
				msg := fmt.Sprintf("server has timed out waiting for the initialization of %s %s",
					qualifiedResource.String(), accessor.GetName())
				return nil, kubeerr.NewTimeoutError(msg, 0)
			}
			switch event.Type {
			case watch.Deleted:
				if latest = event.Object; latest != nil {
					if accessor, err := meta.Accessor(latest); err == nil {
						if initializers := accessor.GetInitializers(); initializers != nil && initializers.Result != nil {
							// initialization failed, but we missed the modification event
							return nil, kubeerr.FromObject(initializers.Result)
						}
					}
				}
				return nil, kubeerr.NewInternalError(fmt.Errorf("object deleted while waiting for creation"))
			case watch.Error:
				if status, ok := event.Object.(*metav1.Status); ok {
					return nil, &kubeerr.StatusError{ErrStatus: *status}
				}
				return nil, kubeerr.NewInternalError(fmt.Errorf("unexpected object in watch stream, can't complete initialization %T", event.Object))
			case watch.Modified:
				latest = event.Object
				accessor, err = meta.Accessor(latest)
				if err != nil {
					return nil, kubeerr.NewInternalError(fmt.Errorf("object no longer has access to metadata %T: %v", latest, err))
				}
				initializers := accessor.GetInitializers()
				if initializers == nil {
					// completed initialization
					return latest, nil
				}
				if result := initializers.Result; result != nil {
					// initialization failed
					return nil, kubeerr.FromObject(result)
				}
			}
		case <-ctx.Done():
			// The request context was canceled or timed out. Previously this
			// case had an empty body with no return: once Done() was closed
			// the select became permanently ready and this loop busy-spun
			// until the watch channel closed. Return a timeout error instead.
			msg := fmt.Sprintf("request canceled while waiting for the initialization of %s %s",
				qualifiedResource.String(), accessor.GetName())
			return nil, kubeerr.NewTimeoutError(msg, 0)
		}
	}
}
// shouldDeleteDuringUpdate checks if an Update is removing all the object's
// finalizers. If so, it further checks whether the existing object's
// DeletionGracePeriodSeconds is 0, i.e. the object is only waiting on
// finalization and should now be removed.
func (e *Store) shouldDeleteDuringUpdate(ctx genericapirequest.Context, key string, obj, existing runtime.Object) bool {
	updated, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	current, err := meta.Accessor(existing)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	if len(updated.GetFinalizers()) != 0 {
		// Finalizers remain after the update; keep the object.
		return false
	}
	grace := current.GetDeletionGracePeriodSeconds()
	return grace != nil && *grace == 0
}
// shouldDeleteForFailedInitialization returns true if the provided object is
// still initializing and has a failure result recorded.
func (e *Store) shouldDeleteForFailedInitialization(ctx genericapirequest.Context, obj runtime.Object) bool {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return false
	}
	initializers := accessor.GetInitializers()
	return initializers != nil && initializers.Result != nil
}
// deleteWithoutFinalizers handles deleting an object ignoring its finalizer list.
// Used for objects that have either been finalized or have never initialized.
// It returns the request object (not the deletion status), since callers are
// PUT handlers whose clients expect an object back.
func (e *Store) deleteWithoutFinalizers(ctx genericapirequest.Context, name, key string, obj runtime.Object, preconditions *storage.Preconditions) (runtime.Object, bool, error) {
	out := e.NewFunc()
	glog.V(6).Infof("going to delete %s from registry, triggered by update", name)
	if err := e.Storage.Delete(ctx, key, out, preconditions); err != nil {
		// Deletion is racy, i.e., there could be multiple update
		// requests to remove all finalizers from the object, so we
		// ignore the NotFound error.
		if storage.IsNotFound(err) {
			_, err := e.finalizeDelete(ctx, obj, true)
			// clients are expecting an updated object if a PUT succeeded,
			// but finalizeDelete returns a metav1.Status, so return
			// the object in the request instead.
			return obj, false, err
		}
		return nil, false, storeerr.InterpretDeleteError(err, e.qualifiedResourceFromContext(ctx), name)
	}
	_, err := e.finalizeDelete(ctx, out, true)
	// clients are expecting an updated object if a PUT succeeded, but
	// finalizeDelete returns a metav1.Status, so return the object in
	// the request instead.
	return obj, false, err
}
// Update performs an atomic update and set of the object. Returns the result of the update
// or an error. If the registry allows create-on-update, the create flow will be executed.
// A bool is returned along with the object and any errors, to indicate object creation.
func (e *Store) Update(ctx genericapirequest.Context, name string, objInfo rest.UpdatedObjectInfo) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	var (
		// creatingObj holds the object being created so that generate-name
		// errors can be reported against it after GuaranteedUpdate fails.
		creatingObj runtime.Object
		creating    = false
	)
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	storagePreconditions := &storage.Preconditions{}
	if preconditions := objInfo.Preconditions(); preconditions != nil {
		storagePreconditions.UID = preconditions.UID
	}
	out := e.NewFunc()
	// deleteObj is only used in case a deletion is carried out
	var deleteObj runtime.Object
	err = e.Storage.GuaranteedUpdate(ctx, key, out, true, storagePreconditions, func(existing runtime.Object, res storage.ResponseMeta) (runtime.Object, *uint64, error) {
		// Given the existing object, get the new object
		obj, err := objInfo.UpdatedObject(ctx, existing)
		if err != nil {
			return nil, nil, err
		}
		// If AllowUnconditionalUpdate() is true and the object specified by
		// the user does not have a resource version, then we populate it with
		// the latest version. Else, we check that the version specified by
		// the user matches the version of latest storage object.
		resourceVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
		if err != nil {
			return nil, nil, err
		}
		doUnconditionalUpdate := resourceVersion == 0 && e.UpdateStrategy.AllowUnconditionalUpdate()
		version, err := e.Storage.Versioner().ObjectResourceVersion(existing)
		if err != nil {
			return nil, nil, err
		}
		if version == 0 {
			// The object does not exist in storage; fall through to the
			// create flow if the strategy permits create-on-update.
			if !e.UpdateStrategy.AllowCreateOnUpdate() {
				return nil, nil, kubeerr.NewNotFound(qualifiedResource, name)
			}
			creating = true
			creatingObj = obj
			if err := rest.BeforeCreate(e.CreateStrategy, ctx, obj); err != nil {
				return nil, nil, err
			}
			ttl, err := e.calculateTTL(obj, 0, false)
			if err != nil {
				return nil, nil, err
			}
			return obj, &ttl, nil
		}
		creating = false
		creatingObj = nil
		if doUnconditionalUpdate {
			// Update the object's resource version to match the latest
			// storage object's resource version.
			err = e.Storage.Versioner().UpdateObject(obj, res.ResourceVersion)
			if err != nil {
				return nil, nil, err
			}
		} else {
			// Check if the object's resource version matches the latest
			// resource version.
			newVersion, err := e.Storage.Versioner().ObjectResourceVersion(obj)
			if err != nil {
				return nil, nil, err
			}
			if newVersion == 0 {
				// TODO: The Invalid error should have a field for Resource.
				// After that field is added, we should fill the Resource and
				// leave the Kind field empty. See the discussion in #18526.
				qualifiedKind := schema.GroupKind{Group: qualifiedResource.Group, Kind: qualifiedResource.Resource}
				fieldErrList := field.ErrorList{field.Invalid(field.NewPath("metadata").Child("resourceVersion"), newVersion, "must be specified for an update")}
				return nil, nil, kubeerr.NewInvalid(qualifiedKind, name, fieldErrList)
			}
			if newVersion != version {
				return nil, nil, kubeerr.NewConflict(qualifiedResource, name, fmt.Errorf(OptimisticLockErrorMsg))
			}
		}
		if err := rest.BeforeUpdate(e.UpdateStrategy, ctx, obj, existing); err != nil {
			return nil, nil, err
		}
		// An update that removes the last finalizer from an object with a
		// zero grace period is turned into an immediate deletion via the
		// errEmptiedFinalizers sentinel, handled below.
		if e.shouldDeleteDuringUpdate(ctx, key, obj, existing) {
			deleteObj = obj
			return nil, nil, errEmptiedFinalizers
		}
		ttl, err := e.calculateTTL(obj, res.TTL, true)
		if err != nil {
			return nil, nil, err
		}
		if int64(ttl) != res.TTL {
			return obj, &ttl, nil
		}
		return obj, nil, nil
	})
	if err != nil {
		// delete the object
		if err == errEmptiedFinalizers {
			return e.deleteWithoutFinalizers(ctx, name, key, deleteObj, storagePreconditions)
		}
		if creating {
			err = storeerr.InterpretCreateError(err, qualifiedResource, name)
			err = rest.CheckGeneratedNameError(e.CreateStrategy, err, creatingObj)
		} else {
			err = storeerr.InterpretUpdateError(err, qualifiedResource, name)
		}
		return nil, false, err
	}
	if e.shouldDeleteForFailedInitialization(ctx, out) {
		return e.deleteWithoutFinalizers(ctx, name, key, out, storagePreconditions)
	}
	if creating {
		if e.AfterCreate != nil {
			if err := e.AfterCreate(out); err != nil {
				return nil, false, err
			}
		}
	} else {
		if e.AfterUpdate != nil {
			if err := e.AfterUpdate(out); err != nil {
				return nil, false, err
			}
		}
	}
	if e.Decorator != nil {
		if err := e.Decorator(out); err != nil {
			return nil, false, err
		}
	}
	return out, creating, nil
}
// Get retrieves the item from storage, applying the Decorator if configured.
func (e *Store) Get(ctx genericapirequest.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, err
	}
	obj := e.NewFunc()
	if err := e.Storage.Get(ctx, key, options.ResourceVersion, obj, false); err != nil {
		return nil, storeerr.InterpretGetError(err, e.qualifiedResourceFromContext(ctx), name)
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return obj, nil
}
// qualifiedResourceFromContext derives a GroupResource from the context's
// request info, falling back to DefaultQualifiedResource when absent.
func (e *Store) qualifiedResourceFromContext(ctx genericapirequest.Context) schema.GroupResource {
	info, ok := request.RequestInfoFrom(ctx)
	if !ok {
		// some implementations access storage directly and thus the
		// context carries no RequestInfo
		return e.DefaultQualifiedResource
	}
	return schema.GroupResource{Group: info.APIGroup, Resource: info.Resource}
}
// Sentinel errors used to signal outcomes out of GuaranteedUpdate callbacks
// within this file; they never escape to callers of the Store API.
var (
	// errAlreadyDeleting aborts an update because graceful deletion is already pending.
	errAlreadyDeleting = fmt.Errorf("abort delete")
	// errDeleteNow signals that the object should be deleted immediately.
	errDeleteNow = fmt.Errorf("delete now")
	// errEmptiedFinalizers signals that an update removed the object's last finalizer.
	errEmptiedFinalizers = fmt.Errorf("emptied finalizers")
)
// shouldOrphanDependents decides whether the FinalizerOrphanDependents
// finalizer should be present on the object. Precedence, highest first:
// the delete options, the object's existing finalizers, and finally the
// DeleteStrategy's DefaultGarbageCollectionPolicy.
func shouldOrphanDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {
	gcStrategy, hasGCStrategy := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy)
	if hasGCStrategy && gcStrategy.DefaultGarbageCollectionPolicy() == rest.Unsupported {
		// the resource opts out of garbage collection entirely: never orphan
		return false
	}
	// an explicit policy supplied at deletion time wins over everything else
	if options != nil {
		if options.OrphanDependents != nil {
			return *options.OrphanDependents
		}
		if options.PropagationPolicy != nil {
			switch *options.PropagationPolicy {
			case metav1.DeletePropagationOrphan:
				return true
			case metav1.DeletePropagationBackground, metav1.DeletePropagationForeground:
				return false
			}
		}
	}
	// A finalizer already set on the object overrides the strategy default;
	// validation should make sure the two cases won't be true at the same time.
	for _, f := range accessor.GetFinalizers() {
		if f == metav1.FinalizerOrphanDependents {
			return true
		}
		if f == metav1.FinalizerDeleteDependents {
			return false
		}
	}
	// fall back to the REST object type's default orphan policy, if any
	return hasGCStrategy && gcStrategy.DefaultGarbageCollectionPolicy() == rest.OrphanDependents
}
// shouldDeleteDependents decides whether the FinalizerDeleteDependents
// (foreground deletion) finalizer should be present. Precedence, highest
// first: the delete options, the object's existing finalizers, and the
// DeleteStrategy's DefaultGarbageCollectionPolicy.
func shouldDeleteDependents(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) bool {
	if gc, ok := e.DeleteStrategy.(rest.GarbageCollectionDeleteStrategy); ok && gc.DefaultGarbageCollectionPolicy() == rest.Unsupported {
		// the resource opts out of garbage collection: never delete in foreground
		return false
	}
	// an explicit policy supplied at deletion time overrides everything else
	if options != nil {
		if options.OrphanDependents != nil {
			// an explicit orphaning decision precludes foreground deletion
			return false
		}
		if options.PropagationPolicy != nil {
			switch *options.PropagationPolicy {
			case metav1.DeletePropagationForeground:
				return true
			case metav1.DeletePropagationBackground, metav1.DeletePropagationOrphan:
				return false
			}
		}
	}
	// A finalizer already set on the object overrides the default;
	// validation has made sure the two cases won't be true at the same time.
	for _, f := range accessor.GetFinalizers() {
		if f == metav1.FinalizerDeleteDependents {
			return true
		}
		if f == metav1.FinalizerOrphanDependents {
			return false
		}
	}
	return false
}
// deletionFinalizersForGarbageCollection computes the finalizer list the
// garbage collector needs on this object, returning whether the list changed
// and the list itself. When garbage collection is disabled for the store it
// returns false so finalizers that would never be cleared are not set.
func deletionFinalizersForGarbageCollection(e *Store, accessor metav1.Object, options *metav1.DeleteOptions) (bool, []string) {
	if !e.EnableGarbageCollection {
		return false, []string{}
	}
	orphan := shouldOrphanDependents(e, accessor, options)
	foreground := shouldDeleteDependents(e, accessor, options)
	// strip both GC finalizers first, then re-add whichever applies
	updated := []string{}
	for _, f := range accessor.GetFinalizers() {
		if f != metav1.FinalizerOrphanDependents && f != metav1.FinalizerDeleteDependents {
			updated = append(updated, f)
		}
	}
	if orphan {
		updated = append(updated, metav1.FinalizerOrphanDependents)
	}
	if foreground {
		updated = append(updated, metav1.FinalizerDeleteDependents)
	}
	// compare as sets so ordering differences do not force an update
	if sets.NewString(accessor.GetFinalizers()...).Equal(sets.NewString(updated...)) {
		return false, accessor.GetFinalizers()
	}
	return true, updated
}
// markAsDeleting stamps the object's DeletionTimestamp with "now" and its
// DeletionGracePeriodSeconds with 0. Finalizers watch for such updates and
// finalize the object when their IDs appear in its Finalizers list.
func markAsDeleting(obj runtime.Object) (err error) {
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}
	// Bump Generation for resources without graceful-deletion support; for
	// resources that support it, the bump is handled in pkg/api/rest/delete.go.
	if accessor.GetDeletionTimestamp() == nil && accessor.GetGeneration() > 0 {
		accessor.SetGeneration(accessor.GetGeneration() + 1)
	}
	now := metav1.NewTime(time.Now())
	accessor.SetDeletionTimestamp(&now)
	zero := int64(0)
	accessor.SetDeletionGracePeriodSeconds(&zero)
	return nil
}
// updateForGracefulDeletionAndFinalizers updates the given object for
// graceful deletion and finalization by setting the deletion timestamp and
// grace period seconds (graceful deletion) and updating the list of
// finalizers (finalization); it returns:
//
// 1. an error
// 2. a boolean indicating that the object was not found, but it should be
//    ignored
// 3. a boolean indicating that the object's grace period is exhausted and it
//    should be deleted immediately
// 4. a new output object with the state that was updated
// 5. a copy of the last existing state of the object
func (e *Store) updateForGracefulDeletionAndFinalizers(ctx genericapirequest.Context, name, key string, options *metav1.DeleteOptions, preconditions storage.Preconditions, in runtime.Object) (err error, ignoreNotFound, deleteImmediately bool, out, lastExisting runtime.Object) {
	lastGraceful := int64(0)
	var pendingFinalizers bool
	out = e.NewFunc()
	err = e.Storage.GuaranteedUpdate(
		ctx,
		key,
		out,
		false, /* ignoreNotFound */
		&preconditions,
		storage.SimpleUpdate(func(existing runtime.Object) (runtime.Object, error) {
			graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, existing, options)
			if err != nil {
				return nil, err
			}
			if pendingGraceful {
				// deletion is already in progress; abort via sentinel
				return nil, errAlreadyDeleting
			}
			// Add/remove the orphan finalizer as the options dictates.
			// Note that this occurs after checking pendingGraceful, so
			// finalizers cannot be updated via DeleteOptions if deletion has
			// started.
			existingAccessor, err := meta.Accessor(existing)
			if err != nil {
				return nil, err
			}
			needsUpdate, newFinalizers := deletionFinalizersForGarbageCollection(e, existingAccessor, options)
			if needsUpdate {
				existingAccessor.SetFinalizers(newFinalizers)
			}
			pendingFinalizers = len(existingAccessor.GetFinalizers()) != 0
			if !graceful {
				// set the DeleteGracePeriods to 0 if the object has pendingFinalizers but not supporting graceful deletion
				if pendingFinalizers {
					glog.V(6).Infof("update the DeletionTimestamp to \"now\" and GracePeriodSeconds to 0 for object %s, because it has pending finalizers", name)
					err = markAsDeleting(existing)
					if err != nil {
						return nil, err
					}
					return existing, nil
				}
				// no finalizers and no graceful deletion: delete outright
				return nil, errDeleteNow
			}
			// BeforeDelete guarantees GracePeriodSeconds is set when graceful
			lastGraceful = *options.GracePeriodSeconds
			lastExisting = existing
			return existing, nil
		}),
	)
	switch err {
	case nil:
		// If there are pending finalizers, we never delete the object immediately.
		if pendingFinalizers {
			return nil, false, false, out, lastExisting
		}
		if lastGraceful > 0 {
			return nil, false, false, out, lastExisting
		}
		// If we are here, the registry supports grace period mechanism and
		// we are intentionally delete gracelessly. In this case, we may
		// enter a race with other k8s components. If other component wins
		// the race, the object will not be found, and we should tolerate
		// the NotFound error. See
		// https://github.com/kubernetes/kubernetes/issues/19403 for
		// details.
		return nil, true, true, out, lastExisting
	case errDeleteNow:
		// we've updated the object to have a zero grace period, or it's already at 0, so
		// we should fall through and truly delete the object.
		return nil, false, true, out, lastExisting
	case errAlreadyDeleting:
		out, err = e.finalizeDelete(ctx, in, true)
		return err, false, false, out, lastExisting
	default:
		return storeerr.InterpretUpdateError(err, e.qualifiedResourceFromContext(ctx), name), false, false, out, lastExisting
	}
}
// Delete removes the item from storage. The returned bool indicates whether
// the object was deleted immediately (true) or merely updated for graceful
// deletion/finalization (false).
func (e *Store) Delete(ctx genericapirequest.Context, name string, options *metav1.DeleteOptions) (runtime.Object, bool, error) {
	key, err := e.KeyFunc(ctx, name)
	if err != nil {
		return nil, false, err
	}
	obj := e.NewFunc()
	qualifiedResource := e.qualifiedResourceFromContext(ctx)
	if err := e.Storage.Get(ctx, key, "", obj, false); err != nil {
		return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)
	}
	// support older consumers of delete by treating "nil" as delete immediately
	if options == nil {
		options = metav1.NewDeleteOptions(0)
	}
	var preconditions storage.Preconditions
	if options.Preconditions != nil {
		preconditions.UID = options.Preconditions.UID
	}
	graceful, pendingGraceful, err := rest.BeforeDelete(e.DeleteStrategy, ctx, obj, options)
	if err != nil {
		return nil, false, err
	}
	// this means finalizers cannot be updated via DeleteOptions if a deletion is already pending
	if pendingGraceful {
		out, err := e.finalizeDelete(ctx, obj, false)
		return out, false, err
	}
	// check if obj has pending finalizers
	accessor, err := meta.Accessor(obj)
	if err != nil {
		return nil, false, kubeerr.NewInternalError(err)
	}
	pendingFinalizers := len(accessor.GetFinalizers()) != 0
	var ignoreNotFound bool
	var deleteImmediately bool = true
	var lastExisting, out runtime.Object
	// Handle combinations of graceful deletion and finalization by issuing
	// the correct updates.
	shouldUpdateFinalizers, _ := deletionFinalizersForGarbageCollection(e, accessor, options)
	// TODO: remove the check, because we support no-op updates now.
	if graceful || pendingFinalizers || shouldUpdateFinalizers {
		err, ignoreNotFound, deleteImmediately, out, lastExisting = e.updateForGracefulDeletionAndFinalizers(ctx, name, key, options, preconditions, obj)
	}
	// !deleteImmediately covers all cases where err != nil. We keep both to be future-proof.
	if !deleteImmediately || err != nil {
		return out, false, err
	}
	// delete immediately, or no graceful deletion supported
	glog.V(6).Infof("going to delete %s from registry: ", name)
	out = e.NewFunc()
	if err := e.Storage.Delete(ctx, key, out, &preconditions); err != nil {
		// Please refer to the place where we set ignoreNotFound for the reason
		// why we ignore the NotFound error .
		if storage.IsNotFound(err) && ignoreNotFound && lastExisting != nil {
			// The lastExisting object may not be the last state of the object
			// before its deletion, but it's the best approximation.
			out, err := e.finalizeDelete(ctx, lastExisting, true)
			return out, true, err
		}
		return nil, false, storeerr.InterpretDeleteError(err, qualifiedResource, name)
	}
	out, err = e.finalizeDelete(ctx, out, true)
	return out, true, err
}
// DeleteCollection removes all items returned by List with a given ListOptions from storage.
//
// DeleteCollection is currently NOT atomic. It can happen that only subset of objects
// will be deleted from storage, and then an error will be returned.
// In case of success, the list of deleted objects will be returned.
//
// TODO: Currently, there is no easy way to remove 'directory' entry from storage (if we
// are removing all objects of a given type) with the current API (it's technically
// possibly with storage API, but watch is not delivered correctly then).
// It will be possible to fix it with v3 etcd API.
func (e *Store) DeleteCollection(ctx genericapirequest.Context, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) {
	if listOptions == nil {
		listOptions = &metainternalversion.ListOptions{}
	} else {
		// copy before mutating so the caller's options are untouched
		listOptions = listOptions.DeepCopy()
	}
	// DeleteCollection must remain backwards compatible with old clients that expect it to
	// remove all resources, initialized or not, within the type. It is also consistent with
	// Delete which does not require IncludeUninitialized
	listOptions.IncludeUninitialized = true
	listObj, err := e.List(ctx, listOptions)
	if err != nil {
		return nil, err
	}
	items, err := meta.ExtractList(listObj)
	if err != nil {
		return nil, err
	}
	// Spawn a number of goroutines, so that we can issue requests to storage
	// in parallel to speed up deletion.
	// TODO: Make this proportional to the number of items to delete, up to
	// DeleteCollectionWorkers (it doesn't make much sense to spawn 16
	// workers to delete 10 items).
	workersNumber := e.DeleteCollectionWorkers
	if workersNumber < 1 {
		workersNumber = 1
	}
	wg := sync.WaitGroup{}
	// toProcess feeds item indices to the workers; errs is buffered so
	// every worker plus the distributor can report without blocking.
	toProcess := make(chan int, 2*workersNumber)
	errs := make(chan error, workersNumber+1)
	go func() {
		defer utilruntime.HandleCrash(func(panicReason interface{}) {
			errs <- fmt.Errorf("DeleteCollection distributor panicked: %v", panicReason)
		})
		for i := 0; i < len(items); i++ {
			toProcess <- i
		}
		close(toProcess)
	}()
	wg.Add(workersNumber)
	for i := 0; i < workersNumber; i++ {
		go func() {
			// panics don't cross goroutine boundaries
			defer utilruntime.HandleCrash(func(panicReason interface{}) {
				errs <- fmt.Errorf("DeleteCollection goroutine panicked: %v", panicReason)
			})
			defer wg.Done()
			for {
				index, ok := <-toProcess
				if !ok {
					return
				}
				accessor, err := meta.Accessor(items[index])
				if err != nil {
					errs <- err
					return
				}
				// NotFound is tolerated: another actor may have deleted the item first
				if _, _, err := e.Delete(ctx, accessor.GetName(), options); err != nil && !kubeerr.IsNotFound(err) {
					glog.V(4).Infof("Delete %s in DeleteCollection failed: %v", accessor.GetName(), err)
					errs <- err
					return
				}
			}
		}()
	}
	wg.Wait()
	// report the first recorded error, if any; otherwise return the listed objects
	select {
	case err := <-errs:
		return nil, err
	default:
		return listObj, nil
	}
}
// finalizeDelete runs the Store's AfterDelete hook when runHooks is set and
// returns either the (optionally decorated) deleted object or a success
// Status describing it, depending on ReturnDeletedObject.
func (e *Store) finalizeDelete(ctx genericapirequest.Context, obj runtime.Object, runHooks bool) (runtime.Object, error) {
	if runHooks && e.AfterDelete != nil {
		if err := e.AfterDelete(obj); err != nil {
			return nil, err
		}
	}
	if !e.ReturnDeletedObject {
		// Return information about the deleted object, which enables clients
		// to verify the object was actually deleted and not waiting for finalizers.
		accessor, err := meta.Accessor(obj)
		if err != nil {
			return nil, err
		}
		qualifiedResource := e.qualifiedResourceFromContext(ctx)
		return &metav1.Status{
			Status: metav1.StatusSuccess,
			Details: &metav1.StatusDetails{
				Name:  accessor.GetName(),
				Group: qualifiedResource.Group,
				Kind:  qualifiedResource.Resource, // Yes we set Kind field to resource.
				UID:   accessor.GetUID(),
			},
		}, nil
	}
	if e.Decorator != nil {
		if err := e.Decorator(obj); err != nil {
			return nil, err
		}
	}
	return obj, nil
}
// Watch builds a SelectionPredicate from the given list options and delegates
// to WatchPredicate. Customize PredicateFunc to produce key-based matchers
// where possible; SelectionPredicate supports this automatically.
func (e *Store) Watch(ctx genericapirequest.Context, options *metainternalversion.ListOptions) (watch.Interface, error) {
	label := labels.Everything()
	field := fields.Everything()
	if options != nil {
		if options.LabelSelector != nil {
			label = options.LabelSelector
		}
		if options.FieldSelector != nil {
			field = options.FieldSelector
		}
	}
	predicate := e.PredicateFunc(label, field)
	resourceVersion := ""
	if options != nil {
		resourceVersion = options.ResourceVersion
		predicate.IncludeUninitialized = options.IncludeUninitialized
	}
	return e.WatchPredicate(ctx, predicate, resourceVersion)
}
// WatchPredicate starts a watch for items matching the predicate, preferring
// a single-key watch when the predicate selects exactly one object.
func (e *Store) WatchPredicate(ctx genericapirequest.Context, p storage.SelectionPredicate, resourceVersion string) (watch.Interface, error) {
	decorate := func(w watch.Interface) watch.Interface {
		if e.Decorator != nil {
			return newDecoratedWatcher(w, e.Decorator)
		}
		return w
	}
	if name, ok := p.MatchesSingle(); ok {
		if key, err := e.KeyFunc(ctx, name); err == nil {
			w, err := e.Storage.Watch(ctx, key, resourceVersion, p)
			if err != nil {
				return nil, err
			}
			return decorate(w), nil
		}
		// no key could be derived from the context, so the single-object
		// optimization is skipped and we fall through to a list watch
	}
	w, err := e.Storage.WatchList(ctx, e.KeyRootFunc(ctx), resourceVersion, p)
	if err != nil {
		return nil, err
	}
	return decorate(w), nil
}
// calculateTTL returns the updated TTL for an object, or an error if it
// cannot be computed. A negative defaultTTL is clamped to 1; zero means
// "no TTL", not "expire immediately".
func (e *Store) calculateTTL(obj runtime.Object, defaultTTL int64, update bool) (ttl uint64, err error) {
	// TODO: validate this is assertion is still valid.
	// etcd may report a negative TTL when expiration has not yet occurred
	// due to server lag; ensure a positive value is always used.
	if defaultTTL < 0 {
		defaultTTL = 1
	}
	ttl = uint64(defaultTTL)
	if e.TTLFunc == nil {
		return ttl, nil
	}
	return e.TTLFunc(obj, ttl, update)
}
// exportObjectMeta clears the metadata fields that must not appear on an
// exported object; in non-exact mode it additionally strips the namespace
// and, for generate-name objects, the generated name.
func exportObjectMeta(accessor metav1.Object, exact bool) {
	accessor.SetUID("")
	accessor.SetCreationTimestamp(metav1.Time{})
	accessor.SetDeletionTimestamp(nil)
	accessor.SetResourceVersion("")
	accessor.SetSelfLink("")
	if exact {
		return
	}
	accessor.SetNamespace("")
	if len(accessor.GetGenerateName()) > 0 {
		accessor.SetName("")
	}
}
// Export implements the rest.Exporter interface: it fetches the object,
// scrubs its metadata, and applies the ExportStrategy (or, absent one,
// the CreateStrategy's PrepareForCreate).
func (e *Store) Export(ctx genericapirequest.Context, name string, opts metav1.ExportOptions) (runtime.Object, error) {
	obj, err := e.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	accessor, err := meta.Accessor(obj)
	if err != nil {
		// objects without ObjectMeta are exported as-is
		glog.V(4).Infof("Object of type %v does not have ObjectMeta: %v", reflect.TypeOf(obj), err)
	} else {
		exportObjectMeta(accessor, opts.Exact)
	}
	if e.ExportStrategy == nil {
		e.CreateStrategy.PrepareForCreate(ctx, obj)
	} else if err = e.ExportStrategy.Export(ctx, obj, opts.Exact); err != nil {
		return nil, err
	}
	return obj, nil
}
// CompleteWithOptions updates the store with the provided options and
// defaults common fields. It validates that the store's required funcs and
// strategies are set, then fills in predicate, key, object-name, and storage
// defaults that were left nil.
func (e *Store) CompleteWithOptions(options *generic.StoreOptions) error {
	if e.DefaultQualifiedResource.Empty() {
		return fmt.Errorf("store %#v must have a non-empty qualified resource", e)
	}
	if e.NewFunc == nil {
		return fmt.Errorf("store for %s must have NewFunc set", e.DefaultQualifiedResource.String())
	}
	if e.NewListFunc == nil {
		return fmt.Errorf("store for %s must have NewListFunc set", e.DefaultQualifiedResource.String())
	}
	// KeyRootFunc and KeyFunc default together below, so they must be
	// supplied together or not at all.
	if (e.KeyRootFunc == nil) != (e.KeyFunc == nil) {
		return fmt.Errorf("store for %s must set both KeyRootFunc and KeyFunc or neither", e.DefaultQualifiedResource.String())
	}
	// Namespace scoping is derived from whichever strategy is available.
	var isNamespaced bool
	switch {
	case e.CreateStrategy != nil:
		isNamespaced = e.CreateStrategy.NamespaceScoped()
	case e.UpdateStrategy != nil:
		isNamespaced = e.UpdateStrategy.NamespaceScoped()
	default:
		return fmt.Errorf("store for %s must have CreateStrategy or UpdateStrategy set", e.DefaultQualifiedResource.String())
	}
	if e.DeleteStrategy == nil {
		return fmt.Errorf("store for %s must have DeleteStrategy set", e.DefaultQualifiedResource.String())
	}
	if options.RESTOptions == nil {
		return fmt.Errorf("options for %s must have RESTOptions set", e.DefaultQualifiedResource.String())
	}
	// Default the attribute extractor by scope when none was provided.
	attrFunc := options.AttrFunc
	if attrFunc == nil {
		if isNamespaced {
			attrFunc = storage.DefaultNamespaceScopedAttr
		} else {
			attrFunc = storage.DefaultClusterScopedAttr
		}
	}
	if e.PredicateFunc == nil {
		e.PredicateFunc = func(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
			return storage.SelectionPredicate{
				Label:    label,
				Field:    field,
				GetAttrs: attrFunc,
			}
		}
	}
	opts, err := options.RESTOptions.GetRESTOptions(e.DefaultQualifiedResource)
	if err != nil {
		return err
	}
	// ResourcePrefix must come from the underlying factory
	prefix := opts.ResourcePrefix
	if !strings.HasPrefix(prefix, "/") {
		prefix = "/" + prefix
	}
	if prefix == "/" {
		return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix)
	}
	// Set the default behavior for storage key generation
	if e.KeyRootFunc == nil && e.KeyFunc == nil {
		if isNamespaced {
			e.KeyRootFunc = func(ctx genericapirequest.Context) string {
				return NamespaceKeyRootFunc(ctx, prefix)
			}
			e.KeyFunc = func(ctx genericapirequest.Context, name string) (string, error) {
				return NamespaceKeyFunc(ctx, prefix, name)
			}
		} else {
			e.KeyRootFunc = func(ctx genericapirequest.Context) string {
				return prefix
			}
			e.KeyFunc = func(ctx genericapirequest.Context, name string) (string, error) {
				return NoNamespaceKeyFunc(ctx, prefix, name)
			}
		}
	}
	// We adapt the store's keyFunc so that we can use it with the StorageDecorator
	// without making any assumptions about where objects are stored in etcd
	keyFunc := func(obj runtime.Object) (string, error) {
		accessor, err := meta.Accessor(obj)
		if err != nil {
			return "", err
		}
		if isNamespaced {
			return e.KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName())
		}
		return e.KeyFunc(genericapirequest.NewContext(), accessor.GetName())
	}
	triggerFunc := options.TriggerFunc
	if triggerFunc == nil {
		triggerFunc = storage.NoTriggerPublisher
	}
	if e.DeleteCollectionWorkers == 0 {
		e.DeleteCollectionWorkers = opts.DeleteCollectionWorkers
	}
	e.EnableGarbageCollection = opts.EnableGarbageCollection
	if e.ObjectNameFunc == nil {
		// default object naming to the metadata name
		e.ObjectNameFunc = func(obj runtime.Object) (string, error) {
			accessor, err := meta.Accessor(obj)
			if err != nil {
				return "", err
			}
			return accessor.GetName(), nil
		}
	}
	if e.Storage == nil {
		// build the (possibly cached/decorated) storage backend
		e.Storage, e.DestroyFunc = opts.Decorator(
			e.Copier,
			opts.StorageConfig,
			e.WatchCacheSize,
			e.NewFunc(),
			prefix,
			keyFunc,
			e.NewListFunc,
			attrFunc,
			triggerFunc,
		)
	}
	return nil
}
// ConvertToTable renders the object as a Table using the store's
// TableConvertor, or the default convertor when none is configured.
func (e *Store) ConvertToTable(ctx genericapirequest.Context, object runtime.Object, tableOptions runtime.Object) (*metav1alpha1.Table, error) {
	convertor := e.TableConvertor
	if convertor == nil {
		convertor = rest.NewDefaultTableConvertor(e.qualifiedResourceFromContext(ctx))
	}
	return convertor.ConvertToTable(ctx, object, tableOptions)
}
|
// Copyright © 2017 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
package monitor
import (
"crypto/tls"
"io"
"strings"
"sync"
"github.com/TheThingsNetwork/go-utils/grpc/restartstream"
"github.com/TheThingsNetwork/go-utils/log"
"github.com/TheThingsNetwork/ttn/api"
"github.com/TheThingsNetwork/ttn/api/broker"
"github.com/TheThingsNetwork/ttn/api/gateway"
"github.com/TheThingsNetwork/ttn/api/router"
"github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
)
// GenericStream is used for sending anything to the monitor.
// Depending on the context, this can be
// - *router.UplinkMessage
// - *router.DownlinkMessage
// - *gateway.Status
// - *broker.DeduplicatedUplinkMessage
// - *broker.DownlinkMessage
type GenericStream interface {
	// Send forwards one message to the monitor; the accepted concrete
	// types are listed above.
	Send(interface{})
	// Close shuts the stream down.
	Close()
}
// ClientConfig for monitor Client
type ClientConfig struct {
	// BufferSize is the per-stream message buffer capacity.
	BufferSize int
}

// DefaultClientConfig for monitor Client
var DefaultClientConfig = ClientConfig{
	BufferSize: 10,
}

// TLSConfig to use when dialing monitors whose name carries a "-tls"
// suffix (see AddServer).
var TLSConfig *tls.Config
// NewClient creates a new Client with the given configuration. The client
// owns a cancelable context that governs all of its server connections.
func NewClient(config ClientConfig) *Client {
	ctx, cancel := context.WithCancel(context.Background())
	client := &Client{
		log:    log.Get(),
		ctx:    ctx,
		cancel: cancel,
		config: config,
	}
	return client
}
// Client for monitor
type Client struct {
	log    log.Interface
	// ctx is canceled by Close and aborts in-flight dials.
	ctx    context.Context
	cancel context.CancelFunc
	config ClientConfig

	// serverConns holds one entry per AddServer/AddConn call.
	serverConns []*serverConn
}
// DefaultDialOptions for connecting with a monitor server: block until the
// connection is up, keep retrying on non-temporary dial errors, and restart
// broken streams automatically.
var DefaultDialOptions = []grpc.DialOption{
	grpc.WithBlock(),
	grpc.FailOnNonTempDialError(false),
	grpc.WithStreamInterceptor(restartstream.Interceptor(restartstream.DefaultSettings)),
}
// AddServer adds a new monitor server. Supplying DialOptions overrides the default dial options.
// If the default DialOptions are used, TLS will be used to connect to monitors with a "-tls" suffix in their name.
// This function should not be called after streams have been started.
// The dial happens asynchronously; s.ready is closed once it has either
// succeeded (s.conn set) or failed.
func (c *Client) AddServer(name, address string, opts ...grpc.DialOption) {
	log := c.log.WithFields(log.Fields{"Monitor": name, "Address": address})
	log.Info("Adding Monitor server")
	s := &serverConn{
		ctx:   log,
		name:  name,
		ready: make(chan struct{}),
	}
	c.serverConns = append(c.serverConns, s)
	if len(opts) == 0 {
		// Copy DefaultDialOptions before appending. Appending directly to
		// the shared package-level slice would alias its backing array if it
		// ever had spare capacity, letting later callers overwrite this
		// call's credential option.
		defaults := make([]grpc.DialOption, 0, len(DefaultDialOptions)+1)
		defaults = append(defaults, DefaultDialOptions...)
		if strings.HasSuffix(name, "-tls") {
			opts = append(defaults, grpc.WithTransportCredentials(credentials.NewTLS(TLSConfig)))
		} else {
			opts = append(defaults, grpc.WithInsecure())
		}
	}
	go func() {
		// signal readiness whether or not the dial succeeded
		defer close(s.ready)
		conn, err := grpc.DialContext(
			c.ctx,
			address,
			opts...,
		)
		if err != nil {
			log.WithError(err).Error("Could not connect to Monitor server")
			return
		}
		s.conn = conn
	}()
}
// AddConn adds a new monitor server on an existing connection
// This function should not be called after streams have been started
func (c *Client) AddConn(name string, conn *grpc.ClientConn) {
	logger := c.log.WithFields(log.Fields{"Monitor": name})
	logger.Info("Adding Monitor connection")
	server := &serverConn{
		ctx:  logger,
		name: name,
		conn: conn,
	}
	c.serverConns = append(c.serverConns, server)
}
// Close the client and all its connections
func (c *Client) Close() {
	c.cancel()
	for i := range c.serverConns {
		c.serverConns[i].Close()
	}
}
// serverConn holds the connection state for a single monitor server.
type serverConn struct {
	ctx  log.Interface
	name string
	// ready is closed when the asynchronous dial started in AddServer has
	// settled; it is nil when the connection was supplied via AddConn.
	ready chan struct{}
	conn  *grpc.ClientConn
}
// Close waits for any pending dial to settle, then closes the connection.
func (c *serverConn) Close() {
	if c.ready != nil {
		<-c.ready
	}
	if conn := c.conn; conn != nil {
		conn.Close()
	}
}
// gatewayStreams fans gateway traffic out to every connected monitor server.
type gatewayStreams struct {
	log    log.Interface
	ctx    context.Context
	cancel context.CancelFunc
	// mu guards the three channel maps below.
	mu       sync.RWMutex
	uplink   map[string]chan *router.UplinkMessage
	downlink map[string]chan *router.DownlinkMessage
	status   map[string]chan *gateway.Status
}
// Send routes msg to the buffered channel of every registered monitor,
// dropping the message for any monitor whose buffer is full.
// Unrecognized message types are silently ignored.
func (s *gatewayStreams) Send(msg interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	switch msg := msg.(type) {
	case *router.UplinkMessage:
		s.log.Debug("Sending UplinkMessage to monitor")
		for serverName, ch := range s.uplink {
			select {
			case ch <- msg:
			default: // non-blocking: drop rather than stall the caller
				s.log.WithField("Monitor", serverName).Warn("UplinkMessage buffer full")
			}
		}
	case *router.DownlinkMessage:
		s.log.Debug("Sending DownlinkMessage to monitor")
		for serverName, ch := range s.downlink {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("DownlinkMessage buffer full")
			}
		}
	case *gateway.Status:
		s.log.Debug("Sending Status to monitor")
		for serverName, ch := range s.status {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("GatewayStatus buffer full")
			}
		}
	}
}
// Close cancels the context, stopping the per-server goroutines and streams.
func (s *gatewayStreams) Close() {
	s.cancel()
}
// NewGatewayStreams returns new streams using the given gateway ID and token
func (c *Client) NewGatewayStreams(id string, token string) GenericStream {
	log := c.log.WithField("GatewayID", id)
	ctx, cancel := context.WithCancel(c.ctx)
	ctx = api.ContextWithID(ctx, id)
	ctx = api.ContextWithToken(ctx, token)
	s := &gatewayStreams{
		log:      log,
		ctx:      ctx,
		cancel:   cancel,
		uplink:   make(map[string]chan *router.UplinkMessage),
		downlink: make(map[string]chan *router.DownlinkMessage),
		status:   make(map[string]chan *gateway.Status),
	}
	// Hook up the monitor servers
	for _, server := range c.serverConns {
		go func(server *serverConn) {
			// Wait until the asynchronous dial in AddServer has settled.
			if server.ready != nil {
				select {
				case <-ctx.Done():
					return
				case <-server.ready:
				}
			}
			if server.conn == nil {
				return
			}
			log := log.WithField("Monitor", server.name)
			cli := NewMonitorClient(server.conn)
			// monitor blocks until the given stream terminates and logs why.
			monitor := func(streamName string, stream grpc.ClientStream) {
				err := stream.RecvMsg(new(empty.Empty))
				switch {
				case err == nil:
					log.Debugf("%s stream closed", streamName)
				case err == io.EOF:
					log.WithError(err).Debugf("%s stream ended", streamName)
				case err == context.Canceled || grpc.Code(err) == codes.Canceled:
					log.WithError(err).Debugf("%s stream canceled", streamName)
				case err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:
					log.WithError(err).Debugf("%s stream deadline exceeded", streamName)
				case grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():
					log.WithError(err).Debugf("%s stream connection closed", streamName)
				default:
					log.WithError(err).Warnf("%s stream closed unexpectedly", streamName)
				}
			}
			chUplink := make(chan *router.UplinkMessage, c.config.BufferSize)
			chDownlink := make(chan *router.DownlinkMessage, c.config.BufferSize)
			chStatus := make(chan *gateway.Status, c.config.BufferSize)
			// Deregister the channels under the write lock BEFORE closing
			// them, so Send (which holds the read lock while writing) can
			// never write into a closed channel.
			defer func() {
				s.mu.Lock()
				defer s.mu.Unlock()
				delete(s.uplink, server.name)
				delete(s.downlink, server.name)
				delete(s.status, server.name)
				close(chUplink)
				close(chDownlink)
				close(chStatus)
			}()
			// Uplink stream
			uplink, err := cli.GatewayUplink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayUplink stream")
			} else {
				s.mu.Lock()
				s.uplink[server.name] = chUplink
				s.mu.Unlock()
				// Deregister the channel once the stream terminates.
				go func() {
					monitor("GatewayUplink", uplink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.uplink, server.name)
				}()
			}
			// Downlink stream
			downlink, err := cli.GatewayDownlink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayDownlink stream")
			} else {
				s.mu.Lock()
				s.downlink[server.name] = chDownlink
				s.mu.Unlock()
				go func() {
					monitor("GatewayDownlink", downlink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.downlink, server.name)
				}()
			}
			// Status stream
			status, err := cli.GatewayStatus(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayStatus stream")
			} else {
				s.mu.Lock()
				s.status[server.name] = chStatus
				s.mu.Unlock()
				go func() {
					monitor("GatewayStatus", status)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.status, server.name)
				}()
			}
			log.Debug("Start handling Gateway streams")
			defer log.Debug("Done handling Gateway streams")
			// Pump buffered messages into the streams. Transient gRPC errors
			// are recovered by the restartstream interceptor; only give up
			// when the stream is closed for good.
			for {
				select {
				case <-ctx.Done():
					return
				case msg := <-chStatus:
					if err := status.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send GatewayStatus to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chUplink:
					if err := uplink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send UplinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chDownlink:
					if err := downlink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send DownlinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				}
			}
		}(server)
	}
	return s
}
// brokerStreams fans broker traffic out to every connected monitor server.
type brokerStreams struct {
	log    log.Interface
	ctx    context.Context
	cancel context.CancelFunc
	// mu guards the two channel maps below.
	mu       sync.RWMutex
	uplink   map[string]chan *broker.DeduplicatedUplinkMessage
	downlink map[string]chan *broker.DownlinkMessage
}
// Send routes msg to the buffered channel of every registered monitor,
// dropping the message for any monitor whose buffer is full.
// Unrecognized message types are silently ignored.
func (s *brokerStreams) Send(msg interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	switch msg := msg.(type) {
	case *broker.DeduplicatedUplinkMessage:
		s.log.Debug("Sending DeduplicatedUplinkMessage to monitor")
		for serverName, ch := range s.uplink {
			select {
			case ch <- msg:
			default: // non-blocking: drop rather than stall the caller
				s.log.WithField("Monitor", serverName).Warn("DeduplicatedUplinkMessage buffer full")
			}
		}
	case *broker.DownlinkMessage:
		s.log.Debug("Sending DownlinkMessage to monitor")
		for serverName, ch := range s.downlink {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("DownlinkMessage buffer full")
			}
		}
	}
}
// Close cancels the context, stopping the per-server goroutines and streams.
func (s *brokerStreams) Close() {
	s.cancel()
}
// NewBrokerStreams returns new streams using the given broker ID and token
func (c *Client) NewBrokerStreams(id string, token string) GenericStream {
	log := c.log
	ctx, cancel := context.WithCancel(c.ctx)
	ctx = api.ContextWithID(ctx, id)
	ctx = api.ContextWithToken(ctx, token)
	s := &brokerStreams{
		log:      log,
		ctx:      ctx,
		cancel:   cancel,
		uplink:   make(map[string]chan *broker.DeduplicatedUplinkMessage),
		downlink: make(map[string]chan *broker.DownlinkMessage),
	}
	// Hook up the monitor servers
	for _, server := range c.serverConns {
		go func(server *serverConn) {
			// Wait until the asynchronous dial in AddServer has settled.
			if server.ready != nil {
				select {
				case <-ctx.Done():
					return
				case <-server.ready:
				}
			}
			if server.conn == nil {
				return
			}
			log := log.WithField("Monitor", server.name)
			cli := NewMonitorClient(server.conn)
			// monitor blocks until the given stream terminates and logs why.
			monitor := func(streamName string, stream grpc.ClientStream) {
				err := stream.RecvMsg(new(empty.Empty))
				switch {
				case err == nil:
					log.Debugf("%s stream closed", streamName)
				case err == io.EOF:
					log.WithError(err).Debugf("%s stream ended", streamName)
				case err == context.Canceled || grpc.Code(err) == codes.Canceled:
					log.WithError(err).Debugf("%s stream canceled", streamName)
				case err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:
					log.WithError(err).Debugf("%s stream deadline exceeded", streamName)
				case grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():
					log.WithError(err).Debugf("%s stream connection closed", streamName)
				default:
					log.WithError(err).Warnf("%s stream closed unexpectedly", streamName)
				}
			}
			chUplink := make(chan *broker.DeduplicatedUplinkMessage, c.config.BufferSize)
			chDownlink := make(chan *broker.DownlinkMessage, c.config.BufferSize)
			// Bug fix: deregister the channels under the write lock BEFORE
			// closing them. Send holds the read lock while writing into any
			// channel still present in the maps, so closing a channel that is
			// still registered could make a concurrent Send panic on a closed
			// channel. (This mirrors the cleanup in NewGatewayStreams.)
			defer func() {
				s.mu.Lock()
				defer s.mu.Unlock()
				delete(s.uplink, server.name)
				delete(s.downlink, server.name)
				close(chUplink)
				close(chDownlink)
			}()
			// Uplink stream
			uplink, err := cli.BrokerUplink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up BrokerUplink stream")
			} else {
				s.mu.Lock()
				s.uplink[server.name] = chUplink
				s.mu.Unlock()
				// Deregister the channel once the stream terminates.
				go func() {
					monitor("BrokerUplink", uplink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.uplink, server.name)
				}()
			}
			// Downlink stream
			downlink, err := cli.BrokerDownlink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up BrokerDownlink stream")
			} else {
				s.mu.Lock()
				s.downlink[server.name] = chDownlink
				s.mu.Unlock()
				go func() {
					monitor("BrokerDownlink", downlink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.downlink, server.name)
				}()
			}
			log.Debug("Start handling Broker streams")
			defer log.Debug("Done handling Broker streams")
			for {
				select {
				case <-ctx.Done():
					return
				case msg := <-chUplink:
					if err := uplink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send UplinkMessage to monitor")
						// Bug fix: do not abandon the stream on transient gRPC
						// errors — the restartstream interceptor recovers them.
						// Only give up when the stream is closed for good, as
						// the gateway loop already does.
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chDownlink:
					if err := downlink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send DownlinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				}
			}
		}(server)
	}
	return s
}
Don't return on gRPC errors in send, disable streams before closing
// Copyright © 2017 The Things Network
// Use of this source code is governed by the MIT license that can be found in the LICENSE file.
package monitor
import (
"crypto/tls"
"io"
"strings"
"sync"
"github.com/TheThingsNetwork/go-utils/grpc/restartstream"
"github.com/TheThingsNetwork/go-utils/log"
"github.com/TheThingsNetwork/ttn/api"
"github.com/TheThingsNetwork/ttn/api/broker"
"github.com/TheThingsNetwork/ttn/api/gateway"
"github.com/TheThingsNetwork/ttn/api/router"
"github.com/golang/protobuf/ptypes/empty"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
)
// GenericStream is used for sending anything to the monitor.
// Depending on the context, this can be
// - *router.UplinkMessage
// - *router.DownlinkMessage
// - *gateway.Status
// - *broker.DeduplicatedUplinkMessage
// - *broker.DownlinkMessage
type GenericStream interface {
	// Send queues msg for delivery; implementations drop messages when buffers are full.
	Send(interface{})
	// Close stops the underlying streams.
	Close()
}

// ClientConfig for monitor Client
type ClientConfig struct {
	// BufferSize is the per-stream channel capacity before messages are dropped.
	BufferSize int
}

// DefaultClientConfig for monitor Client
var DefaultClientConfig = ClientConfig{
	BufferSize: 10,
}

// TLSConfig to use when dialing monitors whose name has a "-tls" suffix (see AddServer).
var TLSConfig *tls.Config
// NewClient creates a new Client with the given configuration
func NewClient(config ClientConfig) *Client {
	clientCtx, cancelFunc := context.WithCancel(context.Background())
	c := &Client{
		log:    log.Get(),
		ctx:    clientCtx,
		cancel: cancelFunc,
		config: config,
	}
	return c
}
// Client for monitor
type Client struct {
	log    log.Interface
	ctx    context.Context
	cancel context.CancelFunc
	config ClientConfig
	// serverConns holds one entry per monitor registered via AddServer/AddConn.
	serverConns []*serverConn
}

// DefaultDialOptions for connecting with a monitor server
var DefaultDialOptions = []grpc.DialOption{
	grpc.WithBlock(),
	grpc.FailOnNonTempDialError(false),
	// Transparently restart broken streams.
	grpc.WithStreamInterceptor(restartstream.Interceptor(restartstream.DefaultSettings)),
}
// AddServer adds a new monitor server. Supplying DialOptions overrides the default dial options.
// If the default DialOptions are used, TLS will be used to connect to monitors with a "-tls" suffix in their name.
// This function should not be called after streams have been started
func (c *Client) AddServer(name, address string, opts ...grpc.DialOption) {
	log := c.log.WithFields(log.Fields{"Monitor": name, "Address": address})
	log.Info("Adding Monitor server")
	s := &serverConn{
		ctx:  log,
		name: name,
		// ready is closed once the asynchronous dial below has settled.
		ready: make(chan struct{}),
	}
	c.serverConns = append(c.serverConns, s)
	if len(opts) == 0 {
		// No explicit options: derive transport security from the server name.
		if strings.HasSuffix(name, "-tls") {
			opts = append(DefaultDialOptions, grpc.WithTransportCredentials(credentials.NewTLS(TLSConfig)))
		} else {
			opts = append(DefaultDialOptions, grpc.WithInsecure())
		}
	}
	// Dial asynchronously: grpc.WithBlock in DefaultDialOptions makes
	// DialContext wait for the connection, which must not block the caller.
	go func() {
		conn, err := grpc.DialContext(
			c.ctx,
			address,
			opts...,
		)
		if err != nil {
			log.WithError(err).Error("Could not connect to Monitor server")
			// Close ready even on failure so serverConn.Close never blocks.
			close(s.ready)
			return
		}
		s.conn = conn
		close(s.ready)
	}()
}
// AddConn adds a new monitor server on an existing connection
// This function should not be called after streams have been started
func (c *Client) AddConn(name string, conn *grpc.ClientConn) {
	logger := c.log.WithFields(log.Fields{"Monitor": name})
	logger.Info("Adding Monitor connection")
	server := &serverConn{
		ctx:  logger,
		name: name,
		conn: conn,
	}
	c.serverConns = append(c.serverConns, server)
}
// Close the client and all its connections
func (c *Client) Close() {
	c.cancel()
	for i := range c.serverConns {
		c.serverConns[i].Close()
	}
}
// serverConn holds the connection state for a single monitor server.
type serverConn struct {
	ctx  log.Interface
	name string
	// ready is closed when the asynchronous dial started in AddServer has
	// settled; it is nil when the connection was supplied via AddConn.
	ready chan struct{}
	conn  *grpc.ClientConn
}
// Close waits for any pending dial to settle, then closes the connection.
func (c *serverConn) Close() {
	if c.ready != nil {
		<-c.ready
	}
	if conn := c.conn; conn != nil {
		conn.Close()
	}
}
// gatewayStreams fans gateway traffic out to every connected monitor server.
type gatewayStreams struct {
	log    log.Interface
	ctx    context.Context
	cancel context.CancelFunc
	// mu guards the three channel maps below.
	mu       sync.RWMutex
	uplink   map[string]chan *router.UplinkMessage
	downlink map[string]chan *router.DownlinkMessage
	status   map[string]chan *gateway.Status
}
// Send routes msg to the buffered channel of every registered monitor,
// dropping the message for any monitor whose buffer is full.
// Unrecognized message types are silently ignored.
func (s *gatewayStreams) Send(msg interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	switch msg := msg.(type) {
	case *router.UplinkMessage:
		s.log.Debug("Sending UplinkMessage to monitor")
		for serverName, ch := range s.uplink {
			select {
			case ch <- msg:
			default: // non-blocking: drop rather than stall the caller
				s.log.WithField("Monitor", serverName).Warn("UplinkMessage buffer full")
			}
		}
	case *router.DownlinkMessage:
		s.log.Debug("Sending DownlinkMessage to monitor")
		for serverName, ch := range s.downlink {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("DownlinkMessage buffer full")
			}
		}
	case *gateway.Status:
		s.log.Debug("Sending Status to monitor")
		for serverName, ch := range s.status {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("GatewayStatus buffer full")
			}
		}
	}
}
// Close cancels the context, stopping the per-server goroutines and streams.
func (s *gatewayStreams) Close() {
	s.cancel()
}
// NewGatewayStreams returns new streams using the given gateway ID and token
func (c *Client) NewGatewayStreams(id string, token string) GenericStream {
	log := c.log.WithField("GatewayID", id)
	ctx, cancel := context.WithCancel(c.ctx)
	ctx = api.ContextWithID(ctx, id)
	ctx = api.ContextWithToken(ctx, token)
	s := &gatewayStreams{
		log:      log,
		ctx:      ctx,
		cancel:   cancel,
		uplink:   make(map[string]chan *router.UplinkMessage),
		downlink: make(map[string]chan *router.DownlinkMessage),
		status:   make(map[string]chan *gateway.Status),
	}
	// Hook up the monitor servers
	for _, server := range c.serverConns {
		go func(server *serverConn) {
			// Wait until the asynchronous dial in AddServer has settled.
			if server.ready != nil {
				select {
				case <-ctx.Done():
					return
				case <-server.ready:
				}
			}
			if server.conn == nil {
				return
			}
			log := log.WithField("Monitor", server.name)
			cli := NewMonitorClient(server.conn)
			// monitor blocks until the given stream terminates and logs why.
			monitor := func(streamName string, stream grpc.ClientStream) {
				err := stream.RecvMsg(new(empty.Empty))
				switch {
				case err == nil:
					log.Debugf("%s stream closed", streamName)
				case err == io.EOF:
					log.WithError(err).Debugf("%s stream ended", streamName)
				case err == context.Canceled || grpc.Code(err) == codes.Canceled:
					log.WithError(err).Debugf("%s stream canceled", streamName)
				case err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:
					log.WithError(err).Debugf("%s stream deadline exceeded", streamName)
				case grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():
					log.WithError(err).Debugf("%s stream connection closed", streamName)
				default:
					log.WithError(err).Warnf("%s stream closed unexpectedly", streamName)
				}
			}
			chUplink := make(chan *router.UplinkMessage, c.config.BufferSize)
			chDownlink := make(chan *router.DownlinkMessage, c.config.BufferSize)
			chStatus := make(chan *gateway.Status, c.config.BufferSize)
			// Deregister the channels under the write lock BEFORE closing
			// them, so Send (which holds the read lock while writing) can
			// never write into a closed channel.
			defer func() {
				s.mu.Lock()
				defer s.mu.Unlock()
				delete(s.uplink, server.name)
				delete(s.downlink, server.name)
				delete(s.status, server.name)
				close(chUplink)
				close(chDownlink)
				close(chStatus)
			}()
			// Uplink stream
			uplink, err := cli.GatewayUplink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayUplink stream")
			} else {
				s.mu.Lock()
				s.uplink[server.name] = chUplink
				s.mu.Unlock()
				// Deregister the channel once the stream terminates.
				go func() {
					monitor("GatewayUplink", uplink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.uplink, server.name)
				}()
			}
			// Downlink stream
			downlink, err := cli.GatewayDownlink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayDownlink stream")
			} else {
				s.mu.Lock()
				s.downlink[server.name] = chDownlink
				s.mu.Unlock()
				go func() {
					monitor("GatewayDownlink", downlink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.downlink, server.name)
				}()
			}
			// Status stream
			status, err := cli.GatewayStatus(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up GatewayStatus stream")
			} else {
				s.mu.Lock()
				s.status[server.name] = chStatus
				s.mu.Unlock()
				go func() {
					monitor("GatewayStatus", status)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.status, server.name)
				}()
			}
			log.Debug("Start handling Gateway streams")
			defer log.Debug("Done handling Gateway streams")
			// Pump buffered messages into the streams. Transient gRPC errors
			// are recovered by the restartstream interceptor; only give up
			// when the stream is closed for good.
			for {
				select {
				case <-ctx.Done():
					return
				case msg := <-chStatus:
					if err := status.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send GatewayStatus to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chUplink:
					if err := uplink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send UplinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chDownlink:
					if err := downlink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send DownlinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				}
			}
		}(server)
	}
	return s
}
// brokerStreams fans broker traffic out to every connected monitor server.
type brokerStreams struct {
	log    log.Interface
	ctx    context.Context
	cancel context.CancelFunc
	// mu guards the two channel maps below.
	mu       sync.RWMutex
	uplink   map[string]chan *broker.DeduplicatedUplinkMessage
	downlink map[string]chan *broker.DownlinkMessage
}
// Send routes msg to the buffered channel of every registered monitor,
// dropping the message for any monitor whose buffer is full.
// Unrecognized message types are silently ignored.
func (s *brokerStreams) Send(msg interface{}) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	switch msg := msg.(type) {
	case *broker.DeduplicatedUplinkMessage:
		s.log.Debug("Sending DeduplicatedUplinkMessage to monitor")
		for serverName, ch := range s.uplink {
			select {
			case ch <- msg:
			default: // non-blocking: drop rather than stall the caller
				s.log.WithField("Monitor", serverName).Warn("DeduplicatedUplinkMessage buffer full")
			}
		}
	case *broker.DownlinkMessage:
		s.log.Debug("Sending DownlinkMessage to monitor")
		for serverName, ch := range s.downlink {
			select {
			case ch <- msg:
			default:
				s.log.WithField("Monitor", serverName).Warn("DownlinkMessage buffer full")
			}
		}
	}
}
// Close cancels the context, stopping the per-server goroutines and streams.
func (s *brokerStreams) Close() {
	s.cancel()
}
// NewBrokerStreams returns new streams using the given broker ID and token
func (c *Client) NewBrokerStreams(id string, token string) GenericStream {
	log := c.log
	ctx, cancel := context.WithCancel(c.ctx)
	ctx = api.ContextWithID(ctx, id)
	ctx = api.ContextWithToken(ctx, token)
	s := &brokerStreams{
		log:      log,
		ctx:      ctx,
		cancel:   cancel,
		uplink:   make(map[string]chan *broker.DeduplicatedUplinkMessage),
		downlink: make(map[string]chan *broker.DownlinkMessage),
	}
	// Hook up the monitor servers
	for _, server := range c.serverConns {
		go func(server *serverConn) {
			// Wait until the asynchronous dial in AddServer has settled.
			if server.ready != nil {
				select {
				case <-ctx.Done():
					return
				case <-server.ready:
				}
			}
			if server.conn == nil {
				return
			}
			log := log.WithField("Monitor", server.name)
			cli := NewMonitorClient(server.conn)
			// monitor blocks until the given stream terminates and logs why.
			monitor := func(streamName string, stream grpc.ClientStream) {
				err := stream.RecvMsg(new(empty.Empty))
				switch {
				case err == nil:
					log.Debugf("%s stream closed", streamName)
				case err == io.EOF:
					log.WithError(err).Debugf("%s stream ended", streamName)
				case err == context.Canceled || grpc.Code(err) == codes.Canceled:
					log.WithError(err).Debugf("%s stream canceled", streamName)
				case err == context.DeadlineExceeded || grpc.Code(err) == codes.DeadlineExceeded:
					log.WithError(err).Debugf("%s stream deadline exceeded", streamName)
				case grpc.ErrorDesc(err) == grpc.ErrClientConnClosing.Error():
					log.WithError(err).Debugf("%s stream connection closed", streamName)
				default:
					log.WithError(err).Warnf("%s stream closed unexpectedly", streamName)
				}
			}
			chUplink := make(chan *broker.DeduplicatedUplinkMessage, c.config.BufferSize)
			chDownlink := make(chan *broker.DownlinkMessage, c.config.BufferSize)
			// Deregister the channels under the write lock BEFORE closing
			// them, so Send (which holds the read lock while writing) can
			// never write into a closed channel.
			defer func() {
				s.mu.Lock()
				defer s.mu.Unlock()
				delete(s.uplink, server.name)
				delete(s.downlink, server.name)
				close(chUplink)
				close(chDownlink)
			}()
			// Uplink stream
			uplink, err := cli.BrokerUplink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up BrokerUplink stream")
			} else {
				s.mu.Lock()
				s.uplink[server.name] = chUplink
				s.mu.Unlock()
				// Deregister the channel once the stream terminates.
				go func() {
					monitor("BrokerUplink", uplink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.uplink, server.name)
				}()
			}
			// Downlink stream
			downlink, err := cli.BrokerDownlink(ctx)
			if err != nil {
				log.WithError(err).Warn("Could not set up BrokerDownlink stream")
			} else {
				s.mu.Lock()
				s.downlink[server.name] = chDownlink
				s.mu.Unlock()
				go func() {
					monitor("BrokerDownlink", downlink)
					s.mu.Lock()
					defer s.mu.Unlock()
					delete(s.downlink, server.name)
				}()
			}
			log.Debug("Start handling Broker streams")
			defer log.Debug("Done handling Broker streams")
			// Pump buffered messages into the streams. Transient gRPC errors
			// are recovered by the restartstream interceptor; only give up
			// when the stream is closed for good.
			for {
				select {
				case <-ctx.Done():
					return
				case msg := <-chUplink:
					if err := uplink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send UplinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				case msg := <-chDownlink:
					if err := downlink.Send(msg); err != nil {
						log.WithError(err).Warn("Could not send DownlinkMessage to monitor")
						if err == restartstream.ErrStreamClosed {
							return
						}
					}
				}
			}
		}(server)
	}
	return s
}
|
package version
// Version of Functions
var Version = "0.3.245"
fnserver: 0.3.246 release [skip ci]
package version
// Version of Functions
var Version = "0.3.246"
|
package consul
import (
"bytes"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/hashicorp/consul/api"
"github.com/portworx/kvdb"
"github.com/sirupsen/logrus"
)
const (
	// httpError is a substring returned by consul during internal server
	// (HTTP 500) errors. Ideally such errors should be provided as consul constants.
	httpError = "Unexpected response code: 500"
	// eofError is a substring returned by consul during EOF errors.
	eofError = "EOF"
	// connRefused is a substring seen when the connection is refused.
	connRefused = "connection refused"
	// keyIndexMismatch indicates consul error for key index mismatch
	keyIndexMismatch = "Key Index mismatch"
	// nameResolutionError indicates no host found; can be temporary
	nameResolutionError = "no such host"
)
// consulClient defines methods that a px based consul client should satisfy.
type consulClient interface {
	kvOperations
	// sessionOperations includes methods from that interface.
	sessionOperations
	// metaOperations includes methods from that interface.
	metaOperations
	// lockOptsOperations includes methods from that interface.
	lockOptsOperations
}
// kvOperations mirrors consul's KV() API, adding reconnect-on-failover retries.
type kvOperations interface {
	// Get exposes underlying KV().Get but with reconnect on failover.
	Get(key string, q *api.QueryOptions) (*api.KVPair, *api.QueryMeta, error)
	// Put exposes underlying KV().Put but with reconnect on failover.
	Put(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error)
	// Acquire exposes underlying KV().Acquire but with reconnect on failover.
	Acquire(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error)
	// Delete exposes underlying KV().Delete but with reconnect on failover.
	Delete(key string, w *api.WriteOptions) (*api.WriteMeta, error)
	// DeleteTree exposes underlying KV().DeleteTree but with reconnect on failover.
	DeleteTree(prefix string, w *api.WriteOptions) (*api.WriteMeta, error)
	// Keys exposes underlying KV().Keys but with reconnect on failover.
	Keys(prefix, separator string, q *api.QueryOptions) ([]string, *api.QueryMeta, error)
	// List exposes underlying KV().List but with reconnect on failover.
	List(prefix string, q *api.QueryOptions) (api.KVPairs, *api.QueryMeta, error)
}
// sessionOperations mirrors consul's Session() API, adding reconnect-on-failover retries.
type sessionOperations interface {
	// Create exposes underlying Session().Create but with reconnect on failover.
	Create(se *api.SessionEntry, q *api.WriteOptions) (string, *api.WriteMeta, error)
	// Destroy exposes underlying Session().Destroy but with reconnect on failover.
	Destroy(id string, q *api.WriteOptions) (*api.WriteMeta, error)
	// Renew exposes underlying Session().Renew but with reconnect on failover.
	Renew(id string, q *api.WriteOptions) (*api.SessionEntry, *api.WriteMeta, error)
	// RenewPeriodic exposes underlying Session().RenewPeriodic but with reconnect on failover.
	RenewPeriodic(initialTTL string, id string, q *api.WriteOptions, doneCh chan struct{}) error
}
// metaOperations groups compound helpers built on top of the KV and Session APIs.
type metaOperations interface {
	// CreateMeta is a meta writer wrapping KV().Acquire and Session().Destroy but with reconnect on failover.
	CreateMeta(id string, p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, bool, error)
	// CompareAndSet is a meta func wrapping KV().CAS and KV().Get but with reconnect on failover.
	CompareAndSet(id string, value []byte, p *api.KVPair, q *api.WriteOptions) (bool, *api.WriteMeta, error)
	// CompareAndDelete is a meta func wrapping KV().DeleteCAS and KV().Get but with reconnect on failover.
	CompareAndDelete(id string, value []byte, p *api.KVPair, q *api.WriteOptions) (bool, *api.WriteMeta, error)
}
// lockOptsOperations exposes consul lock construction.
type lockOptsOperations interface {
	// LockOpts returns pointer to underlying Lock object and an error.
	LockOpts(opts *api.LockOptions) (*api.Lock, error)
}
// consulConnection stores current consul connection state
type consulConnection struct {
	// config is the configuration used to create consulClient
	config *api.Config
	// client provides access to consul api
	client *api.Client
	// once is used to reconnect consulClient only once among concurrently running threads
	once *sync.Once
}
// consulClientImpl wraps config information and consul client along with sync
// functionality to reconnect it once. It satisfies the consulClient interface
// defined above.
type consulClientImpl struct {
	// conn current consul connection state
	conn *consulConnection
	// connParams holds all params required to obtain new api client
	connParams connectionParams
	// reconnectDelay is the time duration to wait between machines
	reconnectDelay time.Duration
	// maxRetries is the number of times reconnect should be tried.
	maxRetries int
}
// newConsulClient provides an instance of the consulClient interface wrapping
// the given config/client pair with retry and reconnect behavior.
func newConsulClient(config *api.Config,
	client *api.Client,
	reconnectDelay time.Duration,
	p connectionParams,
) consulClient {
	// With the default 5 second delay, 12 retries amount to roughly a minute.
	const defaultMaxRetries = 12
	return &consulClientImpl{
		conn: &consulConnection{
			config: config,
			client: client,
			once:   new(sync.Once),
		},
		connParams:     p,
		reconnectDelay: reconnectDelay,
		maxRetries:     defaultMaxRetries,
	}
}
// LockOpts returns pointer to underlying Lock object and an error.
// Note: this uses the current connection directly, without the retry and
// reconnect handling the other operations have.
func (c *consulClientImpl) LockOpts(opts *api.LockOptions) (*api.Lock, error) {
	return c.conn.client.LockOpts(opts)
}
// reconnect reconnects to any online and healthy consul server.
// The sync.Once stored on the given (broken) connection guarantees that, among
// concurrent callers observing the same connection, only one performs the scan
// over the configured machines; the others return nil without retrying.
func (c *consulClientImpl) reconnect(conn *consulConnection) error {
	var err error
	// once.Do executes func() only once across concurrently executing threads
	conn.once.Do(func() {
		var config *api.Config
		var client *api.Client
		for _, machine := range c.connParams.machines {
			// Strip the scheme; newKvClient expects a bare host:port address.
			if strings.HasPrefix(machine, "http://") {
				machine = strings.TrimPrefix(machine, "http://")
			} else if strings.HasPrefix(machine, "https://") {
				machine = strings.TrimPrefix(machine, "https://")
			}
			// sleep for requested delay before testing new connection
			time.Sleep(c.reconnectDelay)
			if config, client, err = newKvClient(machine, c.connParams); err == nil {
				// Replace the connection state wholesale; the fresh sync.Once
				// arms the next reconnect cycle.
				c.conn = &consulConnection{
					client: client,
					config: config,
					once:   new(sync.Once),
				}
				logrus.Infof("%s: %s\n", "successfully connected to", machine)
				break
			} else {
				logrus.Errorf("failed to reconnect client on: %s", machine)
			}
		}
	})
	// err is only set by the once.Do body above; when the reconnect was
	// already performed by another caller, this returns nil.
	if err != nil {
		logrus.Infof("Failed to reconnect client: %v", err)
	}
	return err
}
// isConsulErrNeedingRetry reports whether err is a consul error on which the
// client should be reconnected and the operation retried.
func isConsulErrNeedingRetry(err error) bool {
	msg := err.Error()
	for _, marker := range []string{httpError, eofError, connRefused, nameResolutionError} {
		if strings.Contains(msg, marker) {
			return true
		}
	}
	return false
}
// isKeyIndexMismatchErr reports whether err carries consul's
// key-index-mismatch marker.
func isKeyIndexMismatchErr(err error) bool {
	msg := err.Error()
	return strings.Contains(msg, keyIndexMismatch)
}
// newKvClient constructs a consul api client for a single end-point and
// verifies it with a health check before returning the config/client pair.
func newKvClient(machine string, p connectionParams) (*api.Config, *api.Client, error) {
	config := api.DefaultConfig()
	config.HttpClient = http.DefaultClient
	config.Address = machine
	config.Scheme = "http"
	config.Token = p.options[kvdb.ACLTokenKey]

	// check if TLS is required
	if p.options[kvdb.TransportScheme] == "https" {
		tlsConfig := &api.TLSConfig{
			CAFile:             p.options[kvdb.CAFileKey],
			CertFile:           p.options[kvdb.CertFileKey],
			KeyFile:            p.options[kvdb.CertKeyFileKey],
			Address:            p.options[kvdb.CAAuthAddress],
			InsecureSkipVerify: strings.ToLower(p.options[kvdb.InsecureSkipVerify]) == "true",
		}
		consulTLSConfig, err := api.SetupTLSConfig(tlsConfig)
		if err != nil {
			// NOTE(review): Fatal aborts the whole process on a bad TLS
			// config; consider returning the error instead.
			logrus.Fatal(err)
		}
		config.Scheme = p.options[kvdb.TransportScheme]
		config.HttpClient = new(http.Client)
		config.HttpClient.Transport = &http.Transport{
			TLSClientConfig: consulTLSConfig,
		}
	}

	client, err := api.NewClient(config)
	if err != nil {
		// Bug fix: this previously used logrus.Info (Sprint-style) with a
		// format string, printing the "%v" verb literally instead of the
		// error; use Errorf like the health-check failure path below.
		logrus.Errorf("consul: failed to get new api client: %v", err)
		return nil, nil, err
	}
	// check health to ensure communication with consul are working
	if _, _, err := client.Health().State(api.HealthAny, nil); err != nil {
		logrus.Errorf("consul: health check failed for %v : %v", machine, err)
		return nil, nil, err
	}
	return config, client, nil
}
// consulFunc is a single consul operation; it returns true when the operation
// should be retried (i.e. the client was reconnected after a connection error).
type consulFunc func() bool
// runWithRetry invokes f until it reports no further retry is needed, bounded
// by the client's configured maximum number of attempts.
func (c *consulClientImpl) runWithRetry(f consulFunc) {
	for attempt := 0; attempt < c.maxRetries; attempt++ {
		if retryNeeded := f(); !retryNeeded {
			return
		}
	}
}
// writeFunc defines an update operation for consul with this signature
type writeFunc func(conn *consulConnection) (*api.WriteMeta, error)
// writeRetryFunc runs the given write operation, reconnecting and retrying on
// consul connection errors.
func (c *consulClientImpl) writeRetryFunc(f writeFunc) (*api.WriteMeta, error) {
	var (
		writeMeta *api.WriteMeta
		opErr     error
	)
	c.runWithRetry(func() bool {
		current := c.conn
		writeMeta, opErr = f(current)
		var again bool
		again, opErr = c.reconnectIfConnectionError(current, opErr)
		return again
	})
	return writeMeta, opErr
}
// reconnectIfConnectionError returns (retry, error); retry is true when the
// client was reconnected after a connection-level failure, meaning the caller
// should retry the operation.
func (c *consulClientImpl) reconnectIfConnectionError(conn *consulConnection, err error) (bool, error) {
	if err == nil {
		return false, nil
	}
	if !isConsulErrNeedingRetry(err) {
		// Not a connection problem; surface the error as-is.
		return false, err
	}
	logrus.Errorf("consul connection error: %v, trying to reconnect..", err)
	if clientErr := c.reconnect(conn); clientErr != nil {
		return false, clientErr
	}
	return true, nil
}
// Get exposes KV().Get with reconnect-on-failover retries.
func (c *consulClientImpl) Get(key string, q *api.QueryOptions) (*api.KVPair, *api.QueryMeta, error) {
	var (
		kvPair    *api.KVPair
		queryMeta *api.QueryMeta
		opErr     error
	)
	c.runWithRetry(func() bool {
		current := c.conn
		kvPair, queryMeta, opErr = current.client.KV().Get(key, q)
		var again bool
		again, opErr = c.reconnectIfConnectionError(current, opErr)
		return again
	})
	return kvPair, queryMeta, opErr
}
// Put exposes KV().Put with reconnect-on-failover retries.
func (c *consulClientImpl) Put(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error) {
	op := func(conn *consulConnection) (*api.WriteMeta, error) {
		return conn.client.KV().Put(p, q)
	}
	return c.writeRetryFunc(op)
}
func (c *consulClientImpl) Delete(key string, w *api.WriteOptions) (*api.WriteMeta, error) {
return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
return conn.client.KV().Delete(key, w)
})
}
func (c *consulClientImpl) DeleteTree(prefix string, w *api.WriteOptions) (*api.WriteMeta, error) {
return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
return conn.client.KV().DeleteTree(prefix, w)
})
}
func (c *consulClientImpl) Keys(prefix, separator string, q *api.QueryOptions) ([]string, *api.QueryMeta, error) {
var list []string
var meta *api.QueryMeta
var err error
retry := false
c.runWithRetry(func() bool {
conn := c.conn
list, meta, err = conn.client.KV().Keys(prefix, separator, q)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
return list, meta, err
}
func (c *consulClientImpl) List(prefix string, q *api.QueryOptions) (api.KVPairs, *api.QueryMeta, error) {
var pairs api.KVPairs
var meta *api.QueryMeta
var err error
retry := false
c.runWithRetry(func() bool {
conn := c.conn
pairs, meta, err = conn.client.KV().List(prefix, q)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
return pairs, meta, err
}
func (c *consulClientImpl) Acquire(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error) {
var err error
var meta *api.WriteMeta
var ok bool
retry := false
c.runWithRetry(func() bool {
conn := c.conn
ok, meta, err = conn.client.KV().Acquire(p, q)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
// *** this error is created in loop above
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("acquire failed")
}
return meta, err
}
func (c *consulClientImpl) Create(se *api.SessionEntry, q *api.WriteOptions) (string, *api.WriteMeta, error) {
var session string
var meta *api.WriteMeta
var err error
retry := false
c.runWithRetry(func() bool {
conn := c.conn
session, meta, err = conn.client.Session().Create(se, q)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
return session, meta, err
}
func (c *consulClientImpl) Destroy(id string, q *api.WriteOptions) (*api.WriteMeta, error) {
return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
return conn.client.Session().Destroy(id, q)
})
}
func (c *consulClientImpl) Renew(id string, q *api.WriteOptions) (*api.SessionEntry, *api.WriteMeta, error) {
var entry *api.SessionEntry
var meta *api.WriteMeta
var err error
retry := false
c.runWithRetry(func() bool {
conn := c.conn
entry, meta, err = conn.client.Session().Renew(id, q)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
return entry, meta, err
}
func (c *consulClientImpl) RenewPeriodic(
initialTTL string,
id string,
q *api.WriteOptions,
doneCh chan struct{},
) error {
var err error
retry := false
c.runWithRetry(func() bool {
conn := c.conn
err = conn.client.Session().RenewPeriodic(initialTTL, id, q, doneCh)
retry, err = c.reconnectIfConnectionError(conn, err)
return retry
})
return err
}
// CreateMeta acquires a ttl'd key via KV().Acquire; on every failed attempt
// it tears down the session and deletes the key before retrying.
func (c *consulClientImpl) CreateMeta(
	id string,
	p *api.KVPair,
	q *api.WriteOptions,
) (*api.WriteMeta, bool, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().Acquire(p, q)
		if ok && err == nil {
			// NOTE(review): meta is discarded on the success path and nil is
			// returned instead — confirm callers do not dereference it.
			return nil, ok, err
		}
		// Best-effort cleanup before retrying; failures are only logged.
		if _, err := conn.client.Session().Destroy(p.Session, nil); err != nil {
			logrus.Error(err)
		}
		if _, err := c.Delete(id, nil); err != nil {
			logrus.Error(err)
		}
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			continue
		} else {
			break
		}
	}
	if !ok {
		return nil, ok, fmt.Errorf("failed to set ttl: %v", err)
	}
	return meta, ok, err
}
// CompareAndSet wraps KV().CAS with reconnect-on-failover. After a
// reconnect-driven retry, a key index mismatch is re-checked against the
// current value in consul: if the store already holds the desired value,
// the earlier CAS is assumed to have succeeded and the error is cleared.
func (c *consulClientImpl) CompareAndSet(
	id string,
	value []byte,
	p *api.KVPair,
	q *api.WriteOptions,
) (bool, *api.WriteMeta, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	retried := false
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().CAS(p, q)
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			retried = true
			continue
		} else if err != nil && isKeyIndexMismatchErr(err) && retried {
			kvPair, _, getErr := conn.client.KV().Get(id, nil)
			if getErr != nil {
				// failed to get value from kvdb
				return false, nil, err
			}
			// Prev Value not equal to current value in consul
			if bytes.Compare(kvPair.Value, value) != 0 {
				return false, nil, err
			} else {
				// kvdb has the new value that we are trying to set
				err = nil
				break
			}
		} else {
			break
		}
	}
	return ok, meta, err
}
// CompareAndDelete wraps KV().DeleteCAS with reconnect-on-failover. If a
// retry was performed and the key is now missing, the delete is assumed to
// have gone through on an earlier attempt and the error is cleared.
func (c *consulClientImpl) CompareAndDelete(
	id string,
	value []byte,
	p *api.KVPair,
	q *api.WriteOptions,
) (bool, *api.WriteMeta, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	retried := false
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().DeleteCAS(p, q)
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			retried = true
			continue
		} else if retried && err == kvdb.ErrNotFound {
			// assuming our delete went through, there is no way
			// to figure out who deleted it
			err = nil
			break
		} else {
			break
		}
	}
	return ok, meta, err
}
PWX-9227 PX panics with 'invalid memory address or nil pointer dereference' when consul cluster goes down
The meta value is returned as nil when consul loses quorum, which causes
a segfault when we try to dereference the meta pointer to get the lastIndex.
This was happening because we were overwriting the error in the client.go code
where we retryConnection(), thus ignoring the actual error that we got while accessing consul.
Signed-off-by: Tapas Sharma <ef6ee90f148b1f6545f2b0adfe47e9523a8fba52@portworx.com>
package consul
import (
"bytes"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/hashicorp/consul/api"
"github.com/portworx/kvdb"
"github.com/sirupsen/logrus"
)
const (
	// httpError is a substring returned by consul during such http errors.
	// Ideally such errors should be provided as consul constants
	httpError = "Unexpected response code: 500"
	// eofError is also a substring returned by consul during EOF errors.
	eofError = "EOF"
	// connRefused connection refused
	connRefused = "connection refused"
	// keyIndexMismatch indicates consul error for key index mismatch
	keyIndexMismatch = "Key Index mismatch"
	// nameResolutionError indicates no host found, can be temporary
	nameResolutionError = "no such host"
)

// consulClient defines methods that a px based consul client should satisfy.
type consulClient interface {
	// kvOperations includes methods from that interface.
	kvOperations
	// sessionOperations includes methods from that interface.
	sessionOperations
	// metaOperations includes methods from that interface.
	metaOperations
	// lockOptsOperations includes methods from that interface.
	lockOptsOperations
}
// kvOperations exposes consul KV API calls, each wrapped with reconnect on failover.
type kvOperations interface {
	// Get exposes underlying KV().Get but with reconnect on failover.
	Get(key string, q *api.QueryOptions) (*api.KVPair, *api.QueryMeta, error)
	// Put exposes underlying KV().Put but with reconnect on failover.
	Put(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error)
	// Acquire exposes underlying KV().Acquire but with reconnect on failover.
	Acquire(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error)
	// Delete exposes underlying KV().Delete but with reconnect on failover.
	Delete(key string, w *api.WriteOptions) (*api.WriteMeta, error)
	// DeleteTree exposes underlying KV().DeleteTree but with reconnect on failover.
	DeleteTree(prefix string, w *api.WriteOptions) (*api.WriteMeta, error)
	// Keys exposes underlying KV().Keys but with reconnect on failover.
	Keys(prefix, separator string, q *api.QueryOptions) ([]string, *api.QueryMeta, error)
	// List exposes underlying KV().List but with reconnect on failover.
	List(prefix string, q *api.QueryOptions) (api.KVPairs, *api.QueryMeta, error)
}

// sessionOperations exposes consul session API calls, each wrapped with reconnect on failover.
type sessionOperations interface {
	// Create exposes underlying Session().Create but with reconnect on failover.
	Create(se *api.SessionEntry, q *api.WriteOptions) (string, *api.WriteMeta, error)
	// Destroy exposes underlying Session().Destroy but with reconnect on failover.
	Destroy(id string, q *api.WriteOptions) (*api.WriteMeta, error)
	// Renew exposes underlying Session().Renew but with reconnect on failover.
	Renew(id string, q *api.WriteOptions) (*api.SessionEntry, *api.WriteMeta, error)
	// RenewPeriodic exposes underlying Session().RenewPeriodic but with reconnect on failover.
	RenewPeriodic(initialTTL string, id string, q *api.WriteOptions, doneCh chan struct{}) error
}

// metaOperations groups composite operations built from several consul calls.
type metaOperations interface {
	// CreateMeta is a meta writer wrapping KV().Acquire and Session().Destroy but with reconnect on failover.
	CreateMeta(id string, p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, bool, error)
	// CompareAndSet is a meta func wrapping KV().CAS and KV().Get but with reconnect on failover.
	CompareAndSet(id string, value []byte, p *api.KVPair, q *api.WriteOptions) (bool, *api.WriteMeta, error)
	// CompareAndDelete is a meta func wrapping KV().DeleteCAS and KV().Get but with reconnect on failover.
	CompareAndDelete(id string, value []byte, p *api.KVPair, q *api.WriteOptions) (bool, *api.WriteMeta, error)
}

// lockOptsOperations exposes lock construction.
type lockOptsOperations interface {
	// LockOpts returns pointer to underlying Lock object and an error.
	LockOpts(opts *api.LockOptions) (*api.Lock, error)
}
// consulConnection stores current consul connection state
type consulConnection struct {
	// config is the configuration used to create consulClient
	config *api.Config
	// client provides access to consul api
	client *api.Client
	// once is used to reconnect consulClient only once among concurrently running threads
	once *sync.Once
}

// consulClientImpl wraps config information and consul client along with sync
// functionality to reconnect it once; it satisfies the consulClient interface.
// On reconnect the whole conn pointer is swapped for a fresh consulConnection.
type consulClientImpl struct {
	// conn current consul connection state
	conn *consulConnection
	// connParams holds all params required to obtain new api client
	connParams connectionParams
	// reconnectDelay is the time duration to wait between machines
	reconnectDelay time.Duration
	// maxRetries is the number of times reconnect should be tried.
	maxRetries int
}
// newConsulClient provides an instance of the consulClient interface.
func newConsulClient(config *api.Config,
	client *api.Client,
	reconnectDelay time.Duration,
	p connectionParams,
) consulClient {
	return &consulClientImpl{
		conn: &consulConnection{
			config: config,
			client: client,
			once:   new(sync.Once),
		},
		connParams:     p,
		reconnectDelay: reconnectDelay,
		// With the default 5 second delay, 12 retries span roughly a minute.
		maxRetries: 12,
	}
}
// LockOpts returns pointer to underlying Lock object and an error.
// Unlike the KV/session wrappers, this call is not retried on connection errors.
func (c *consulClientImpl) LockOpts(opts *api.LockOptions) (*api.Lock, error) {
	return c.conn.client.LockOpts(opts)
}
// reconnect reconnects to any online and healthy consul server.
// Concurrent callers are collapsed by conn.once: only the first performs the
// scan over machines; later callers return nil immediately, even if their
// conn is still stale.
func (c *consulClientImpl) reconnect(conn *consulConnection) error {
	var err error
	// once.Do executes func() only once across concurrently executing threads
	conn.once.Do(func() {
		var config *api.Config
		var client *api.Client
		for _, machine := range c.connParams.machines {
			if strings.HasPrefix(machine, "http://") {
				machine = strings.TrimPrefix(machine, "http://")
			} else if strings.HasPrefix(machine, "https://") {
				machine = strings.TrimPrefix(machine, "https://")
			}
			// sleep for requested delay before testing new connection
			time.Sleep(c.reconnectDelay)
			if config, client, err = newKvClient(machine, c.connParams); err == nil {
				// Swap in a fresh connection (with a fresh once) wholesale.
				c.conn = &consulConnection{
					client: client,
					config: config,
					once:   new(sync.Once),
				}
				logrus.Infof("%s: %s\n", "successfully connected to", machine)
				break
			} else {
				logrus.Errorf("failed to reconnect client on: %s", machine)
			}
		}
	})
	// err reflects the last machine tried; it stays nil when once already ran.
	if err != nil {
		logrus.Infof("Failed to reconnect client: %v", err)
	}
	return err
}
// isConsulErrNeedingRetry reports whether err is a connection-class consul
// error on which the client should be reconnected and the call retried.
func isConsulErrNeedingRetry(err error) bool {
	msg := err.Error()
	for _, substr := range []string{httpError, eofError, connRefused, nameResolutionError} {
		if strings.Contains(msg, substr) {
			return true
		}
	}
	return false
}

// isKeyIndexMismatchErr reports whether err contains consul's key index
// mismatch substring.
func isKeyIndexMismatchErr(err error) bool {
	return strings.Contains(err.Error(), keyIndexMismatch)
}
// newKvClient builds a consul api config and client for a single end-point
// and verifies connectivity with a health check before returning it.
func newKvClient(machine string, p connectionParams) (*api.Config, *api.Client, error) {
	config := api.DefaultConfig()
	config.HttpClient = http.DefaultClient
	config.Address = machine
	config.Scheme = "http"
	config.Token = p.options[kvdb.ACLTokenKey]

	// check if TLS is required
	if p.options[kvdb.TransportScheme] == "https" {
		tlsConfig := &api.TLSConfig{
			CAFile:             p.options[kvdb.CAFileKey],
			CertFile:           p.options[kvdb.CertFileKey],
			KeyFile:            p.options[kvdb.CertKeyFileKey],
			Address:            p.options[kvdb.CAAuthAddress],
			InsecureSkipVerify: strings.ToLower(p.options[kvdb.InsecureSkipVerify]) == "true",
		}
		consulTLSConfig, err := api.SetupTLSConfig(tlsConfig)
		if err != nil {
			// NOTE(review): Fatal exits the whole process on a bad TLS config,
			// even when called from a background reconnect — consider
			// returning the error instead.
			logrus.Fatal(err)
		}
		config.Scheme = p.options[kvdb.TransportScheme]
		config.HttpClient = new(http.Client)
		config.HttpClient.Transport = &http.Transport{
			TLSClientConfig: consulTLSConfig,
		}
	}
	client, err := api.NewClient(config)
	if err != nil {
		// Infof (not Info): the message contains a %v format verb.
		logrus.Infof("consul: failed to get new api client: %v", err)
		return nil, nil, err
	}
	// check health to ensure communication with consul are working
	if _, _, err := client.Health().State(api.HealthAny, nil); err != nil {
		logrus.Errorf("consul: health check failed for %v : %v", machine, err)
		return nil, nil, err
	}
	return config, client, nil
}
// consulFunc runs a consulFunc operation and returns true if needs to be retried
type consulFunc func() bool

// runWithRetry runs consulFunc with retries if required
func (c *consulClientImpl) runWithRetry(f consulFunc) {
	for i := 0; i < c.maxRetries; i++ {
		if !f() {
			break
		}
	}
}

// writeFunc defines an update operation for consul with this signature
type writeFunc func(conn *consulConnection) (*api.WriteMeta, error)

// writeRetryFunc runs writeFunc with retries if required.
// The reconnect result is intentionally discarded (retry, _) so the original
// consul error — and its matching nil meta — are preserved for the caller
// (PWX-9227).
func (c *consulClientImpl) writeRetryFunc(f writeFunc) (*api.WriteMeta, error) {
	var err error
	var meta *api.WriteMeta
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		meta, err = f(conn)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return meta, err
}

// reconnectIfConnectionError returns (retry, error); retry is true if the
// client was successfully reconnected after a connection-class error.
// Non-connection errors are passed through unchanged.
func (c *consulClientImpl) reconnectIfConnectionError(conn *consulConnection, err error) (bool, error) {
	if err == nil {
		return false, nil
	} else if isConsulErrNeedingRetry(err) {
		logrus.Errorf("consul connection error: %v, trying to reconnect..", err)
		if clientErr := c.reconnect(conn); clientErr != nil {
			return false, clientErr
		} else {
			logrus.Infof("consul connection success, returning true")
			return true, nil
		}
	} else {
		return false, err
	}
}
// Get exposes KV().Get with reconnect-and-retry on connection errors.
// The reconnect result is deliberately discarded (retry, _) so the original
// consul error reaches the caller (PWX-9227).
func (c *consulClientImpl) Get(key string, q *api.QueryOptions) (*api.KVPair, *api.QueryMeta, error) {
	var pair *api.KVPair
	var meta *api.QueryMeta
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		pair, meta, err = conn.client.KV().Get(key, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return pair, meta, err
}

// Put exposes KV().Put with reconnect on failover.
func (c *consulClientImpl) Put(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error) {
	return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
		return conn.client.KV().Put(p, q)
	})
}

// Delete exposes KV().Delete with reconnect on failover.
func (c *consulClientImpl) Delete(key string, w *api.WriteOptions) (*api.WriteMeta, error) {
	return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
		return conn.client.KV().Delete(key, w)
	})
}

// DeleteTree exposes KV().DeleteTree with reconnect on failover.
func (c *consulClientImpl) DeleteTree(prefix string, w *api.WriteOptions) (*api.WriteMeta, error) {
	return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
		return conn.client.KV().DeleteTree(prefix, w)
	})
}

// Keys exposes KV().Keys with reconnect on failover.
func (c *consulClientImpl) Keys(prefix, separator string, q *api.QueryOptions) ([]string, *api.QueryMeta, error) {
	var list []string
	var meta *api.QueryMeta
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		list, meta, err = conn.client.KV().Keys(prefix, separator, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return list, meta, err
}

// List exposes KV().List with reconnect on failover.
func (c *consulClientImpl) List(prefix string, q *api.QueryOptions) (api.KVPairs, *api.QueryMeta, error) {
	var pairs api.KVPairs
	var meta *api.QueryMeta
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		pairs, meta, err = conn.client.KV().List(prefix, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return pairs, meta, err
}

// Acquire exposes KV().Acquire with reconnect on failover; it returns an
// error when the lock was not acquired.
func (c *consulClientImpl) Acquire(p *api.KVPair, q *api.WriteOptions) (*api.WriteMeta, error) {
	var err error
	var meta *api.WriteMeta
	var ok bool
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		ok, meta, err = conn.client.KV().Acquire(p, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	// *** this error is created in loop above
	if err != nil {
		return nil, err
	}
	if !ok {
		return nil, fmt.Errorf("acquire failed")
	}
	return meta, err
}

// Create exposes Session().Create with reconnect on failover.
func (c *consulClientImpl) Create(se *api.SessionEntry, q *api.WriteOptions) (string, *api.WriteMeta, error) {
	var session string
	var meta *api.WriteMeta
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		session, meta, err = conn.client.Session().Create(se, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return session, meta, err
}

// Destroy exposes Session().Destroy with reconnect on failover.
func (c *consulClientImpl) Destroy(id string, q *api.WriteOptions) (*api.WriteMeta, error) {
	return c.writeRetryFunc(func(conn *consulConnection) (*api.WriteMeta, error) {
		return conn.client.Session().Destroy(id, q)
	})
}

// Renew exposes Session().Renew with reconnect on failover.
func (c *consulClientImpl) Renew(id string, q *api.WriteOptions) (*api.SessionEntry, *api.WriteMeta, error) {
	var entry *api.SessionEntry
	var meta *api.WriteMeta
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		entry, meta, err = conn.client.Session().Renew(id, q)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return entry, meta, err
}

// RenewPeriodic exposes Session().RenewPeriodic with reconnect on failover.
func (c *consulClientImpl) RenewPeriodic(
	initialTTL string,
	id string,
	q *api.WriteOptions,
	doneCh chan struct{},
) error {
	var err error
	retry := false
	c.runWithRetry(func() bool {
		conn := c.conn
		err = conn.client.Session().RenewPeriodic(initialTTL, id, q, doneCh)
		retry, _ = c.reconnectIfConnectionError(conn, err)
		return retry
	})
	return err
}
// CreateMeta acquires a ttl'd key via KV().Acquire; on every failed attempt
// it tears down the session and deletes the key before retrying.
func (c *consulClientImpl) CreateMeta(
	id string,
	p *api.KVPair,
	q *api.WriteOptions,
) (*api.WriteMeta, bool, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().Acquire(p, q)
		if ok && err == nil {
			// NOTE(review): meta is discarded on the success path and nil is
			// returned instead — confirm callers do not dereference it.
			return nil, ok, err
		}
		// Best-effort cleanup before retrying; failures are only logged.
		if _, err := conn.client.Session().Destroy(p.Session, nil); err != nil {
			logrus.Error(err)
		}
		if _, err := c.Delete(id, nil); err != nil {
			logrus.Error(err)
		}
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			continue
		} else {
			break
		}
	}
	if !ok {
		return nil, ok, fmt.Errorf("failed to set ttl: %v", err)
	}
	return meta, ok, err
}

// CompareAndSet wraps KV().CAS with reconnect-on-failover. After a
// reconnect-driven retry, a key index mismatch is re-checked against the
// current value in consul: if the store already holds the desired value,
// the earlier CAS is assumed to have succeeded and the error is cleared.
func (c *consulClientImpl) CompareAndSet(
	id string,
	value []byte,
	p *api.KVPair,
	q *api.WriteOptions,
) (bool, *api.WriteMeta, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	retried := false
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().CAS(p, q)
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			retried = true
			continue
		} else if err != nil && isKeyIndexMismatchErr(err) && retried {
			kvPair, _, getErr := conn.client.KV().Get(id, nil)
			if getErr != nil {
				// failed to get value from kvdb
				return false, nil, err
			}
			// Prev Value not equal to current value in consul
			if bytes.Compare(kvPair.Value, value) != 0 {
				return false, nil, err
			} else {
				// kvdb has the new value that we are trying to set
				err = nil
				break
			}
		} else {
			break
		}
	}
	return ok, meta, err
}

// CompareAndDelete wraps KV().DeleteCAS with reconnect-on-failover. If a
// retry was performed and the key is now missing, the delete is assumed to
// have gone through on an earlier attempt and the error is cleared.
func (c *consulClientImpl) CompareAndDelete(
	id string,
	value []byte,
	p *api.KVPair,
	q *api.WriteOptions,
) (bool, *api.WriteMeta, error) {
	var ok bool
	var meta *api.WriteMeta
	var err error
	retried := false
	connError := false
	for i := 0; i < c.maxRetries; i++ {
		conn := c.conn
		ok, meta, err = conn.client.KV().DeleteCAS(p, q)
		connError, err = c.reconnectIfConnectionError(conn, err)
		if connError {
			retried = true
			continue
		} else if retried && err == kvdb.ErrNotFound {
			// assuming our delete went through, there is no way
			// to figure out who deleted it
			err = nil
			break
		} else {
			break
		}
	}
	return ok, meta, err
}
|
package controllers
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/raggaer/castro/app/lua"
"github.com/raggaer/castro/app/util"
)
// LuaPage executes the given lua page.
func LuaPage(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	// Borrow a lua state from the pool; return it once the page has run.
	luaState := lua.Pool.Get()
	defer lua.Pool.Put(luaState)
	// Log which page failed, not just the bare error, so failures are traceable.
	if err := luaState.DoFile("pages/" + ps.ByName("page") + ".lua"); err != nil {
		util.Logger.Errorf("Cannot execute %v: %v", ps.ByName("page"), err)
	}
}
Handling lua page error
package controllers
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/raggaer/castro/app/lua"
"github.com/raggaer/castro/app/util"
)
// LuaPage executes the given lua page
func LuaPage(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	// Borrow a lua state from the pool; return it once the page has run.
	luaState := lua.Pool.Get()
	defer lua.Pool.Put(luaState)
	// Run pages/<page>.lua and log the page name on failure.
	// NOTE(review): the trailing \n is redundant if util.Logger appends its
	// own newline — confirm against the logger implementation.
	if err := luaState.DoFile("pages/" + ps.ByName("page") + ".lua"); err != nil {
		util.Logger.Errorf("Cannot execute %v: %v\n", ps.ByName("page"), err)
	}
}
|
package gps
import (
"bytes"
"fmt"
"go/build"
gscan "go/scanner"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"text/scanner"
)
var osList []string
var archList []string
var stdlib = make(map[string]bool)
const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
// Before appengine moved to google.golang.org/appengine, it had a magic
// stdlib-like import path. We have to ignore all of these.
const appenginePkgs string = "appengine/aetest appengine/blobstore appengine/capability appengine/channel appengine/cloudsql appengine/cmd appengine/cmd/aebundler appengine/cmd/aedeploy appengine/cmd/aefix appengine/datastore appengine/delay appengine/demos appengine/demos/guestbook appengine/demos/guestbook/templates appengine/demos/helloworld appengine/file appengine/image appengine/internal appengine/internal/aetesting appengine/internal/app_identity appengine/internal/base appengine/internal/blobstore appengine/internal/capability appengine/internal/channel appengine/internal/datastore appengine/internal/image appengine/internal/log appengine/internal/mail appengine/internal/memcache appengine/internal/modules appengine/internal/remote_api appengine/internal/search appengine/internal/socket appengine/internal/system appengine/internal/taskqueue appengine/internal/urlfetch appengine/internal/user appengine/internal/xmpp appengine/log appengine/mail appengine/memcache appengine/module appengine/remote_api appengine/runtime appengine/search appengine/socket appengine/taskqueue appengine/urlfetch appengine/user appengine/xmpp"
// init populates the os/arch lists and the stdlib/appengine ignore set used
// by the package scanner.
func init() {
	// The supported systems are listed in
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
	// The lists are not exported so we need to duplicate them here.
	osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
	osList = strings.Split(osListString, " ")

	archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
	archList = strings.Split(archListString, " ")

	// Both real stdlib and magic appengine paths go into the same ignore set.
	for _, pkg := range strings.Split(stdlibPkgs, " ") {
		stdlib[pkg] = true
	}
	for _, pkg := range strings.Split(appenginePkgs, " ") {
		stdlib[pkg] = true
	}

	// Also ignore C
	// TODO(sdboyer) actually figure out how to deal with cgo
	stdlib["C"] = true
}
// ListPackages reports Go package information about all directories in the tree
// at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
//	fileRoot = "/home/user/go/src/github.com/foo/bar"
//	importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
//	fileRoot = "/home/user/workspace/path/to/repo"
//	importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and the package at
// "/home/user/workspace/path/to/repo/baz" will be "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
	// Set up a build.ctx for parsing
	ctx := build.Default
	ctx.GOROOT = ""
	ctx.GOPATH = ""
	ctx.UseAllFiles = true
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages:   make(map[string]PackageOrErr),
	}

	// mkfilter returns two funcs that can be injected into a build.Context,
	// letting us filter the results into an "in" and "out" set.
	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
		// in: keep only the files named in the set.
		in = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		// out: keep everything except the files named in the set.
		out = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; !exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		return
	}

	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath:  importPath,
			CommentPath: p.ImportComment,
			Name:        p.Name,
			Imports:     p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}
		return pkg
	}

	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
		if err != nil && err != filepath.SkipDir {
			return err
		}
		if !fi.IsDir() {
			return nil
		}

		// Skip dirs that are known to hold non-local/dependency code.
		//
		// We don't skip _*, or testdata dirs because, while it may be poor
		// form, importing them is not a compilation error.
		switch fi.Name() {
		case "vendor", "Godeps":
			return filepath.SkipDir
		}
		// We do skip dot-dirs, though, because it's such a ubiquitous standard
		// that they not be visited by normal commands, and because things get
		// really weird if we don't.
		if strings.HasPrefix(fi.Name(), ".") {
			return filepath.SkipDir
		}

		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))

		// Find all the imports, across all os/arch combos
		p, err := ctx.ImportDir(path, analysisImportMode())
		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch terr := err.(type) {
			case gscan.ErrorList, *gscan.Error:
				// This happens if we encounter malformed Go source code
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.NoGoError:
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.MultiplePackageError:
				// Set this up preemptively, so we can easily just return out if
				// something goes wrong. Otherwise, it'll get transparently
				// overwritten later.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}

				// For now, we're punting entirely on dealing with os/arch
				// combinations. That will be a more significant refactor.
				//
				// However, there is one case we want to allow here - one or
				// more files with "+build ignore" with package `main`. (Ignore
				// is just a convention, but for now it's good enough to just
				// check that.) This is a fairly common way to give examples,
				// and to make a more sophisticated build system than a Makefile
				// allows, so we want to support that case. So, transparently
				// lump the deps together.
				mains := make(map[string]struct{})
				for k, pkgname := range terr.Packages {
					if pkgname == "main" {
						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
						if err2 != nil {
							return nil
						}

						var hasignore bool
						for _, t := range tags {
							if t == "ignore" {
								hasignore = true
								break
							}
						}
						if !hasignore {
							// No ignore tag found - bail out
							return nil
						}
						mains[terr.Files[k]] = struct{}{}
					}
				}
				// Make filtering funcs that will let us look only at the main
				// files, and exclude the main files; inf and outf, respectively
				inf, outf := mkfilter(mains)

				// outf first; if there's another err there, we bail out with a
				// return
				ctx.ReadDir = outf
				po, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = inf
				pi, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = nil

				// Use the other files as baseline, they're the main stuff
				pkg = happy(ip, po)
				mpkg := happy(ip, pi)
				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
			default:
				return err
			}
		}

		// This area has some...fuzzy rules, but check all the imports for
		// local/relative/dot-ness, and record an error for the package if we
		// see any.
		var lim []string
		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
			switch {
			// Do allow the single-dot, at least for now
			case imp == "..":
				lim = append(lim, imp)
			// ignore stdlib done this way, b/c that's what the go tooling does
			case strings.HasPrefix(imp, "./"):
				if stdlib[imp[2:]] {
					lim = append(lim, imp)
				}
			case strings.HasPrefix(imp, "../"):
				if stdlib[imp[3:]] {
					lim = append(lim, imp)
				}
			}
		}

		if len(lim) > 0 {
			ptree.Packages[ip] = PackageOrErr{
				Err: &LocalImportsError{
					Dir:          ip,
					LocalImports: lim,
				},
			}
		} else {
			ptree.Packages[ip] = PackageOrErr{
				P: pkg,
			}
		}

		return nil
	})

	if err != nil {
		return PackageTree{}, err
	}

	return ptree, nil
}
// LocalImportsError indicates that a package contains at least one relative
// import that will prevent it from compiling.
//
// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
type LocalImportsError struct {
	Dir          string   // import path of the offending package
	LocalImports []string // the problematic relative import paths
}

// Error implements the error interface.
func (e *LocalImportsError) Error() string {
	return fmt.Sprintf("import path %s had problematic local imports", e.Dir)
}
// readFileBuildTags parses the file at fp and returns the deduplicated list
// of build tags found in its leading comments, in first-seen order. On a read
// failure the underlying error is returned alongside an empty list.
func readFileBuildTags(fp string) ([]string, error) {
	co, err := readGoContents(fp)
	if err != nil {
		return []string{}, err
	}
	var tags []string
	// Only look at places where we had a code comment.
	if len(co) > 0 {
		// Dedupe via a set rather than the previous O(n²) nested scan (which
		// also lacked an early break); first-seen order is preserved.
		seen := make(map[string]bool)
		for _, tg := range findTags(co) {
			if !seen[tg] {
				seen[tg] = true
				tags = append(tags, tg)
			}
		}
	}
	return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// findTags extracts every build tag named on "// +build" comment lines in the
// given Go source fragment.
func findTags(co []byte) []string {
	var tgs []string
	for _, raw := range bytes.Split(co, []byte("\n")) {
		line := bytes.TrimSpace(raw)
		// Only consider comment lines that are well formed in the Go style.
		if !bytes.HasPrefix(line, []byte("//")) {
			continue
		}
		body := bytes.TrimSpace(line[2:])
		if len(body) == 0 || body[0] != '+' {
			continue
		}
		fields := strings.Fields(string(body))
		// A "+build" directive carries its tags in the remaining fields.
		if fields[0] == "+build" {
			tgs = append(tgs, fields[1:]...)
		}
	}
	return tgs
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	ImportRoot string                  // import path ascribed to the root directory of the tree
	Packages   map[string]PackageOrErr // per-import-path parse results
}
// dup copies the PackageTree.
//
// This is really only useful as a defensive measure to prevent external state
// mutations.
func (t PackageTree) dup() PackageTree {
	cp := PackageTree{
		ImportRoot: t.ImportRoot,
		Packages:   make(map[string]PackageOrErr, len(t.Packages)),
	}
	for ip, orig := range t.Packages {
		clone := PackageOrErr{
			Err: orig.Err,
			P:   orig.P,
		}
		// Slices are shared by plain assignment, so deep-copy the import
		// lists to keep the duplicate independent of the source tree.
		if n := len(orig.P.Imports); n > 0 {
			clone.P.Imports = append(make([]string, 0, n), orig.P.Imports...)
		}
		if n := len(orig.P.TestImports); n > 0 {
			clone.P.TestImports = append(make([]string, 0, n), orig.P.TestImports...)
		}
		cp.Packages[ip] = clone
	}
	return cp
}
// wm is an adjacency-list workmap entry for a single package, built by
// ExternalReach and consumed by wmToReach.
type wm struct {
	err error           // parse error recorded for the package, if any
	ex  map[string]bool // set of external import paths
	in  map[string]bool // set of internal (under the import root) import paths
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code.
type PackageOrErr struct {
	P   Package // the parsed package; meaningful only when Err is nil
	Err error   // why the directory could not be treated as a valid package
}
// ReachMap maps a set of import paths (keys) to the set of external packages
// transitively reachable from the packages at those import paths.
//
// The reachable-package slices are sorted and deduplicated.
//
// See PackageTree.ExternalReach() for more information.
type ReachMap map[string][]string
// ExternalReach looks through a PackageTree and computes the list of external
// import statements (that is, import statements pointing to packages that are
// not logical children of PackageTree.ImportRoot) that are transitively
// imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. When utilized by gps' solver, main packages are generally excluded
// from analyzing anything other than the root project, as they necessarily can't
// be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then not only does it not appear in the final
// map, but it is also excluded from the transitive calculations of other
// internal packages. That is, if you ignore A/foo, then the external package
// list for all internal packages that import A/foo will not include external
// packages that are only reachable through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// In this configuration, all of A's packages transitively import B/baz, so the
// returned map would be:
//
// map[string][]string{
// "A": []string{"B/baz"},
// "A/foo": []string{"B/baz"}
// "A/bar": []string{"B/baz"},
// }
//
// However, if you ignore A/foo, then A's path to B/baz is broken, and A/foo is
// omitted entirely. Thus, the returned map would be:
//
// map[string][]string{
// "A": []string{},
// "A/bar": []string{"B/baz"},
// }
//
// If there are no packages to ignore, it is safe to pass a nil map.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) ReachMap {
	if ignore == nil {
		ignore = make(map[string]bool)
	}
	// world's simplest adjacency list
	workmap := make(map[string]wm)
	for ip, perr := range t.Packages {
		if perr.Err != nil {
			workmap[ip] = wm{
				err: perr.Err,
			}
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}
		// NOTE: dedupeStrings may return p.Imports itself when there is
		// nothing to merge, so imps must be treated as read-only. (The
		// previous outer `var imps []string` plus the dead `imps = imps[:0]`
		// reset - immediately overwritten - have been dropped.)
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}
		w := wm{
			ex: make(map[string]bool),
			in: make(map[string]bool),
		}
		for _, imp := range imps {
			// Skip ignored imports
			if ignore[imp] {
				continue
			}
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				w.ex[imp] = true
			} else {
				// Always record the internal edge and let wmToReach's DFS
				// flatten it. The old in-place merge of an already-visited
				// package's ex/in sets was map-iteration-order dependent and
				// dropped the edge to the imported package itself, so
				// poisoning (e.g. from an errored package processed earlier
				// in the map walk) failed to propagate to its importers.
				w.in[imp] = true
			}
		}
		workmap[ip] = w
	}
	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
}
// wmToReach takes an internal "workmap" constructed by
// PackageTree.ExternalReach(), transitively walks (via depth-first traversal)
// all internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map.
//
// This is mostly separated out for testing purposes.
func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
	// Uses depth-first exploration to compute reachability into external
	// packages, dropping any internal packages on "poisoned paths" - a path
	// containing a package with an error, or with a dep on an internal package
	// that's missing.
	const (
		white uint8 = iota
		grey
		black
	)
	// colors records the DFS visit state per package; absence means white.
	colors := make(map[string]uint8)
	allreachsets := make(map[string]map[string]struct{})
	// poison is a helper func to eliminate specific reachsets from allreachsets
	poison := func(path []string) {
		for _, ppkg := range path {
			delete(allreachsets, ppkg)
		}
	}
	var dfe func(string, []string) bool
	// dfe is the depth-first-explorer that computes a safe, error-free external
	// reach map.
	//
	// pkg is the import path of the pkg currently being visited; path is the
	// stack of parent packages we've visited to get to pkg. The return value
	// indicates whether the level completed successfully (true) or if it was
	// poisoned (false).
	//
	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
	// parent reachsets, rather than a list of parent package string names.
	// might be able to eliminate the use of allreachsets map-of-maps entirely.
	dfe = func(pkg string, path []string) bool {
		// white is the zero value of uint8, which is what we want if the pkg
		// isn't in the colors map, so this works fine
		switch colors[pkg] {
		case white:
			// first visit to this pkg; mark it as in-process (grey)
			colors[pkg] = grey
			// make sure it's present and w/out errs
			w, exists := workmap[pkg]
			if !exists || w.err != nil {
				// Does not exist or has an err; poison self and all parents
				poison(path)
				// we know we're done here, so mark it black
				colors[pkg] = black
				return false
			}
			// pkg exists with no errs. mark it as in-process (grey), and start
			// a reachmap for it
			//
			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
			rs := make(map[string]struct{})
			// Push self onto the path slice. Passing this as a value has the
			// effect of auto-popping the slice, while also giving us safe
			// memory reuse.
			path = append(path, pkg)
			// Dump this package's external pkgs into its own reachset. Separate
			// loop from the parent dump to avoid nested map loop lookups.
			for ex := range w.ex {
				rs[ex] = struct{}{}
			}
			allreachsets[pkg] = rs
			// Push this pkg's external imports into all parent reachsets. Not
			// all parents will necessarily have a reachset; none, some, or all
			// could have been poisoned by a different path than what we're on
			// right now. (Or we could be at depth 0)
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range w.ex {
						prs[ex] = struct{}{}
					}
				}
			}
			// Now, recurse until done, or a false bubbles up, indicating the
			// path is poisoned.
			var clean bool
			for in := range w.in {
				// It's possible, albeit weird, for a package to import itself.
				// If we try to visit self, though, then it erroneously poisons
				// the path, as it would be interpreted as grey. In reality,
				// this becomes a no-op, so just skip it.
				if in == pkg {
					continue
				}
				clean = dfe(in, path)
				if !clean {
					// Path is poisoned. Our reachmap was already deleted by the
					// path we're returning from; mark ourselves black, then
					// bubble up the poison. This is OK to do early, before
					// exploring all internal imports, because the outer loop
					// visits all internal packages anyway.
					//
					// In fact, stopping early is preferable - white subpackages
					// won't have to iterate pointlessly through a parent path
					// with no reachset.
					colors[pkg] = black
					return false
				}
			}
			// Fully done with this pkg; no transitive problems.
			colors[pkg] = black
			return true
		case grey:
			// grey means an import cycle; guaranteed badness right here. You'd
			// hope we never encounter it in a dependency (really? you published
			// that code?), but we have to defend against it.
			//
			// FIXME handle import cycles by dropping everything involved. (i
			// think we need to compute SCC, then drop *all* of them?)
			colors[pkg] = black
			poison(append(path, pkg)) // poison self and parents
			// Deliberately no return here: fall out of the switch to the
			// shared "return false" below.
		case black:
			// black means we're done with the package. If it has an entry in
			// allreachsets, it completed successfully. If not, it was poisoned,
			// and we need to bubble the poison back up.
			rs, exists := allreachsets[pkg]
			if !exists {
				// just poison parents; self was necessarily already poisoned
				poison(path)
				return false
			}
			// It's good; pull over of the external imports from its reachset
			// into all non-poisoned parent reachsets
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range rs {
						prs[ex] = struct{}{}
					}
				}
			}
			return true
		default:
			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
		}
		// Reached only by falling out of the grey (import cycle) case above -
		// NOT dead code, despite appearances: a cycle reports as poisoned.
		return false
	}
	// Run the depth-first exploration.
	//
	// Don't bother computing graph sources, this straightforward loop works
	// comparably well, and fits nicely with an escape hatch in the dfe.
	var path []string
	for pkg := range workmap {
		dfe(pkg, path)
	}
	if len(allreachsets) == 0 {
		return nil
	}
	// Flatten allreachsets into the final reachlist.
	// (When basedir is empty, as in the current caller, rt is just the path
	// separator, which should never prefix an import path - so the TrimPrefix
	// below is effectively a no-op. TODO confirm once basedir is used again.)
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	rm := make(map[string][]string)
	for pkg, rs := range allreachsets {
		rlen := len(rs)
		if rlen == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}
		edeps := make([]string, rlen)
		k := 0
		for opkg := range rs {
			edeps[k] = opkg
			k++
		}
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}
	return rm
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are reachable through imports from all valid packages in a
// ReachMap, as computed by PackageTree.ExternalReach().
//
// main and tests determine whether main packages and test imports should be
// included in the calculation. "External" is defined as anything not prefixed,
// after path cleaning, by the PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered independently
// (with one set of exceptions, noted below). That means, given a PackageTree
// with root A and packages at A, A/foo, and A/bar, and the following import
// chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// It is safe to pass a nil map if there are no packages to ignore.
//
// If an internal package has an error (that is, PackageOrErr is Err), it is excluded from
// consideration. Internal packages that transitively import the error package
// are also excluded. So, if:
//
// -> B/foo
// /
// A
// \
// -> A/bar -> B/baz
//
// And A/bar has some error in it, then both A and A/bar will be eliminated from
// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
// its errors, is ignored, however, then A will remain, and B/foo will be in the
// results.
//
// Finally, note that if a directory is named "testdata", or has a leading dot
// or underscore, it will not be directly analyzed as a source. This is in
// keeping with Go tooling conventions that such directories should be ignored.
// So, if:
//
// A -> B/foo
// A/.bar -> B/baz
// A/_qux -> B/baz
// A/testdata -> B/baz
//
// Then B/foo will be returned, but B/baz will not, because all three of the
// packages that import it are in directories with disallowed names.
//
// HOWEVER, in keeping with the Go compiler, if one of those packages in a
// disallowed directory is imported by a package in an allowed directory, then
// it *will* be used. That is, while tools like go list will ignore a directory
// named .foo, you can still import from .foo. Thus, it must be included. So,
// if:
//
// -> B/foo
// /
// A
// \
// -> A/.bar -> B/baz
//
// A is legal, and it imports A/.bar, so the results will include B/baz.
func (rm ReachMap) ListExternalImports() []string {
	exm := make(map[string]struct{})
	// Skip packages whose import path contains a leading-dot element, a
	// leading-underscore element, or "testdata": such directories are not
	// treated as sources themselves. Anything they made reachable was already
	// folded in by ExternalReach if they were internally reachable.
	//
	// TODO(sdboyer) strings.Split will always heap alloc, which isn't great to do
	// in a loop like this. We could also just parse it ourselves...
pkgs:
	for pkg, reach := range rm {
		for _, elem := range strings.Split(pkg, "/") {
			if elem == "testdata" || strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") {
				continue pkgs
			}
		}
		for _, ex := range reach {
			exm[ex] = struct{}{}
		}
	}
	if len(exm) == 0 {
		return nil
	}
	ex := make([]string, 0, len(exm))
	for p := range exm {
		ex = append(ex, p)
	}
	sort.Strings(ex)
	return ex
}
// checkPrefixSlash checks to see if the prefix is a prefix of the string as-is,
// and that it is either equal OR the prefix + / is still a prefix.
func checkPrefixSlash(s, prefix string) bool {
	switch {
	case !strings.HasPrefix(s, prefix):
		return false
	case s == prefix:
		return true
	default:
		// Demand a separator right after the prefix, so that e.g. "foo/barbaz"
		// is not considered to be under "foo/bar".
		return strings.HasPrefix(s, ensureTrailingSlash(prefix))
	}
}
func ensureTrailingSlash(s string) string {
return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
}
// helper func to merge, dedupe, and sort strings
func dedupeStrings(s1, s2 []string) (r []string) {
	if len(s1) == 0 || len(s2) == 0 {
		// Pass-through cases: with nothing to merge, the non-empty input (or
		// nil, when both are empty) is returned untouched, exactly as before.
		if len(s1) > 0 {
			return s1
		}
		if len(s2) > 0 {
			return s2
		}
		return nil
	}
	set := make(map[string]bool, len(s1)+len(s2))
	for _, v := range s1 {
		set[v] = true
	}
	for _, v := range s2 {
		set[v] = true
	}
	r = make([]string, 0, len(set))
	for v := range set {
		r = append(r, v)
	}
	// And then re-sort them
	sort.Strings(r)
	return r
}
Replace the stdlib literal list with a pattern checker

This mirrors what's done in the stdlib itself. It might not be the best
long-term solution, but it's OK for now.
package gps
import (
"bytes"
"fmt"
"go/build"
gscan "go/scanner"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"text/scanner"
)
// osList and archList hold the GOOS and GOARCH values recognized by go/build.
var osList []string
var archList []string

func init() {
	// The supported systems are listed in
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
	// The lists are not exported, so we need to duplicate them here.
	osList = strings.Fields("android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows")
	archList = strings.Fields("386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64")
}
// This was lovingly taken from src/cmd/go/pkg.go in Go's code (isStandardImportPath).
//
// Stored as a var so that tests can swap it out. Ugh globals, ugh.
var isStdLib = func(path string) bool {
	// A path is standard when its first element contains no dot; hosted
	// import paths always begin with a domain name (e.g. "github.com/...").
	first := path
	if idx := strings.IndexByte(path, '/'); idx >= 0 {
		first = path[:idx]
	}
	return !strings.Contains(first, ".")
}
// ListPackages reports Go package information about all directories in the tree
// at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
// fileRoot = "/home/user/go/src/github.com/foo/bar"
// importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
// fileRoot = "/home/user/workspace/path/to/repo"
// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and the package at
// "/home/user/workspace/path/to/repo/baz" will be "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
	// Set up a build.ctx for parsing
	ctx := build.Default
	// Zero GOROOT/GOPATH so resolution is driven purely by the walked tree,
	// and take all files regardless of build constraints.
	ctx.GOROOT = ""
	ctx.GOPATH = ""
	ctx.UseAllFiles = true
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages:   make(map[string]PackageOrErr),
	}
	// mkfilter returns two funcs that can be injected into a build.Context,
	// letting us filter the results into an "in" and "out" set.
	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
		// in: only the named files are visible to the build.Context.
		in = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		// out: everything except the named files is visible.
		out = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; !exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		return
	}
	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath:  importPath,
			CommentPath: p.ImportComment,
			Name:        p.Name,
			Imports:     p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}
		return pkg
	}
	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
		if err != nil && err != filepath.SkipDir {
			return err
		}
		// NOTE(review): fi is assumed non-nil past this point. filepath.Walk
		// never passes SkipDir as the incoming err, so this holds when called
		// by Walk; confirm if this callback is ever reused elsewhere.
		if !fi.IsDir() {
			return nil
		}
		// Skip dirs that are known to hold non-local/dependency code.
		//
		// We don't skip _*, or testdata dirs because, while it may be poor
		// form, importing them is not a compilation error.
		switch fi.Name() {
		case "vendor", "Godeps":
			return filepath.SkipDir
		}
		// We do skip dot-dirs, though, because it's such a ubiquitous standard
		// that they not be visited by normal commands, and because things get
		// really weird if we don't.
		if strings.HasPrefix(fi.Name(), ".") {
			return filepath.SkipDir
		}
		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))
		// Find all the imports, across all os/arch combos
		p, err := ctx.ImportDir(path, analysisImportMode())
		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch terr := err.(type) {
			case gscan.ErrorList, *gscan.Error:
				// This happens if we encounter malformed Go source code
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.NoGoError:
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.MultiplePackageError:
				// Set this up preemptively, so we can easily just return out if
				// something goes wrong. Otherwise, it'll get transparently
				// overwritten later.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				// For now, we're punting entirely on dealing with os/arch
				// combinations. That will be a more significant refactor.
				//
				// However, there is one case we want to allow here - one or
				// more files with "+build ignore" with package `main`. (Ignore
				// is just a convention, but for now it's good enough to just
				// check that.) This is a fairly common way to give examples,
				// and to make a more sophisticated build system than a Makefile
				// allows, so we want to support that case. So, transparently
				// lump the deps together.
				mains := make(map[string]struct{})
				for k, pkgname := range terr.Packages {
					if pkgname == "main" {
						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
						if err2 != nil {
							// err2 deliberately swallowed: the
							// MultiplePackageError recorded above stands as
							// this directory's result.
							return nil
						}
						var hasignore bool
						for _, t := range tags {
							if t == "ignore" {
								hasignore = true
								break
							}
						}
						if !hasignore {
							// No ignore tag found - bail out
							return nil
						}
						mains[terr.Files[k]] = struct{}{}
					}
				}
				// Make filtering funcs that will let us look only at the main
				// files, and exclude the main files; inf and outf, respectively
				inf, outf := mkfilter(mains)
				// outf first; if there's another err there, we bail out with a
				// return
				ctx.ReadDir = outf
				po, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = inf
				pi, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = nil
				// Use the other files as baseline, they're the main stuff
				pkg = happy(ip, po)
				mpkg := happy(ip, pi)
				// Merge the ignored-main files' deps into the baseline package.
				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
			default:
				return err
			}
		}
		// This area has some...fuzzy rules, but check all the imports for
		// local/relative/dot-ness, and record an error for the package if we
		// see any.
		var lim []string
		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
			switch {
			// Do allow the single-dot, at least for now
			case imp == "..":
				lim = append(lim, imp)
			// ignore stdlib done this way, b/c that's what the go tooling does
			case strings.HasPrefix(imp, "./"):
				if stdlib[imp[2:]] {
					lim = append(lim, imp)
				}
			case strings.HasPrefix(imp, "../"):
				if stdlib[imp[3:]] {
					lim = append(lim, imp)
				}
			}
		}
		if len(lim) > 0 {
			ptree.Packages[ip] = PackageOrErr{
				Err: &LocalImportsError{
					Dir:          ip,
					LocalImports: lim,
				},
			}
		} else {
			ptree.Packages[ip] = PackageOrErr{
				P: pkg,
			}
		}
		return nil
	})
	if err != nil {
		return PackageTree{}, err
	}
	return ptree, nil
}
// LocalImportsError indicates that a package contains at least one relative
// import that will prevent it from compiling.
//
// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
type LocalImportsError struct {
	Dir          string
	LocalImports []string
}

// Error reports the offending directory's import path, satisfying the
// standard error interface.
func (e *LocalImportsError) Error() string {
	msg := fmt.Sprintf("import path %s had problematic local imports", e.Dir)
	return msg
}
// readFileBuildTags parses the file at fp and returns the deduplicated list
// of build tags found in its leading comments, in first-seen order. On a read
// failure the underlying error is returned alongside an empty list.
func readFileBuildTags(fp string) ([]string, error) {
	co, err := readGoContents(fp)
	if err != nil {
		return []string{}, err
	}
	var tags []string
	// Only look at places where we had a code comment.
	if len(co) > 0 {
		// Dedupe via a set rather than the previous O(n²) nested scan (which
		// also lacked an early break); first-seen order is preserved.
		seen := make(map[string]bool)
		for _, tg := range findTags(co) {
			if !seen[tg] {
				seen[tg] = true
				tags = append(tags, tg)
			}
		}
	}
	return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// findTags extracts every build tag named on "// +build" comment lines in the
// given Go source fragment.
func findTags(co []byte) []string {
	var tgs []string
	for _, raw := range bytes.Split(co, []byte("\n")) {
		line := bytes.TrimSpace(raw)
		// Only consider comment lines that are well formed in the Go style.
		if !bytes.HasPrefix(line, []byte("//")) {
			continue
		}
		body := bytes.TrimSpace(line[2:])
		if len(body) == 0 || body[0] != '+' {
			continue
		}
		fields := strings.Fields(string(body))
		// A "+build" directive carries its tags in the remaining fields.
		if fields[0] == "+build" {
			tgs = append(tgs, fields[1:]...)
		}
	}
	return tgs
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	ImportRoot string                  // import path ascribed to the root directory of the tree
	Packages   map[string]PackageOrErr // per-import-path parse results
}
// dup copies the PackageTree.
//
// This is really only useful as a defensive measure to prevent external state
// mutations.
func (t PackageTree) dup() PackageTree {
	cp := PackageTree{
		ImportRoot: t.ImportRoot,
		Packages:   make(map[string]PackageOrErr, len(t.Packages)),
	}
	for ip, orig := range t.Packages {
		clone := PackageOrErr{
			Err: orig.Err,
			P:   orig.P,
		}
		// Slices are shared by plain assignment, so deep-copy the import
		// lists to keep the duplicate independent of the source tree.
		if n := len(orig.P.Imports); n > 0 {
			clone.P.Imports = append(make([]string, 0, n), orig.P.Imports...)
		}
		if n := len(orig.P.TestImports); n > 0 {
			clone.P.TestImports = append(make([]string, 0, n), orig.P.TestImports...)
		}
		cp.Packages[ip] = clone
	}
	return cp
}
// wm is an adjacency-list workmap entry for a single package, built by
// ExternalReach and consumed by wmToReach.
type wm struct {
	err error           // parse error recorded for the package, if any
	ex  map[string]bool // set of external import paths
	in  map[string]bool // set of internal (under the import root) import paths
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code.
type PackageOrErr struct {
	P   Package // the parsed package; meaningful only when Err is nil
	Err error   // why the directory could not be treated as a valid package
}
// ReachMap maps a set of import paths (keys) to the set of external packages
// transitively reachable from the packages at those import paths.
//
// The reachable-package slices are sorted and deduplicated.
//
// See PackageTree.ExternalReach() for more information.
type ReachMap map[string][]string
// ExternalReach looks through a PackageTree and computes the list of external
// import statements (that is, import statements pointing to packages that are
// not logical children of PackageTree.ImportRoot) that are transitively
// imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. When utilized by gps' solver, main packages are generally excluded
// from analyzing anything other than the root project, as they necessarily can't
// be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then not only does it not appear in the final
// map, but it is also excluded from the transitive calculations of other
// internal packages. That is, if you ignore A/foo, then the external package
// list for all internal packages that import A/foo will not include external
// packages that are only reachable through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// In this configuration, all of A's packages transitively import B/baz, so the
// returned map would be:
//
// map[string][]string{
// "A": []string{"B/baz"},
// "A/foo": []string{"B/baz"}
// "A/bar": []string{"B/baz"},
// }
//
// However, if you ignore A/foo, then A's path to B/baz is broken, and A/foo is
// omitted entirely. Thus, the returned map would be:
//
// map[string][]string{
// "A": []string{},
// "A/bar": []string{"B/baz"},
// }
//
// If there are no packages to ignore, it is safe to pass a nil map.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) ReachMap {
	if ignore == nil {
		ignore = make(map[string]bool)
	}
	// world's simplest adjacency list
	workmap := make(map[string]wm)
	for ip, perr := range t.Packages {
		if perr.Err != nil {
			workmap[ip] = wm{
				err: perr.Err,
			}
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}
		// NOTE: dedupeStrings may return p.Imports itself when there is
		// nothing to merge, so imps must be treated as read-only. (The
		// previous outer `var imps []string` plus the dead `imps = imps[:0]`
		// reset - immediately overwritten - have been dropped.)
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}
		w := wm{
			ex: make(map[string]bool),
			in: make(map[string]bool),
		}
		for _, imp := range imps {
			// Skip ignored imports
			if ignore[imp] {
				continue
			}
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				w.ex[imp] = true
			} else {
				// Always record the internal edge and let wmToReach's DFS
				// flatten it. The old in-place merge of an already-visited
				// package's ex/in sets was map-iteration-order dependent and
				// dropped the edge to the imported package itself, so
				// poisoning (e.g. from an errored package processed earlier
				// in the map walk) failed to propagate to its importers.
				w.in[imp] = true
			}
		}
		workmap[ip] = w
	}
	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
}
// wmToReach takes an internal "workmap" constructed by
// PackageTree.ExternalReach(), transitively walks (via depth-first traversal)
// all internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map.
//
// This is mostly separated out for testing purposes.
func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
	// Uses depth-first exploration to compute reachability into external
	// packages, dropping any internal packages on "poisoned paths" - a path
	// containing a package with an error, or with a dep on an internal package
	// that's missing.
	const (
		white uint8 = iota
		grey
		black
	)
	// colors tracks per-package traversal state: white (unvisited, the map's
	// zero value), grey (visit in progress), black (fully processed).
	colors := make(map[string]uint8)
	// allreachsets accumulates the external reachset of each healthy internal
	// package; poisoned packages are deleted from it as they're discovered.
	allreachsets := make(map[string]map[string]struct{})
	// poison is a helper func to eliminate specific reachsets from allreachsets
	poison := func(path []string) {
		for _, ppkg := range path {
			delete(allreachsets, ppkg)
		}
	}
	// Declared before assignment so the closure can recurse into itself.
	var dfe func(string, []string) bool
	// dfe is the depth-first-explorer that computes a safe, error-free external
	// reach map.
	//
	// pkg is the import path of the pkg currently being visited; path is the
	// stack of parent packages we've visited to get to pkg. The return value
	// indicates whether the level completed successfully (true) or if it was
	// poisoned (false).
	//
	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
	// parent reachsets, rather than a list of parent package string names.
	// might be able to eliminate the use of allreachsets map-of-maps entirely.
	dfe = func(pkg string, path []string) bool {
		// white is the zero value of uint8, which is what we want if the pkg
		// isn't in the colors map, so this works fine
		switch colors[pkg] {
		case white:
			// first visit to this pkg; mark it as in-process (grey)
			colors[pkg] = grey
			// make sure it's present and w/out errs
			w, exists := workmap[pkg]
			if !exists || w.err != nil {
				// Does not exist or has an err; poison self and all parents
				poison(path)
				// we know we're done here, so mark it black
				colors[pkg] = black
				return false
			}
			// pkg exists with no errs. mark it as in-process (grey), and start
			// a reachmap for it
			//
			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
			rs := make(map[string]struct{})
			// Push self onto the path slice. Passing this as a value has the
			// effect of auto-popping the slice, while also giving us safe
			// memory reuse.
			path = append(path, pkg)
			// Dump this package's external pkgs into its own reachset. Separate
			// loop from the parent dump to avoid nested map loop lookups.
			for ex := range w.ex {
				rs[ex] = struct{}{}
			}
			allreachsets[pkg] = rs
			// Push this pkg's external imports into all parent reachsets. Not
			// all parents will necessarily have a reachset; none, some, or all
			// could have been poisoned by a different path than what we're on
			// right now. (Or we could be at depth 0)
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range w.ex {
						prs[ex] = struct{}{}
					}
				}
			}
			// Now, recurse until done, or a false bubbles up, indicating the
			// path is poisoned.
			var clean bool
			for in := range w.in {
				// It's possible, albeit weird, for a package to import itself.
				// If we try to visit self, though, then it erroneously poisons
				// the path, as it would be interpreted as grey. In reality,
				// this becomes a no-op, so just skip it.
				if in == pkg {
					continue
				}
				clean = dfe(in, path)
				if !clean {
					// Path is poisoned. Our reachmap was already deleted by the
					// path we're returning from; mark ourselves black, then
					// bubble up the poison. This is OK to do early, before
					// exploring all internal imports, because the outer loop
					// visits all internal packages anyway.
					//
					// In fact, stopping early is preferable - white subpackages
					// won't have to iterate pointlessly through a parent path
					// with no reachset.
					colors[pkg] = black
					return false
				}
			}
			// Fully done with this pkg; no transitive problems.
			colors[pkg] = black
			return true
		case grey:
			// grey means an import cycle; guaranteed badness right here. You'd
			// hope we never encounter it in a dependency (really? you published
			// that code?), but we have to defend against it.
			//
			// FIXME handle import cycles by dropping everything involved. (i
			// think we need to compute SCC, then drop *all* of them?)
			colors[pkg] = black
			poison(append(path, pkg)) // poison self and parents
		case black:
			// black means we're done with the package. If it has an entry in
			// allreachsets, it completed successfully. If not, it was poisoned,
			// and we need to bubble the poison back up.
			rs, exists := allreachsets[pkg]
			if !exists {
				// just poison parents; self was necessarily already poisoned
				poison(path)
				return false
			}
			// It's good; pull over of the external imports from its reachset
			// into all non-poisoned parent reachsets
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range rs {
						prs[ex] = struct{}{}
					}
				}
			}
			return true
		default:
			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
		}
		// NOTE: this IS reachable - the grey (cycle) case above does not
		// return, and falls through the switch to here. The cycle members
		// were poisoned in that case, so failure is the right report.
		return false
	}
	// Run the depth-first exploration.
	//
	// Don't bother computing graph sources, this straightforward loop works
	// comparably well, and fits nicely with an escape hatch in the dfe.
	var path []string
	for pkg := range workmap {
		dfe(pkg, path)
	}
	if len(allreachsets) == 0 {
		return nil
	}
	// Flatten allreachsets into the final reachlist
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	rm := make(map[string][]string)
	for pkg, rs := range allreachsets {
		rlen := len(rs)
		if rlen == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}
		edeps := make([]string, rlen)
		k := 0
		for opkg := range rs {
			edeps[k] = opkg
			k++
		}
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}
	return rm
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are reachable through imports from all valid packages in a
// ReachMap, as computed by PackageTree.ExternalReach().
//
// main and tests determine whether main packages and test imports should be
// included in the calculation. "External" is defined as anything not prefixed,
// after path cleaning, by the PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered independently
// (with one set of exceptions, noted below). That means, given a PackageTree
// with root A and packages at A, A/foo, and A/bar, and the following import
// chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// It is safe to pass a nil map if there are no packages to ignore.
//
// If an internal package has an error (that is, PackageOrErr is Err), it is excluded from
// consideration. Internal packages that transitively import the error package
// are also excluded. So, if:
//
// -> B/foo
// /
// A
// \
// -> A/bar -> B/baz
//
// And A/bar has some error in it, then both A and A/bar will be eliminated from
// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
// its errors, is ignored, however, then A will remain, and B/foo will be in the
// results.
//
// Finally, note that if a directory is named "testdata", or has a leading dot
// or underscore, it will not be directly analyzed as a source. This is in
// keeping with Go tooling conventions that such directories should be ignored.
// So, if:
//
// A -> B/foo
// A/.bar -> B/baz
// A/_qux -> B/baz
// A/testdata -> B/baz
//
// Then B/foo will be returned, but B/baz will not, because all three of the
// packages that import it are in directories with disallowed names.
//
// HOWEVER, in keeping with the Go compiler, if one of those packages in a
// disallowed directory is imported by a package in an allowed directory, then
// it *will* be used. That is, while tools like go list will ignore a directory
// named .foo, you can still import from .foo. Thus, it must be included. So,
// if:
//
// -> B/foo
// /
// A
// \
// -> A/.bar -> B/baz
//
// A is legal, and it imports A/.bar, so the results will include B/baz.
func (rm ReachMap) ListExternalImports() []string {
	exm := make(map[string]struct{})

pkgs:
	for pkg, reach := range rm {
		// Packages living under a dot-, underscore-, or testdata-named
		// directory are not treated as sources themselves; any externals
		// they expose were already hoisted into legal parents.
		//
		// TODO(sdboyer) strings.Split will always heap alloc, which isn't great to do
		// in a loop like this. We could also just parse it ourselves...
		for _, elem := range strings.Split(pkg, "/") {
			if elem == "testdata" || strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") {
				continue pkgs
			}
		}
		for _, e := range reach {
			exm[e] = struct{}{}
		}
	}

	if len(exm) == 0 {
		return nil
	}

	// Flatten the set and sort for deterministic output.
	ex := make([]string, 0, len(exm))
	for p := range exm {
		ex = append(ex, p)
	}
	sort.Strings(ex)
	return ex
}
// checkPrefixSlash checks to see if the prefix is a prefix of the string as-is,
// and that it is either equal OR the prefix + / is still a prefix.
func checkPrefixSlash(s, prefix string) bool {
if !strings.HasPrefix(s, prefix) {
return false
}
return s == prefix || strings.HasPrefix(s, ensureTrailingSlash(prefix))
}
func ensureTrailingSlash(s string) string {
return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
}
// helper func to merge, dedupe, and sort strings
func dedupeStrings(s1, s2 []string) (r []string) {
dedupe := make(map[string]bool)
if len(s1) > 0 && len(s2) > 0 {
for _, i := range s1 {
dedupe[i] = true
}
for _, i := range s2 {
dedupe[i] = true
}
for i := range dedupe {
r = append(r, i)
}
// And then re-sort them
sort.Strings(r)
} else if len(s1) > 0 {
r = s1
} else if len(s2) > 0 {
r = s2
}
return
}
|
package vsolver
import (
"bytes"
"fmt"
"go/build"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"text/scanner"
)
// osList and archList enumerate the GOOS/GOARCH values recognized by the Go
// toolchain; both are populated in init().
var osList []string
var archList []string
// stdlib is a set of standard-library import paths, populated in init()
// from stdlibPkgs.
var stdlib = make(map[string]struct{})
// stdlibPkgs is the space-separated list of standard-library packages used
// to seed the stdlib set.
const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
// init seeds the package-level OS, architecture, and stdlib lookup tables.
func init() {
	// The supported systems are listed in
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
	// The lists are not exported so we need to duplicate them here.
	osList = strings.Split("android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows", " ")
	archList = strings.Split("386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64", " ")

	// Build a set out of the stdlib package list for O(1) membership checks.
	for _, pkg := range strings.Split(stdlibPkgs, " ") {
		stdlib[pkg] = struct{}{}
	}
}
// listPackages lists info for all packages at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
// fileRoot = "/home/user/go/src/github.com/foo/bar"
// importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
// fileRoot = "/home/user/workspace/path/to/repo"
// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and its subpackage "baz" will be
// "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
func listPackages(fileRoot, importRoot string) (PackageTree, error) {
	// Set up a build.ctx for parsing
	ctx := build.Default
	// Clear GOROOT/GOPATH so parsing is purely local to the tree being
	// scanned, and read all files regardless of build-tag filtering.
	ctx.GOROOT = ""
	ctx.GOPATH = ""
	ctx.UseAllFiles = true
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages: make(map[string]PackageOrErr),
	}
	// mkfilter returns two funcs that can be injected into a
	// build.Context, letting us filter the results into an "in" and "out" set.
	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
		// in lists only the files named in the files set.
		in = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		// out lists the complement: every file NOT in the files set.
		out = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; !exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		return
	}
	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath: importPath,
			CommentPath: p.ImportComment,
			Name: p.Name,
			Imports: p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}
		return pkg
	}
	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
		// Abort the whole walk on any error other than SkipDir.
		if err != nil && err != filepath.SkipDir {
			return err
		}
		if !fi.IsDir() {
			return nil
		}
		// Skip a few types of dirs
		if !localSrcDir(fi) {
			return filepath.SkipDir
		}
		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))
		// Find all the imports, across all os/arch combos
		p, err := ctx.ImportDir(path, analysisImportMode())
		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch terr := err.(type) {
			case *build.NoGoError:
				// No Go source here; record the error and keep walking.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.MultiplePackageError:
				// Set this up preemptively, so we can easily just return out if
				// something goes wrong. Otherwise, it'll get transparently
				// overwritten later.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				// For now, we're punting entirely on dealing with os/arch
				// combinations. That will be a more significant refactor.
				//
				// However, there is one case we want to allow here - a single
				// file, with "+build ignore", that's a main package. (Ignore is
				// just a convention, but for now it's good enough to just check
				// that.) This is a fairly common way to make a more
				// sophisticated build system than a Makefile allows, so we want
				// to support that case. So, transparently lump the deps
				// together.
				mains := make(map[string]struct{})
				for k, pkgname := range terr.Packages {
					if pkgname == "main" {
						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
						if err2 != nil {
							return nil
						}
						var hasignore bool
						for _, t := range tags {
							if t == "ignore" {
								hasignore = true
								break
							}
						}
						if !hasignore {
							// No ignore tag found - bail out
							return nil
						}
						mains[terr.Files[k]] = struct{}{}
					}
				}
				// Make filtering funcs that will let us look only at the main
				// files, and exclude the main files; inf and outf, respectively
				inf, outf := mkfilter(mains)
				// outf first; if there's another err there, we bail out with a
				// return
				ctx.ReadDir = outf
				po, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = inf
				pi, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				// Restore default directory reading for subsequent dirs.
				ctx.ReadDir = nil
				// Use the other files as baseline, they're the main stuff
				pkg = happy(ip, po)
				mpkg := happy(ip, pi)
				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
			default:
				return err
			}
		}
		ptree.Packages[ip] = PackageOrErr{
			P: pkg,
		}
		return nil
	})
	if err != nil {
		return PackageTree{}, err
	}
	return ptree, nil
}
// wm is a per-package "workmap" node used while computing reachability:
// ex holds external import paths, in holds imports internal to the tree
// that still need to be resolved transitively.
type wm struct {
	ex map[string]struct{}
	in map[string]struct{}
}
// wmToReach takes an externalReach()-style workmap and transitively walks all
// internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map.
func wmToReach(workmap map[string]wm, basedir string) (rm map[string][]string, err error) {
	// Just brute-force through the workmap, repeating until we make no
	// progress, either because no packages have any unresolved internal
	// packages left (in which case we're done), or because some packages can't
	// find something in the 'in' list (which shouldn't be possible)
	//
	// This implementation is hilariously inefficient in pure computational
	// complexity terms - worst case is some flavor of polynomial, versus O(n)
	// for the filesystem scan done in externalReach(). However, the coefficient
	// for filesystem access is so much larger than for memory twiddling that it
	// would probably take an absurdly large and snaky project to ever have that
	// worst-case polynomial growth supercede (or even become comparable to) the
	// linear side.
	//
	// But, if that day comes, we can improve this algorithm.
	var complete bool
	for !complete {
		var progress bool
		complete = true

		for pkg, w := range workmap {
			if len(w.in) == 0 {
				continue
			}
			complete = false

			// Each pass should always empty the original in list, but there
			// could be more in lists inherited from the other package
			// (transitive internal deps)
			for in := range w.in {
				w2, exists := workmap[in]
				if !exists {
					return nil, fmt.Errorf("Should be impossible: %s depends on %s, but %s not in workmap", pkg, in, in)
				}
				progress = true
				delete(w.in, in)

				// Absorb the dep's externals, plus any internals it has not
				// resolved yet (picked up on a later pass).
				for i := range w2.ex {
					w.ex[i] = struct{}{}
				}
				for i := range w2.in {
					w.in[i] = struct{}{}
				}
			}
		}

		if !complete && !progress {
			// Can't conceive of a way that we'd hit this, but this guards
			// against infinite loop
			panic("unreachable")
		}
	}

	// finally, transform to slice for return
	rm = make(map[string][]string)
	// ensure we have a version of the basedir w/trailing slash, for stripping
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	for pkg, w := range workmap {
		if len(w.ex) == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}

		edeps := make([]string, 0, len(w.ex))
		for opkg := range w.ex {
			edeps = append(edeps, opkg)
		}
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}

	return rm, nil
}
func localSrcDir(fi os.FileInfo) bool {
// Ignore _foo and .foo, and testdata
name := fi.Name()
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") || name == "testdata" {
return false
}
// Ignore dirs that are expressly intended for non-project source
switch name {
case "vendor", "Godeps":
return false
default:
return true
}
}
// readBuildTags scans every .go file directly under directory p and returns
// the deduplicated list of build tags found in their leading comments.
func readBuildTags(p string) ([]string, error) {
	_, err := os.Stat(p)
	if err != nil {
		return []string{}, err
	}

	d, err := os.Open(p)
	if err != nil {
		return []string{}, err
	}
	// The original leaked this directory handle; make sure it's released.
	defer d.Close()

	objects, err := d.Readdir(-1)
	if err != nil {
		return []string{}, err
	}

	var tags []string
	for _, obj := range objects {
		// only process Go files
		if !strings.HasSuffix(obj.Name(), ".go") {
			continue
		}
		fp := filepath.Join(p, obj.Name())
		co, err := readGoContents(fp)
		if err != nil {
			return []string{}, err
		}
		// Only look at places where we had a code comment.
		if len(co) == 0 {
			continue
		}
		for _, tg := range findTags(co) {
			found := false
			for _, tt := range tags {
				if tt == tg {
					found = true
					break // already recorded; stop scanning
				}
			}
			if !found {
				tags = append(tags, tg)
			}
		}
	}
	return tags, nil
}
// readFileBuildTags scans a single Go source file and returns the
// deduplicated list of build tags declared in its leading comments.
func readFileBuildTags(fp string) ([]string, error) {
	co, err := readGoContents(fp)
	if err != nil {
		return []string{}, err
	}

	var tags []string
	// Only look at places where we had a code comment.
	if len(co) > 0 {
		for _, tg := range findTags(co) {
			found := false
			for _, tt := range tags {
				if tt == tg {
					found = true
					break // already recorded; stop scanning
				}
			}
			if !found {
				tags = append(tags, tg)
			}
		}
	}
	return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// From a byte slice of a Go file find the tags.
func findTags(co []byte) []string {
p := co
var tgs []string
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
// Only look at comment lines that are well formed in the Go style
if bytes.HasPrefix(line, []byte("//")) {
line = bytes.TrimSpace(line[len([]byte("//")):])
if len(line) > 0 && line[0] == '+' {
f := strings.Fields(string(line))
// We've found a +build tag line.
if f[0] == "+build" {
for _, tg := range f[1:] {
tgs = append(tgs, tg)
}
}
}
}
}
return tgs
}
// getOsValue returns some supported OS name other than n; if no such value
// exists, n itself is returned.
func getOsValue(n string) string {
	for i := range osList {
		if candidate := osList[i]; candidate != n {
			return candidate
		}
	}
	// No alternative available; fall back to the input.
	return n
}
// isSupportedOs reports whether n is one of the recognized GOOS values.
func isSupportedOs(n string) bool {
	for i := range osList {
		if osList[i] == n {
			return true
		}
	}
	return false
}
// getArchValue returns some supported architecture name other than n; if no
// such value exists, n itself is returned.
func getArchValue(n string) string {
	for i := range archList {
		if candidate := archList[i]; candidate != n {
			return candidate
		}
	}
	// No alternative available; fall back to the input.
	return n
}
// isSupportedArch reports whether n is one of the recognized GOARCH values.
func isSupportedArch(n string) bool {
	for i := range archList {
		if archList[i] == n {
			return true
		}
	}
	return false
}
// ensureTrailingSlash returns s carrying exactly one trailing OS path
// separator: an existing trailing separator is preserved (trimmed, then
// re-appended), otherwise one is appended.
func ensureTrailingSlash(s string) string {
	return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
}
// dedupeStrings merges, dedupes, and sorts two string slices — but note the
// asymmetry: that only happens when BOTH inputs are non-empty. If either
// side is empty, the other slice is returned directly: unsorted, not
// deduped, and aliased rather than copied.
func dedupeStrings(s1, s2 []string) (r []string) {
	dedupe := make(map[string]bool)
	if len(s1) > 0 && len(s2) > 0 {
		for _, i := range s1 {
			dedupe[i] = true
		}
		for _, i := range s2 {
			dedupe[i] = true
		}
		for i := range dedupe {
			r = append(r, i)
		}
		// And then re-sort them
		sort.Strings(r)
	} else if len(s1) > 0 {
		r = s1
	} else if len(s2) > 0 {
		r = s2
	}
	return
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	// ImportRoot is the import path of the tree's root package.
	ImportRoot string
	// Packages maps each import path under the root to its parse result.
	Packages map[string]PackageOrErr
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code.
type PackageOrErr struct {
	// P holds the parsed package; only meaningful when Err is nil.
	P Package
	// Err records why the directory could not be parsed as a valid package.
	Err error
}
// ExternalReach looks through a PackageTree and computes the list of external
// packages (not logical children of PackageTree.ImportRoot) that are
// transitively imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. main packages should generally be excluded when analyzing the
// non-root dependency, as they inherently can't be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then it is excluded from all transitive
// dependency chains and does not appear as a key in the final map. That is, if
// you ignore A/foo, then the external package list for all internal packages
// that import A/foo will not include external packages were only reachable
// through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A/foo, then the returned map would be:
//
// map[string][]string{
// "A": []string{},
// "A/bar": []string{"B/baz"},
// }
//
// It is safe to pass a nil map if there are no packages to ignore.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) (map[string][]string, error) {
	// Tracks whether any package had a parse error, so we can distinguish
	// "nothing usable" from "nothing at all".
	var someerrs bool

	if ignore == nil {
		ignore = make(map[string]bool)
	}

	// world's simplest adjacency list
	workmap := make(map[string]wm)

	for ip, perr := range t.Packages {
		if perr.Err != nil {
			someerrs = true
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}

		// Start from the package's own imports; fold in test imports when
		// requested. (dedupeStrings allocates a fresh slice when both inputs
		// are non-empty, so p.Imports is never mutated.)
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}

		w := wm{
			ex: make(map[string]struct{}),
			in: make(map[string]struct{}),
		}

		for _, imp := range imps {
			// Skip ignored imports entirely.
			if ignore[imp] {
				continue
			}
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				// External to the tree (stdlib included).
				w.ex[imp] = struct{}{}
			} else if w2, seen := workmap[imp]; seen {
				// Internal import already processed; absorb its sets.
				for i := range w2.ex {
					w.ex[i] = struct{}{}
				}
				for i := range w2.in {
					w.in[i] = struct{}{}
				}
			} else {
				// Internal import not yet visited; wmToReach resolves it.
				w.in[imp] = struct{}{}
			}
		}

		workmap[ip] = w
	}

	if len(workmap) == 0 {
		if someerrs {
			// TODO proper errs
			return nil, fmt.Errorf("no packages without errors in %s", t.ImportRoot)
		}
		return nil, nil
	}

	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO this passes tests, but doesn't seem right
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are imported by all packages in the PackageTree.
//
// "External" is defined as anything not prefixed, after path cleaning, by the
// PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered
// independently. That means, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// It is safe to pass a nil map if there are no packages to ignore.
func (t PackageTree) ListExternalImports(main, tests bool, ignore map[string]bool) ([]string, error) {
	// Tracks whether any package had a parse error, so we can distinguish
	// "nothing usable" from "nothing at all".
	var someerrs bool
	exm := make(map[string]struct{})

	if ignore == nil {
		ignore = make(map[string]bool)
	}

	for ip, perr := range t.Packages {
		if perr.Err != nil {
			someerrs = true
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}

		// Start from the package's own imports; fold in test imports when
		// requested. (dedupeStrings allocates a fresh slice when both inputs
		// are non-empty, so p.Imports is never mutated.)
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}

		for _, imp := range imps {
			// Collect only non-ignored imports external to the tree.
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) && !ignore[imp] {
				exm[imp] = struct{}{}
			}
		}
	}

	if len(exm) == 0 {
		if someerrs {
			// TODO proper errs
			// (lowercased for Go convention, and to match ExternalReach)
			return nil, fmt.Errorf("no packages without errors in %s", t.ImportRoot)
		}
		return nil, nil
	}

	ex := make([]string, 0, len(exm))
	for p := range exm {
		ex = append(ex, p)
	}
	sort.Strings(ex)
	return ex, nil
}
// checkPrefixSlash reports whether prefix is a path-element-wise prefix of
// s: the strings are equal, or s extends prefix across a separator boundary.
func checkPrefixSlash(s, prefix string) bool {
	if !strings.HasPrefix(s, prefix) {
		return false
	}
	if s == prefix {
		return true
	}
	return strings.HasPrefix(s, ensureTrailingSlash(prefix))
}
Add an error for local (relative) import paths.
Partially fixes #54 for now; a more complete fix will follow.
package vsolver
import (
"bytes"
"fmt"
"go/build"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"text/scanner"
)
// osList and archList enumerate the GOOS/GOARCH values recognized by the Go
// toolchain; both are populated in init().
var osList []string
var archList []string
// stdlib is a set of standard-library import paths, populated in init()
// from stdlibPkgs.
var stdlib = make(map[string]struct{})
// stdlibPkgs is the space-separated list of standard-library packages used
// to seed the stdlib set.
const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
// init populates the package-level osList, archList, and stdlib lookup
// tables before any other code in this package runs.
func init() {
	// The supported systems are listed in
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
	// The lists are not exported so we need to duplicate them here.
	osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
	osList = strings.Split(osListString, " ")
	archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
	archList = strings.Split(archListString, " ")
	// Build a set out of the stdlib package list for O(1) membership checks.
	for _, pkg := range strings.Split(stdlibPkgs, " ") {
		stdlib[pkg] = struct{}{}
	}
}
// listPackages lists info for all packages at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
// fileRoot = "/home/user/go/src/github.com/foo/bar"
// importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
// fileRoot = "/home/user/workspace/path/to/repo"
// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and its subpackage "baz" will be
// "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
//
// NOTE(review): this function mutates ctx.ReadDir while walking, so it is not
// safe for concurrent use; confirm callers never invoke it in parallel.
func listPackages(fileRoot, importRoot string) (PackageTree, error) {
	// Set up a build.ctx for parsing
	ctx := build.Default
	ctx.GOROOT = ""
	ctx.GOPATH = ""
	ctx.UseAllFiles = true
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages:   make(map[string]PackageOrErr),
	}
	// mkfilter returns two funcs that can be injected into a
	// build.Context, letting us filter the results into an "in" and "out" set.
	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
		// in keeps only the entries named in files.
		in = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		// out keeps only the entries NOT named in files.
		out = func(dir string) (fi []os.FileInfo, err error) {
			all, err := ioutil.ReadDir(dir)
			if err != nil {
				return nil, err
			}
			for _, f := range all {
				if _, exists := files[f.Name()]; !exists {
					fi = append(fi, f)
				}
			}
			return fi, nil
		}
		return
	}
	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath:  importPath,
			CommentPath: p.ImportComment,
			Name:        p.Name,
			Imports:     p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}
		return pkg
	}
	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
		if err != nil && err != filepath.SkipDir {
			return err
		}
		if !fi.IsDir() {
			return nil
		}
		// Skip a few types of dirs
		if !localSrcDir(fi) {
			return filepath.SkipDir
		}
		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))
		// Find all the imports, across all os/arch combos
		p, err := ctx.ImportDir(path, analysisImportMode())
		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch terr := err.(type) {
			case *build.NoGoError:
				// Valid directory, no Go source; record the error and move on.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			case *build.MultiplePackageError:
				// Set this up preemptively, so we can easily just return out if
				// something goes wrong. Otherwise, it'll get transparently
				// overwritten later.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				// For now, we're punting entirely on dealing with os/arch
				// combinations. That will be a more significant refactor.
				//
				// However, there is one case we want to allow here - a single
				// file, with "+build ignore", that's a main package. (Ignore is
				// just a convention, but for now it's good enough to just check
				// that.) This is a fairly common way to make a more
				// sophisticated build system than a Makefile allows, so we want
				// to support that case. So, transparently lump the deps
				// together.
				mains := make(map[string]struct{})
				for k, pkgname := range terr.Packages {
					if pkgname == "main" {
						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
						if err2 != nil {
							// Tag read failed; leave the MultiplePackageError
							// recorded above and move on silently.
							return nil
						}
						var hasignore bool
						for _, t := range tags {
							if t == "ignore" {
								hasignore = true
								break
							}
						}
						if !hasignore {
							// No ignore tag found - bail out
							return nil
						}
						mains[terr.Files[k]] = struct{}{}
					}
				}
				// Make filtering funcs that will let us look only at the main
				// files, and exclude the main files; inf and outf, respectively
				inf, outf := mkfilter(mains)
				// outf first; if there's another err there, we bail out with a
				// return
				ctx.ReadDir = outf
				po, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = inf
				pi, err2 := ctx.ImportDir(path, analysisImportMode())
				if err2 != nil {
					return nil
				}
				ctx.ReadDir = nil
				// Use the other files as baseline, they're the main stuff
				pkg = happy(ip, po)
				mpkg := happy(ip, pi)
				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
			default:
				return err
			}
		}
		// This area has some...fuzzy rules, but check all the imports for
		// local/relative/dot-ness, and record an error for the package if we
		// see any.
		var lim []string
		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
			switch {
			// Do allow the single-dot, at least for now
			case imp == "..":
				lim = append(lim, imp)
			// ignore stdlib done this way, b/c that's what the go tooling does
			case strings.HasPrefix(imp, "./"):
				if _, has := stdlib[imp[2:]]; !has {
					lim = append(lim, imp)
				}
			case strings.HasPrefix(imp, "../"):
				if _, has := stdlib[imp[3:]]; !has {
					lim = append(lim, imp)
				}
			}
		}
		if len(lim) > 0 {
			ptree.Packages[ip] = PackageOrErr{
				Err: &LocalImportsError{
					Dir:          ip,
					LocalImports: lim,
				},
			}
		} else {
			ptree.Packages[ip] = PackageOrErr{
				P: pkg,
			}
		}
		return nil
	})
	if err != nil {
		return PackageTree{}, err
	}
	return ptree, nil
}
// LocalImportsError indicates that a package contains at least one relative
// import that will prevent it from compiling.
//
// TODO add a Files property once we're doing our own per-file parsing
type LocalImportsError struct {
	Dir          string   // import path of the offending package
	LocalImports []string // the relative import specs that were found
}

// Error implements the error interface.
//
// Fix: the original Sprintf used a %s verb but supplied no argument, so every
// message rendered as "import path %!s(MISSING) ..."; pass e.Dir.
func (e *LocalImportsError) Error() string {
	return fmt.Sprintf("import path %s had problematic local imports", e.Dir)
}
// wm is one node of the working adjacency list used by wmToReach: the set of
// external (ex) and as-yet-unresolved internal (in) imports for one package.
type wm struct {
	ex map[string]struct{}
	in map[string]struct{}
}

// wmToReach takes an externalReach()-style workmap and transitively walks all
// internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map. An error is returned if a package names an
// internal dependency that is absent from the workmap.
func wmToReach(workmap map[string]wm, basedir string) (rm map[string][]string, err error) {
	// Just brute-force through the workmap, repeating until we make no
	// progress, either because no packages have any unresolved internal
	// packages left (in which case we're done), or because some packages can't
	// find something in the 'in' list (which shouldn't be possible)
	//
	// This implementation is hilariously inefficient in pure computational
	// complexity terms - worst case is some flavor of polynomial, versus O(n)
	// for the filesystem scan done in externalReach(). However, the coefficient
	// for filesystem access is so much larger than for memory twiddling that it
	// would probably take an absurdly large and snaky project to ever have that
	// worst-case polynomial growth supercede (or even become comparable to) the
	// linear side.
	//
	// But, if that day comes, we can improve this algorithm.
	var complete bool
	for !complete {
		var progress bool
		complete = true
		for pkg, w := range workmap {
			if len(w.in) == 0 {
				continue
			}
			complete = false
			// Each pass should always empty the original in list, but there
			// could be more in lists inherited from the other package
			// (transitive internal deps)
			for in := range w.in {
				w2, exists := workmap[in]
				if !exists {
					return nil, fmt.Errorf("should be impossible: %s depends on %s, but %s not in workmap", pkg, in, in)
				}
				progress = true
				delete(w.in, in)
				// Absorb the dependency's external and internal sets; any new
				// internal entries get resolved on a later pass.
				for i := range w2.ex {
					w.ex[i] = struct{}{}
				}
				for i := range w2.in {
					w.in[i] = struct{}{}
				}
			}
		}
		if !complete && !progress {
			// Can't conceive of a way that we'd hit this, but this guards
			// against infinite loop
			panic("unreachable")
		}
	}
	// finally, transform to slice for return
	// (the original also allocated rm before the loop; that allocation was
	// dead, as it was unconditionally re-made here)
	rm = make(map[string][]string)
	// ensure we have a version of the basedir w/trailing slash, for stripping
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	for pkg, w := range workmap {
		if len(w.ex) == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}
		edeps := make([]string, 0, len(w.ex))
		for opkg := range w.ex {
			edeps = append(edeps, opkg)
		}
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}
	return rm, nil
}
func localSrcDir(fi os.FileInfo) bool {
// Ignore _foo and .foo, and testdata
name := fi.Name()
if strings.HasPrefix(name, ".") || strings.HasPrefix(name, "_") || name == "testdata" {
return false
}
// Ignore dirs that are expressly intended for non-project source
switch name {
case "vendor", "Godeps":
return false
default:
return true
}
}
func readBuildTags(p string) ([]string, error) {
_, err := os.Stat(p)
if err != nil {
return []string{}, err
}
d, err := os.Open(p)
if err != nil {
return []string{}, err
}
objects, err := d.Readdir(-1)
if err != nil {
return []string{}, err
}
var tags []string
for _, obj := range objects {
// only process Go files
if strings.HasSuffix(obj.Name(), ".go") {
fp := filepath.Join(p, obj.Name())
co, err := readGoContents(fp)
if err != nil {
return []string{}, err
}
// Only look at places where we had a code comment.
if len(co) > 0 {
t := findTags(co)
for _, tg := range t {
found := false
for _, tt := range tags {
if tt == tg {
found = true
}
}
if !found {
tags = append(tags, tg)
}
}
}
}
}
return tags, nil
}
func readFileBuildTags(fp string) ([]string, error) {
co, err := readGoContents(fp)
if err != nil {
return []string{}, err
}
var tags []string
// Only look at places where we had a code comment.
if len(co) > 0 {
t := findTags(co)
for _, tg := range t {
found := false
for _, tt := range tags {
if tt == tg {
found = true
}
}
if !found {
tags = append(tags, tg)
}
}
}
return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// From a byte slice of a Go file find the tags.
func findTags(co []byte) []string {
p := co
var tgs []string
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
// Only look at comment lines that are well formed in the Go style
if bytes.HasPrefix(line, []byte("//")) {
line = bytes.TrimSpace(line[len([]byte("//")):])
if len(line) > 0 && line[0] == '+' {
f := strings.Fields(string(line))
// We've found a +build tag line.
if f[0] == "+build" {
for _, tg := range f[1:] {
tgs = append(tgs, tg)
}
}
}
}
}
return tgs
}
// getOsValue returns some supported OS name other than n. If osList contains
// no other value, n itself is returned.
func getOsValue(n string) string {
	for i := range osList {
		if osList[i] != n {
			return osList[i]
		}
	}
	return n
}
// isSupportedOs reports whether n appears in osList, the set of OSes known
// to the Go toolchain.
func isSupportedOs(n string) bool {
	for i := range osList {
		if osList[i] == n {
			return true
		}
	}
	return false
}
// getArchValue returns some supported architecture name other than n. If
// archList contains no other value, n itself is returned.
func getArchValue(n string) string {
	for i := range archList {
		if archList[i] != n {
			return archList[i]
		}
	}
	return n
}
// isSupportedArch reports whether n appears in archList, the set of
// architectures known to the Go toolchain.
func isSupportedArch(n string) bool {
	for i := range archList {
		if archList[i] == n {
			return true
		}
	}
	return false
}
func ensureTrailingSlash(s string) string {
return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
}
// dedupeStrings merges s1 and s2, removing duplicates and sorting the result.
// When only one input is non-empty it is returned untouched (neither sorted
// nor deduplicated); when both are empty the result is nil.
func dedupeStrings(s1, s2 []string) (r []string) {
	switch {
	case len(s1) > 0 && len(s2) > 0:
		seen := make(map[string]bool, len(s1)+len(s2))
		for _, v := range s1 {
			seen[v] = true
		}
		for _, v := range s2 {
			seen[v] = true
		}
		for v := range seen {
			r = append(r, v)
		}
		// Map iteration order is random; restore determinism.
		sort.Strings(r)
	case len(s1) > 0:
		r = s1
	case len(s2) > 0:
		r = s2
	}
	return
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	ImportRoot string                  // import path ascribed to the tree's root
	Packages   map[string]PackageOrErr // import path -> parse result for each dir
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code. Exactly one of P or Err is meaningful for a given entry.
type PackageOrErr struct {
	P   Package
	Err error
}
// ExternalReach looks through a PackageTree and computes the list of external
// packages (not logical children of PackageTree.ImportRoot) that are
// transitively imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. main packages should generally be excluded when analyzing the
// non-root dependency, as they inherently can't be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then it is excluded from all transitive
// dependency chains and does not appear as a key in the final map. That is, if
// you ignore A/foo, then the external package list for all internal packages
// that import A/foo will not include external packages were only reachable
// through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A/foo, then the returned map would be:
//
// map[string][]string{
// 	"A": []string{},
// 	"A/bar": []string{"B/baz"},
// }
//
// It is safe to pass a nil map if there are no packages to ignore.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) (map[string][]string, error) {
	var someerrs bool
	if ignore == nil {
		ignore = make(map[string]bool)
	}
	// world's simplest adjacency list
	workmap := make(map[string]wm)
	for ip, perr := range t.Packages {
		if perr.Err != nil {
			someerrs = true
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}
		// Fix: the original reset a shared imps buffer with imps = imps[:0]
		// and then immediately overwrote it with p.Imports; the reset was
		// dead code and has been removed.
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}
		w := wm{
			ex: make(map[string]struct{}),
			in: make(map[string]struct{}),
		}
		for _, imp := range imps {
			if ignore[imp] {
				continue
			}
			// Paths not under ImportRoot are external; internal ones are
			// either merged from an already-built node or queued for the
			// transitive resolution pass in wmToReach.
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				w.ex[imp] = struct{}{}
			} else {
				if w2, seen := workmap[imp]; seen {
					for i := range w2.ex {
						w.ex[i] = struct{}{}
					}
					for i := range w2.in {
						w.in[i] = struct{}{}
					}
				} else {
					w.in[imp] = struct{}{}
				}
			}
		}
		workmap[ip] = w
	}
	if len(workmap) == 0 {
		if someerrs {
			// TODO proper errs
			return nil, fmt.Errorf("no packages without errors in %s", t.ImportRoot)
		}
		return nil, nil
	}
	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO this passes tests, but doesn't seem right
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are imported by all packages in the PackageTree.
//
// "External" is defined as anything not prefixed, after path cleaning, by the
// PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered
// independently. That means, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// It is safe to pass a nil map if there are no packages to ignore.
func (t PackageTree) ListExternalImports(main, tests bool, ignore map[string]bool) ([]string, error) {
	var someerrs bool
	exm := make(map[string]struct{})
	if ignore == nil {
		ignore = make(map[string]bool)
	}
	for ip, perr := range t.Packages {
		if perr.Err != nil {
			someerrs = true
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}
		// Fix: removed the dead imps = imps[:0] reset that was immediately
		// overwritten by p.Imports in the original.
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}
		for _, imp := range imps {
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) && !ignore[imp] {
				exm[imp] = struct{}{}
			}
		}
	}
	if len(exm) == 0 {
		if someerrs {
			// TODO proper errs
			// Fix: error string lowercased per Go convention.
			return nil, fmt.Errorf("no packages without errors in %s", t.ImportRoot)
		}
		return nil, nil
	}
	ex := make([]string, 0, len(exm))
	for p := range exm {
		ex = append(ex, p)
	}
	sort.Strings(ex)
	return ex, nil
}
// checkPrefixSlash checks to see if the prefix is a prefix of the string as-is,
// and that it is either equal OR the prefix + / is still a prefix.
func checkPrefixSlash(s, prefix string) bool {
if !strings.HasPrefix(s, prefix) {
return false
}
return s == prefix || strings.HasPrefix(s, ensureTrailingSlash(prefix))
}
|
package gps
import (
"bytes"
"errors"
"fmt"
"go/build"
"go/parser"
gscan "go/scanner"
"go/token"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/scanner"
"unicode"
)
var (
	osList   []string // OSes recognized by the Go toolchain; filled by init
	archList []string // architectures recognized by the Go toolchain; filled by init
	// ignoreTags lists build tags whose files are skipped entirely during
	// analysis. Currently empty.
	ignoreTags = []string{} //[]string{"appengine", "ignore"} //TODO: appengine is a special case for now: https://github.com/tools/godep/issues/353
)

// init populates osList and archList from the (unexported) toolchain lists.
func init() {
	// The supported systems are listed in
	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
	// The lists are not exported, so we need to duplicate them here.
	osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
	osList = strings.Split(osListString, " ")
	archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
	archList = strings.Split(archListString, " ")
}
// Stored as a var so that tests can swap it out. Ugh globals, ugh.
var isStdLib = doIsStdLib

// doIsStdLib reports whether path denotes a standard library package, using
// the same heuristic as cmd/go's isStandardImportPath: a path is stdlib iff
// its first element contains no dot.
func doIsStdLib(path string) bool {
	root := path
	if idx := strings.IndexByte(path, '/'); idx != -1 {
		root = path[:idx]
	}
	return !strings.Contains(root, ".")
}
// ListPackages reports Go package information about all directories in the tree
// at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
// fileRoot = "/home/user/go/src/github.com/foo/bar"
// importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
// fileRoot = "/home/user/workspace/path/to/repo"
// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and the package at
// "/home/user/workspace/path/to/repo/baz" will be "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
	//fmt.Printf("ListPackages(%q,%q)\n", fileRoot, importRoot)
	// Set up a build.ctx for parsing
	ctx := build.Default
	ctx.GOROOT = ""
	ctx.GOPATH = ""
	ctx.UseAllFiles = true
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages:   make(map[string]PackageOrErr),
	}
	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath:  importPath,
			CommentPath: p.ImportComment,
			Name:        p.Name,
			Imports:     p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}
		return pkg
	}
	// Absolutize the root so the TrimPrefix below strips cleanly.
	var err error
	fileRoot, err = filepath.Abs(fileRoot)
	if err != nil {
		return PackageTree{}, err
	}
	err = filepath.Walk(fileRoot, func(wp string, fi os.FileInfo, err error) error {
		if err != nil && err != filepath.SkipDir {
			return err
		}
		if !fi.IsDir() {
			return nil
		}
		// Skip dirs that are known to hold non-local/dependency code.
		//
		// We don't skip _*, or testdata dirs because, while it may be poor
		// form, importing them is not a compilation error.
		switch fi.Name() {
		case "vendor", "Godeps":
			return filepath.SkipDir
		}
		// We do skip dot-dirs, though, because it's such a ubiquitous standard
		// that they not be visited by normal commands, and because things get
		// really weird if we don't.
		if strings.HasPrefix(fi.Name(), ".") {
			return filepath.SkipDir
		}
		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(wp, fileRoot)))
		// Find all the imports, across all os/arch combos
		//p, err := fullPackageInDir(wp)
		p := &build.Package{
			Dir: wp,
		}
		err = fillPackage(p)
		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch err.(type) {
			case gscan.ErrorList, *gscan.Error, *build.NoGoError:
				// This happens if we encounter malformed or nonexistent Go
				// source code
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			default:
				return err
			}
		}
		// This area has some...fuzzy rules, but check all the imports for
		// local/relative/dot-ness, and record an error for the package if we
		// see any.
		//
		// NOTE(review): this records a relative import as problematic when its
		// target IS stdlib-shaped (isStdLib == true), which is the opposite
		// polarity of an earlier revision of this check — confirm which
		// behavior is intended.
		var lim []string
		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
			switch {
			// Do allow the single-dot, at least for now
			case imp == "..":
				lim = append(lim, imp)
			// ignore stdlib done this way, b/c that's what the go tooling does
			case strings.HasPrefix(imp, "./"):
				if isStdLib(imp[2:]) {
					lim = append(lim, imp)
				}
			case strings.HasPrefix(imp, "../"):
				if isStdLib(imp[3:]) {
					lim = append(lim, imp)
				}
			}
		}
		if len(lim) > 0 {
			ptree.Packages[ip] = PackageOrErr{
				Err: &LocalImportsError{
					Dir:          ip,
					LocalImports: lim,
				},
			}
		} else {
			ptree.Packages[ip] = PackageOrErr{
				P: pkg,
			}
		}
		return nil
	})
	if err != nil {
		return PackageTree{}, err
	}
	return ptree, nil
}
// fillPackage populates p with name, file lists, and (test) imports by parsing
// every .go file directly inside p.Dir. Assumes p.Dir is set at a minimum.
// Files carrying a build tag listed in ignoreTags are recorded in
// IgnoredGoFiles and otherwise skipped.
func fillPackage(p *build.Package) error {
	// Derive SrcRoot/Root from the configured GOPATH/GOROOT source dirs when
	// the caller did not supply them.
	if p.SrcRoot == "" {
		for _, base := range build.Default.SrcDirs() {
			if strings.HasPrefix(p.Dir, base) {
				p.SrcRoot = base
			}
		}
	}
	if p.SrcRoot == "" {
		return errors.New("Unable to find SrcRoot for package " + p.ImportPath)
	}
	if p.Root == "" {
		p.Root = filepath.Dir(p.SrcRoot)
	}
	var buildMatch = "+build "
	// Build-tag lists may be separated by spaces or commas.
	var buildFieldSplit = func(r rune) bool {
		return unicode.IsSpace(r) || r == ','
	}
	gofiles, err := filepath.Glob(filepath.Join(p.Dir, "*.go"))
	if err != nil {
		return err
	}
	if len(gofiles) == 0 {
		return &build.NoGoError{Dir: p.Dir}
	}
	var testImports []string
	var imports []string
NextFile:
	for _, file := range gofiles {
		// ImportsOnly keeps parsing cheap; ParseComments is needed so the
		// +build header comments are retained.
		pf, err := parser.ParseFile(token.NewFileSet(), file, nil, parser.ImportsOnly|parser.ParseComments)
		if err != nil {
			return err
		}
		testFile := strings.HasSuffix(file, "_test.go")
		fname := filepath.Base(file)
		for _, c := range pf.Comments {
			if c.Pos() > pf.Package { // +build must come before package
				continue
			}
			ct := c.Text()
			if i := strings.Index(ct, buildMatch); i != -1 {
				for _, t := range strings.FieldsFunc(ct[i+len(buildMatch):], buildFieldSplit) {
					for _, tag := range ignoreTags {
						if t == tag {
							p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
							continue NextFile
						}
					}
				}
			}
		}
		if testFile {
			p.TestGoFiles = append(p.TestGoFiles, fname)
			// Derive the package name from the first test file seen,
			// stripping any external-test "_test" suffix.
			if p.Name == "" {
				p.Name = strings.TrimSuffix(pf.Name.Name, "_test")
			}
		} else {
			if p.Name == "" {
				p.Name = pf.Name.Name
			}
			p.GoFiles = append(p.GoFiles, fname)
		}
		for _, is := range pf.Imports {
			// Import paths are quoted string literals in the AST.
			name, err := strconv.Unquote(is.Path.Value)
			if err != nil {
				return err // can't happen?
			}
			if testFile {
				testImports = append(testImports, name)
			} else {
				imports = append(imports, name)
			}
		}
	}
	imports = uniq(imports)
	testImports = uniq(testImports)
	p.Imports = imports
	p.TestImports = testImports
	return nil
}
// LocalImportsError indicates that a package contains at least one relative
// import that will prevent it from compiling.
//
// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
type LocalImportsError struct {
	Dir          string
	LocalImports []string
}

// Error renders the offending import path into a human-readable message.
func (e *LocalImportsError) Error() string {
	return "import path " + e.Dir + " had problematic local imports"
}
func readFileBuildTags(fp string) ([]string, error) {
co, err := readGoContents(fp)
if err != nil {
return []string{}, err
}
var tags []string
// Only look at places where we had a code comment.
if len(co) > 0 {
t := findTags(co)
for _, tg := range t {
found := false
for _, tt := range tags {
if tt == tg {
found = true
}
}
if !found {
tags = append(tags, tg)
}
}
}
return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// From a byte slice of a Go file find the tags.
func findTags(co []byte) []string {
p := co
var tgs []string
for len(p) > 0 {
line := p
if i := bytes.IndexByte(line, '\n'); i >= 0 {
line, p = line[:i], p[i+1:]
} else {
p = p[len(p):]
}
line = bytes.TrimSpace(line)
// Only look at comment lines that are well formed in the Go style
if bytes.HasPrefix(line, []byte("//")) {
line = bytes.TrimSpace(line[len([]byte("//")):])
if len(line) > 0 && line[0] == '+' {
f := strings.Fields(string(line))
// We've found a +build tag line.
if f[0] == "+build" {
for _, tg := range f[1:] {
tgs = append(tgs, tg)
}
}
}
}
}
return tgs
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	ImportRoot string                  // import path ascribed to the tree's root
	Packages   map[string]PackageOrErr // import path -> parse result for each dir
}
// dup returns a deep copy of the PackageTree: the Packages map and each
// package's import slices are cloned, so mutations by the caller cannot leak
// back into the original tree.
//
// This is really only useful as a defensive measure to prevent external state
// mutations.
func (t PackageTree) dup() PackageTree {
	clone := PackageTree{
		ImportRoot: t.ImportRoot,
		Packages:   make(map[string]PackageOrErr, len(t.Packages)),
	}
	for ip, poe := range t.Packages {
		// Start from a shallow copy, then detach the slice fields.
		cp := poe
		if len(poe.P.Imports) > 0 {
			cp.P.Imports = append([]string(nil), poe.P.Imports...)
		}
		if len(poe.P.TestImports) > 0 {
			cp.P.TestImports = append([]string(nil), poe.P.TestImports...)
		}
		clone.Packages[ip] = cp
	}
	return clone
}
// wm is one node of the working adjacency list built by ExternalReach: the
// parse error (if any) for a package, plus its external (ex) and unresolved
// internal (in) import sets.
type wm struct {
	err error
	ex  map[string]bool
	in  map[string]bool
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code. Exactly one of P or Err is meaningful for a given entry.
type PackageOrErr struct {
	P   Package
	Err error
}
// ReachMap maps a set of import paths (keys) to the set of external packages
// transitively reachable from the packages at those import paths.
//
// See PackageTree.ExternalReach() for more information.
//
// NOTE(review): keys appear to be relative to the tree's import root as
// produced by wmToReach — confirm against that function's stripping behavior.
type ReachMap map[string][]string
// ExternalReach looks through a PackageTree and computes the list of external
// import statements (that is, import statements pointing to packages that are
// not logical children of PackageTree.ImportRoot) that are transitively
// imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. When utilized by gps' solver, main packages are generally excluded
// from analyzing anything other than the root project, as they necessarily can't
// be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then not only does it not appear in the final
// map, but it is also excluded from the transitive calculations of other
// internal packages. That is, if you ignore A/foo, then the external package
// list for all internal packages that import A/foo will not include external
// packages that are only reachable through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// In this configuration, all of A's packages transitively import B/baz, so the
// returned map would be:
//
// map[string][]string{
// 	"A": []string{"B/baz"},
// 	"A/foo": []string{"B/baz"}
// 	"A/bar": []string{"B/baz"},
// }
//
// However, if you ignore A/foo, then A's path to B/baz is broken, and A/foo is
// omitted entirely. Thus, the returned map would be:
//
// map[string][]string{
// 	"A": []string{},
// 	"A/bar": []string{"B/baz"},
// }
//
// If there are no packages to ignore, it is safe to pass a nil map.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) ReachMap {
	if ignore == nil {
		ignore = make(map[string]bool)
	}
	// world's simplest adjacency list
	workmap := make(map[string]wm)
	for ip, perr := range t.Packages {
		if perr.Err != nil {
			// Keep errored packages in the workmap so wmToReach can poison
			// any paths that traverse them.
			workmap[ip] = wm{
				err: perr.Err,
			}
			continue
		}
		p := perr.P
		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}
		// Fix: the original reset a shared imps buffer with imps = imps[:0]
		// and then immediately overwrote it with p.Imports; the reset was
		// dead code and has been removed.
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}
		w := wm{
			ex: make(map[string]bool),
			in: make(map[string]bool),
		}
		for _, imp := range imps {
			// Skip ignored imports
			if ignore[imp] {
				continue
			}
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				w.ex[imp] = true
			} else {
				if w2, seen := workmap[imp]; seen {
					for i := range w2.ex {
						w.ex[i] = true
					}
					for i := range w2.in {
						w.in[i] = true
					}
				} else {
					w.in[imp] = true
				}
			}
		}
		workmap[ip] = w
	}
	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
}
// wmToReach takes an internal "workmap" constructed by
// PackageTree.ExternalReach(), transitively walks (via depth-first traversal)
// all internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map.
//
// This is mostly separated out for testing purposes.
func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
	// Uses depth-first exploration to compute reachability into external
	// packages, dropping any internal packages on "poisoned paths" - a path
	// containing a package with an error, or with a dep on an internal package
	// that's missing.

	// Standard DFS node-coloring scheme: white = unvisited, grey = on the
	// current exploration stack, black = fully processed.
	const (
		white uint8 = iota
		grey
		black
	)
	colors := make(map[string]uint8)
	allreachsets := make(map[string]map[string]struct{})

	// poison is a helper func to eliminate specific reachsets from allreachsets
	poison := func(path []string) {
		for _, ppkg := range path {
			delete(allreachsets, ppkg)
		}
	}

	var dfe func(string, []string) bool

	// dfe is the depth-first-explorer that computes a safe, error-free external
	// reach map.
	//
	// pkg is the import path of the pkg currently being visited; path is the
	// stack of parent packages we've visited to get to pkg. The return value
	// indicates whether the level completed successfully (true) or if it was
	// poisoned (false).
	//
	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
	// parent reachsets, rather than a list of parent package string names.
	// might be able to eliminate the use of allreachsets map-of-maps entirely.
	dfe = func(pkg string, path []string) bool {
		// white is the zero value of uint8, which is what we want if the pkg
		// isn't in the colors map, so this works fine
		switch colors[pkg] {
		case white:
			// first visit to this pkg; mark it as in-process (grey)
			colors[pkg] = grey

			// make sure it's present and w/out errs
			w, exists := workmap[pkg]
			if !exists || w.err != nil {
				// Does not exist or has an err; poison self and all parents
				poison(path)
				// we know we're done here, so mark it black
				colors[pkg] = black
				return false
			}
			// pkg exists with no errs. mark it as in-process (grey), and start
			// a reachmap for it
			//
			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
			rs := make(map[string]struct{})

			// Push self onto the path slice. Passing this as a value has the
			// effect of auto-popping the slice, while also giving us safe
			// memory reuse.
			path = append(path, pkg)

			// Dump this package's external pkgs into its own reachset. Separate
			// loop from the parent dump to avoid nested map loop lookups.
			for ex := range w.ex {
				rs[ex] = struct{}{}
			}
			allreachsets[pkg] = rs

			// Push this pkg's external imports into all parent reachsets. Not
			// all parents will necessarily have a reachset; none, some, or all
			// could have been poisoned by a different path than what we're on
			// right now. (Or we could be at depth 0)
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range w.ex {
						prs[ex] = struct{}{}
					}
				}
			}

			// Now, recurse until done, or a false bubbles up, indicating the
			// path is poisoned.
			var clean bool
			for in := range w.in {
				// It's possible, albeit weird, for a package to import itself.
				// If we try to visit self, though, then it erroneously poisons
				// the path, as it would be interpreted as grey. In reality,
				// this becomes a no-op, so just skip it.
				if in == pkg {
					continue
				}

				clean = dfe(in, path)
				if !clean {
					// Path is poisoned. Our reachmap was already deleted by the
					// path we're returning from; mark ourselves black, then
					// bubble up the poison. This is OK to do early, before
					// exploring all internal imports, because the outer loop
					// visits all internal packages anyway.
					//
					// In fact, stopping early is preferable - white subpackages
					// won't have to iterate pointlessly through a parent path
					// with no reachset.
					colors[pkg] = black
					return false
				}
			}

			// Fully done with this pkg; no transitive problems.
			colors[pkg] = black
			return true

		case grey:
			// grey means an import cycle; guaranteed badness right here. You'd
			// hope we never encounter it in a dependency (really? you published
			// that code?), but we have to defend against it.
			//
			// FIXME handle import cycles by dropping everything involved. (i
			// think we need to compute SCC, then drop *all* of them?)
			colors[pkg] = black
			poison(append(path, pkg)) // poison self and parents

		case black:
			// black means we're done with the package. If it has an entry in
			// allreachsets, it completed successfully. If not, it was poisoned,
			// and we need to bubble the poison back up.
			rs, exists := allreachsets[pkg]
			if !exists {
				// just poison parents; self was necessarily already poisoned
				poison(path)
				return false
			}

			// It's good; pull over all of the external imports from its
			// reachset into all non-poisoned parent reachsets
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range rs {
						prs[ex] = struct{}{}
					}
				}
			}
			return true

		default:
			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
		}

		// Only the grey (import cycle) case falls through to here, reporting
		// poison to its caller after dropping the cycle members.
		return false
	}

	// Run the depth-first exploration.
	//
	// Don't bother computing graph sources, this straightforward loop works
	// comparably well, and fits nicely with an escape hatch in the dfe.
	var path []string
	for pkg := range workmap {
		dfe(pkg, path)
	}

	if len(allreachsets) == 0 {
		return nil
	}

	// Flatten allreachsets into the final reachlist.
	//
	// NOTE(review): basedir is trimmed using the OS path separator, but the
	// map keys are slash-separated import paths; currently moot because the
	// only caller passes basedir == "" - confirm before changing that.
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	rm := make(map[string][]string)
	for pkg, rs := range allreachsets {
		rlen := len(rs)
		if rlen == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}

		edeps := make([]string, rlen)
		k := 0
		for opkg := range rs {
			edeps[k] = opkg
			k++
		}

		// Sort for deterministic output.
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}

	return rm
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are reachable through imports from all valid packages in a
// ReachMap, as computed by PackageTree.ExternalReach().
//
// Whether main packages and test imports were included is determined by the
// arguments that were passed to PackageTree.ExternalReach() when this ReachMap
// was built. "External" is defined as anything not prefixed, after path
// cleaning, by the PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered independently
// (with one set of exceptions, noted below). That means, given a PackageTree
// with root A and packages at A, A/foo, and A/bar, and the following import
// chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// Which packages are ignored is likewise fixed when the ReachMap is built; it
// is safe to pass a nil ignore map to PackageTree.ExternalReach().
//
// If an internal package has an error (that is, PackageOrErr is Err), it is excluded from
// consideration. Internal packages that transitively import the error package
// are also excluded. So, if:
//
// -> B/foo
// /
// A
// \
// -> A/bar -> B/baz
//
// And A/bar has some error in it, then both A and A/bar will be eliminated from
// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
// its errors, is ignored, however, then A will remain, and B/foo will be in the
// results.
//
// Finally, note that if a directory is named "testdata", or has a leading dot
// or underscore, it will not be directly analyzed as a source. This is in
// keeping with Go tooling conventions that such directories should be ignored.
// So, if:
//
// A -> B/foo
// A/.bar -> B/baz
// A/_qux -> B/baz
// A/testdata -> B/baz
//
// Then B/foo will be returned, but B/baz will not, because all three of the
// packages that import it are in directories with disallowed names.
//
// HOWEVER, in keeping with the Go compiler, if one of those packages in a
// disallowed directory is imported by a package in an allowed directory, then
// it *will* be used. That is, while tools like go list will ignore a directory
// named .foo, you can still import from .foo. Thus, it must be included. So,
// if:
//
// -> B/foo
// /
// A
// \
// -> A/.bar -> B/baz
//
// A is legal, and it imports A/.bar, so the results will include B/baz.
func (rm ReachMap) ListExternalImports() []string {
	exm := make(map[string]struct{})

outer:
	for pkg, reach := range rm {
		// Packages living under a dot-, underscore-, or testdata-named
		// directory are not treated as sources themselves; anything legally
		// reachable through them was already hoisted into allowed parents by
		// ExternalReach.
		//
		// TODO(sdboyer) strings.Split will always heap alloc, which isn't great to do
		// in a loop like this. We could also just parse it ourselves...
		for _, elem := range strings.Split(pkg, "/") {
			if elem == "testdata" || strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") {
				continue outer
			}
		}

		for _, ex := range reach {
			exm[ex] = struct{}{}
		}
	}

	if len(exm) == 0 {
		return nil
	}

	// Flatten the set into a sorted slice.
	ex := make([]string, 0, len(exm))
	for p := range exm {
		ex = append(ex, p)
	}
	sort.Strings(ex)
	return ex
}
// checkPrefixSlash checks to see if the prefix is a prefix of the string as-is,
// and that it is either equal OR the prefix + / is still a prefix.
func checkPrefixSlash(s, prefix string) bool {
	switch {
	case !strings.HasPrefix(s, prefix):
		return false
	case s == prefix:
		return true
	default:
		// "foo/bar" must not match prefix "foo/b"; requiring the
		// slash-terminated prefix rules out partial path elements.
		return strings.HasPrefix(s, ensureTrailingSlash(prefix))
	}
}
func ensureTrailingSlash(s string) string {
return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
}
// dedupeStrings merges s1 and s2 into a single sorted, deduplicated slice.
//
// NOTE(review): when only one input is non-empty it is returned as-is -
// unsorted, undeduplicated, and aliasing the caller's slice; callers appear to
// rely on that, so it is preserved here.
func dedupeStrings(s1, s2 []string) (r []string) {
	if len(s1) == 0 {
		if len(s2) > 0 {
			return s2
		}
		return nil
	}
	if len(s2) == 0 {
		return s1
	}

	// Both are non-empty: union via a set, then sort for determinism.
	set := make(map[string]bool, len(s1)+len(s2))
	for _, v := range s1 {
		set[v] = true
	}
	for _, v := range s2 {
		set[v] = true
	}

	r = make([]string, 0, len(set))
	for v := range set {
		r = append(r, v)
	}
	sort.Strings(r)
	return r
}
// uniq sorts a (if needed) and removes adjacent duplicates in place, returning
// the shortened slice. A nil input yields an empty, non-nil slice.
//
// NOTE(review): an empty-string element sorts first and compares equal to the
// initial sentinel, so "" is always dropped from the result - preserved here
// as it matches the original behavior.
func uniq(a []string) []string {
	if a == nil {
		return []string{}
	}

	if !sort.StringsAreSorted(a) {
		sort.Strings(a)
	}

	// Compact into the front of the same backing array; the write index never
	// passes the read index, so this is safe while ranging.
	out := a[:0]
	prev := ""
	for _, v := range a {
		if v != prev {
			out = append(out, v)
			prev = v
		}
	}
	return out
}
Remove now-superfluous local build.Context obj
package gps
import (
"bytes"
"errors"
"fmt"
"go/build"
"go/parser"
gscan "go/scanner"
"go/token"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"text/scanner"
"unicode"
)
var (
	// osList and archList hold the GOOS and GOARCH values recognized by the
	// Go toolchain; both are populated by init() below.
	osList []string
	archList []string
	// ignoreTags lists build tags whose files fillPackage should record as
	// ignored rather than parse; currently empty.
	ignoreTags = []string{} //[]string{"appengine", "ignore"} //TODO: appengine is a special case for now: https://github.com/tools/godep/issues/353
)
// init populates osList and archList. The supported systems are listed in
// https://github.com/golang/go/blob/master/src/go/build/syslist.go
// but those tables are not exported, so we duplicate them here.
func init() {
	osList = strings.Fields("android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows")
	archList = strings.Fields("386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64")
}
// Stored as a var so that tests can swap it out. Ugh globals, ugh.
var isStdLib = doIsStdLib

// doIsStdLib reports whether path looks like a standard library import path:
// its first path element contains no dot, so no domain-qualified host.
// Adapted from isStandardImportPath in Go's src/cmd/go/pkg.go.
func doIsStdLib(path string) bool {
	first := path
	if i := strings.Index(path, "/"); i >= 0 {
		first = path[:i]
	}
	return !strings.Contains(first, ".")
}
// ListPackages reports Go package information about all directories in the tree
// at or below the provided fileRoot.
//
// Directories without any valid Go files are excluded. Directories with
// multiple packages are excluded.
//
// The importRoot parameter is prepended to the relative path when determining
// the import path for each package. The obvious case is for something typical,
// like:
//
// fileRoot = "/home/user/go/src/github.com/foo/bar"
// importRoot = "github.com/foo/bar"
//
// where the fileRoot and importRoot align. However, if you provide:
//
// fileRoot = "/home/user/workspace/path/to/repo"
// importRoot = "github.com/foo/bar"
//
// then the root package at path/to/repo will be ascribed import path
// "github.com/foo/bar", and the package at
// "/home/user/workspace/path/to/repo/baz" will be "github.com/foo/bar/baz".
//
// A PackageTree is returned, which contains the ImportRoot and map of import path
// to PackageOrErr - each path under the root that exists will have either a
// Package, or an error describing why the directory is not a valid package.
func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
	ptree := PackageTree{
		ImportRoot: importRoot,
		Packages:   make(map[string]PackageOrErr),
	}

	// helper func to create a Package from a *build.Package
	happy := func(importPath string, p *build.Package) Package {
		// Happy path - simple parsing worked
		pkg := Package{
			ImportPath:  importPath,
			CommentPath: p.ImportComment,
			Name:        p.Name,
			Imports:     p.Imports,
			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
		}

		return pkg
	}

	// Make fileRoot absolute so that the TrimPrefix below yields a stable
	// root-relative segment for each visited directory.
	var err error
	fileRoot, err = filepath.Abs(fileRoot)
	if err != nil {
		return PackageTree{}, err
	}

	err = filepath.Walk(fileRoot, func(wp string, fi os.FileInfo, err error) error {
		// NOTE(review): filepath.Walk does not pass SkipDir to the callback
		// itself, so the SkipDir comparison here looks purely defensive.
		if err != nil && err != filepath.SkipDir {
			return err
		}
		if !fi.IsDir() {
			return nil
		}

		// Skip dirs that are known to hold non-local/dependency code.
		//
		// We don't skip _*, or testdata dirs because, while it may be poor
		// form, importing them is not a compilation error.
		switch fi.Name() {
		case "vendor", "Godeps":
			return filepath.SkipDir
		}
		// We do skip dot-dirs, though, because it's such a ubiquitous standard
		// that they not be visited by normal commands, and because things get
		// really weird if we don't.
		if strings.HasPrefix(fi.Name(), ".") {
			return filepath.SkipDir
		}

		// Compute the import path. Run the result through ToSlash(), so that windows
		// paths are normalized to Unix separators, as import paths are expected
		// to be.
		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(wp, fileRoot)))

		// Find all the imports, across all os/arch combos
		//p, err := fullPackageInDir(wp)
		p := &build.Package{
			Dir: wp,
		}
		err = fillPackage(p)

		var pkg Package
		if err == nil {
			pkg = happy(ip, p)
		} else {
			switch err.(type) {
			case gscan.ErrorList, *gscan.Error, *build.NoGoError:
				// This happens if we encounter malformed or nonexistent Go
				// source code; record the error for this path and keep walking.
				ptree.Packages[ip] = PackageOrErr{
					Err: err,
				}
				return nil
			default:
				// Any other error aborts the whole walk.
				return err
			}
		}

		// This area has some...fuzzy rules, but check all the imports for
		// local/relative/dot-ness, and record an error for the package if we
		// see any.
		var lim []string
		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
			switch {
			// Do allow the single-dot, at least for now
			case imp == "..":
				lim = append(lim, imp)
			// ignore stdlib done this way, b/c that's what the go tooling does
			case strings.HasPrefix(imp, "./"):
				if isStdLib(imp[2:]) {
					lim = append(lim, imp)
				}
			case strings.HasPrefix(imp, "../"):
				if isStdLib(imp[3:]) {
					lim = append(lim, imp)
				}
			}
		}

		if len(lim) > 0 {
			ptree.Packages[ip] = PackageOrErr{
				Err: &LocalImportsError{
					Dir:          ip,
					LocalImports: lim,
				},
			}
		} else {
			ptree.Packages[ip] = PackageOrErr{
				P: pkg,
			}
		}

		return nil
	})

	if err != nil {
		return PackageTree{}, err
	}

	return ptree, nil
}
// fillPackage populates p's name, file lists, and import lists by parsing the
// *.go files found in p.Dir. Assumes p.Dir is set at a minimum.
func fillPackage(p *build.Package) error {
	// Derive SrcRoot from the configured GOROOT/GOPATH src dirs. Note the
	// loop does not break, so the last matching base wins.
	if p.SrcRoot == "" {
		for _, base := range build.Default.SrcDirs() {
			if strings.HasPrefix(p.Dir, base) {
				p.SrcRoot = base
			}
		}
	}

	if p.SrcRoot == "" {
		return errors.New("Unable to find SrcRoot for package " + p.ImportPath)
	}

	if p.Root == "" {
		p.Root = filepath.Dir(p.SrcRoot)
	}

	var buildMatch = "+build "
	var buildFieldSplit = func(r rune) bool {
		return unicode.IsSpace(r) || r == ','
	}

	gofiles, err := filepath.Glob(filepath.Join(p.Dir, "*.go"))
	if err != nil {
		return err
	}

	if len(gofiles) == 0 {
		return &build.NoGoError{Dir: p.Dir}
	}

	var testImports []string
	var imports []string
NextFile:
	for _, file := range gofiles {
		// Imports and comments are all we need; skip parsing function bodies.
		pf, err := parser.ParseFile(token.NewFileSet(), file, nil, parser.ImportsOnly|parser.ParseComments)
		if err != nil {
			return err
		}
		testFile := strings.HasSuffix(file, "_test.go")
		fname := filepath.Base(file)

		// Scan pre-package comments for +build constraints; any file carrying
		// a tag in ignoreTags is recorded as ignored and skipped entirely.
		for _, c := range pf.Comments {
			if c.Pos() > pf.Package { // +build must come before package
				continue
			}

			ct := c.Text()
			if i := strings.Index(ct, buildMatch); i != -1 {
				for _, t := range strings.FieldsFunc(ct[i+len(buildMatch):], buildFieldSplit) {
					for _, tag := range ignoreTags {
						if t == tag {
							p.IgnoredGoFiles = append(p.IgnoredGoFiles, fname)
							continue NextFile
						}
					}
				}
			}
		}

		// Record the file, and on first sight, the package name. Test files
		// may use the "_test"-suffixed package name, which is trimmed.
		if testFile {
			p.TestGoFiles = append(p.TestGoFiles, fname)
			if p.Name == "" {
				p.Name = strings.TrimSuffix(pf.Name.Name, "_test")
			}
		} else {
			if p.Name == "" {
				p.Name = pf.Name.Name
			}
			p.GoFiles = append(p.GoFiles, fname)
		}

		// Accumulate import paths, routed to the test or non-test list.
		for _, is := range pf.Imports {
			name, err := strconv.Unquote(is.Path.Value)
			if err != nil {
				return err // can't happen?
			}
			if testFile {
				testImports = append(testImports, name)
			} else {
				imports = append(imports, name)
			}
		}
	}

	// Sort and dedupe before storing.
	imports = uniq(imports)
	testImports = uniq(testImports)
	p.Imports = imports
	p.TestImports = testImports
	return nil
}
// LocalImportsError indicates that a package contains at least one relative
// import that will prevent it from compiling.
//
// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
type LocalImportsError struct {
	// Dir is the import path of the offending package.
	Dir string
	// LocalImports lists the problematic relative import paths found in it.
	LocalImports []string
}

// Error names the offending directory and enumerates the problematic local
// import paths, so callers can see exactly what needs fixing. (Previously the
// message omitted the imports the struct already carried.)
func (e *LocalImportsError) Error() string {
	return fmt.Sprintf("import path %s had problematic local imports: %s", e.Dir, strings.Join(e.LocalImports, ", "))
}
// readFileBuildTags returns the build tags declared above the package clause
// of the Go file at fp, deduplicated in first-seen order.
func readFileBuildTags(fp string) ([]string, error) {
	co, err := readGoContents(fp)
	if err != nil {
		return []string{}, err
	}

	// findTags may report a tag more than once; keep only the first
	// occurrence while preserving encounter order.
	var tags []string
	seen := make(map[string]bool)
	for _, tg := range findTags(co) {
		if !seen[tg] {
			seen[tg] = true
			tags = append(tags, tg)
		}
	}

	return tags, nil
}
// Read contents of a Go file up to the package declaration. This can be used
// to find the the build tags.
func readGoContents(fp string) ([]byte, error) {
f, err := os.Open(fp)
defer f.Close()
if err != nil {
return []byte{}, err
}
var s scanner.Scanner
s.Init(f)
var tok rune
var pos scanner.Position
for tok != scanner.EOF {
tok = s.Scan()
// Getting the token text will skip comments by default.
tt := s.TokenText()
// build tags will not be after the package declaration.
if tt == "package" {
pos = s.Position
break
}
}
var buf bytes.Buffer
f.Seek(0, 0)
_, err = io.CopyN(&buf, f, int64(pos.Offset))
if err != nil {
return []byte{}, err
}
return buf.Bytes(), nil
}
// findTags extracts every +build tag named in the well-formed "// +build ..."
// comment lines of the given Go-file bytes. Tags are returned in order of
// appearance and may repeat.
func findTags(co []byte) []string {
	var tgs []string
	for _, raw := range bytes.Split(co, []byte("\n")) {
		line := bytes.TrimSpace(raw)

		// Only consider Go-style line comments.
		if !bytes.HasPrefix(line, []byte("//")) {
			continue
		}
		line = bytes.TrimSpace(line[2:])
		if len(line) == 0 || line[0] != '+' {
			continue
		}

		// We've found a candidate constraint line; collect its tags.
		f := strings.Fields(string(line))
		if f[0] == "+build" {
			tgs = append(tgs, f[1:]...)
		}
	}
	return tgs
}
// A PackageTree represents the results of recursively parsing a tree of
// packages, starting at the ImportRoot. The results of parsing the files in the
// directory identified by each import path - a Package or an error - are stored
// in the Packages map, keyed by that import path.
type PackageTree struct {
	// ImportRoot is the import path ascribed to the root directory of the tree.
	ImportRoot string
	// Packages maps each discovered import path to its parsed Package, or to
	// the error encountered while parsing that directory.
	Packages map[string]PackageOrErr
}
// dup copies the PackageTree.
//
// This is really only useful as a defensive measure to prevent external state
// mutations.
func (t PackageTree) dup() PackageTree {
	cp := PackageTree{
		ImportRoot: t.ImportRoot,
		Packages:   make(map[string]PackageOrErr, len(t.Packages)),
	}

	for path, poe := range t.Packages {
		c := PackageOrErr{
			Err: poe.Err,
			P:   poe.P,
		}
		// The Package struct was copied by value above, but its slice fields
		// still alias the original backing arrays; clone them for isolation.
		if len(poe.P.Imports) > 0 {
			c.P.Imports = append([]string(nil), poe.P.Imports...)
		}
		if len(poe.P.TestImports) > 0 {
			c.P.TestImports = append([]string(nil), poe.P.TestImports...)
		}
		cp.Packages[path] = c
	}

	return cp
}
// wm is a "workmap" node built by ExternalReach for one internal package: it
// holds either the error that invalidated the package, or the package's sets
// of direct external (ex) and internal (in) imports.
type wm struct {
	err error
	ex map[string]bool
	in map[string]bool
}
// PackageOrErr stores the results of attempting to parse a single directory for
// Go source code. Exactly one of the two fields is meaningful: ListPackages
// sets either P or Err, never both.
type PackageOrErr struct {
	// P holds the parsed package; valid only when Err is nil.
	P Package
	// Err, when non-nil, describes why the directory is not a usable package.
	Err error
}
// ReachMap maps a set of import paths (keys) to the set of external packages
// transitively reachable from the packages at those import paths.
//
// Each value slice is sorted; a nil value means the package reaches no
// external packages at all.
//
// See PackageTree.ExternalReach() for more information.
type ReachMap map[string][]string
// ExternalReach looks through a PackageTree and computes the list of external
// import statements (that is, import statements pointing to packages that are
// not logical children of PackageTree.ImportRoot) that are transitively
// imported by the internal packages in the tree.
//
// main indicates whether (true) or not (false) to include main packages in the
// analysis. When utilized by gps' solver, main packages are generally excluded
// from analyzing anything other than the root project, as they necessarily can't
// be imported.
//
// tests indicates whether (true) or not (false) to include imports from test
// files in packages when computing the reach map.
//
// ignore is a map of import paths that, if encountered, should be excluded from
// analysis. This exclusion applies to both internal and external packages. If
// an external import path is ignored, it is simply omitted from the results.
//
// If an internal path is ignored, then not only does it not appear in the final
// map, but it is also excluded from the transitive calculations of other
// internal packages. That is, if you ignore A/foo, then the external package
// list for all internal packages that import A/foo will not include external
// packages that are only reachable through A/foo.
//
// Visually, this means that, given a PackageTree with root A and packages at A,
// A/foo, and A/bar, and the following import chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// In this configuration, all of A's packages transitively import B/baz, so the
// returned map would be:
//
// map[string][]string{
// "A": []string{"B/baz"},
// "A/foo": []string{"B/baz"}
// "A/bar": []string{"B/baz"},
// }
//
// However, if you ignore A/foo, then A's path to B/baz is broken, and A/foo is
// omitted entirely. Thus, the returned map would be:
//
// map[string][]string{
// "A": []string{},
// "A/bar": []string{"B/baz"},
// }
//
// If there are no packages to ignore, it is safe to pass a nil map.
func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) ReachMap {
	if ignore == nil {
		ignore = make(map[string]bool)
	}

	// world's simplest adjacency list
	workmap := make(map[string]wm)

	for ip, perr := range t.Packages {
		if perr.Err != nil {
			workmap[ip] = wm{
				err: perr.Err,
			}
			continue
		}
		p := perr.P

		// Skip main packages, unless param says otherwise
		if p.Name == "main" && !main {
			continue
		}
		// Skip ignored packages
		if ignore[ip] {
			continue
		}

		// Start from the package's regular imports; fold in test imports only
		// when requested. (Previously a shared `imps` slice was reset with
		// imps[:0] and then immediately overwritten - a dead store; imps only
		// aliases p.Imports here and is never mutated, as dedupeStrings
		// allocates a fresh slice when it merges.)
		imps := p.Imports
		if tests {
			imps = dedupeStrings(imps, p.TestImports)
		}

		w := wm{
			ex: make(map[string]bool),
			in: make(map[string]bool),
		}

		for _, imp := range imps {
			// Skip ignored imports
			if ignore[imp] {
				continue
			}

			// Anything not under the import root is external.
			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
				w.ex[imp] = true
			} else {
				// If the imported internal package was already processed, its
				// direct sets can be flattened in now; otherwise record the
				// edge for the transitive walk in wmToReach.
				if w2, seen := workmap[imp]; seen {
					for i := range w2.ex {
						w.ex[i] = true
					}
					for i := range w2.in {
						w.in[i] = true
					}
				} else {
					w.in[imp] = true
				}
			}
		}

		workmap[ip] = w
	}

	//return wmToReach(workmap, t.ImportRoot)
	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
}
// wmToReach takes an internal "workmap" constructed by
// PackageTree.ExternalReach(), transitively walks (via depth-first traversal)
// all internal imports until they reach an external path or terminate, then
// translates the results into a slice of external imports for each internal
// pkg.
//
// The basedir string, with a trailing slash ensured, will be stripped from the
// keys of the returned map.
//
// This is mostly separated out for testing purposes.
func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
	// Uses depth-first exploration to compute reachability into external
	// packages, dropping any internal packages on "poisoned paths" - a path
	// containing a package with an error, or with a dep on an internal package
	// that's missing.

	// Standard DFS node-coloring scheme: white = unvisited, grey = on the
	// current exploration stack, black = fully processed.
	const (
		white uint8 = iota
		grey
		black
	)
	colors := make(map[string]uint8)
	allreachsets := make(map[string]map[string]struct{})

	// poison is a helper func to eliminate specific reachsets from allreachsets
	poison := func(path []string) {
		for _, ppkg := range path {
			delete(allreachsets, ppkg)
		}
	}

	var dfe func(string, []string) bool

	// dfe is the depth-first-explorer that computes a safe, error-free external
	// reach map.
	//
	// pkg is the import path of the pkg currently being visited; path is the
	// stack of parent packages we've visited to get to pkg. The return value
	// indicates whether the level completed successfully (true) or if it was
	// poisoned (false).
	//
	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
	// parent reachsets, rather than a list of parent package string names.
	// might be able to eliminate the use of allreachsets map-of-maps entirely.
	dfe = func(pkg string, path []string) bool {
		// white is the zero value of uint8, which is what we want if the pkg
		// isn't in the colors map, so this works fine
		switch colors[pkg] {
		case white:
			// first visit to this pkg; mark it as in-process (grey)
			colors[pkg] = grey

			// make sure it's present and w/out errs
			w, exists := workmap[pkg]
			if !exists || w.err != nil {
				// Does not exist or has an err; poison self and all parents
				poison(path)
				// we know we're done here, so mark it black
				colors[pkg] = black
				return false
			}
			// pkg exists with no errs. mark it as in-process (grey), and start
			// a reachmap for it
			//
			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
			rs := make(map[string]struct{})

			// Push self onto the path slice. Passing this as a value has the
			// effect of auto-popping the slice, while also giving us safe
			// memory reuse.
			path = append(path, pkg)

			// Dump this package's external pkgs into its own reachset. Separate
			// loop from the parent dump to avoid nested map loop lookups.
			for ex := range w.ex {
				rs[ex] = struct{}{}
			}
			allreachsets[pkg] = rs

			// Push this pkg's external imports into all parent reachsets. Not
			// all parents will necessarily have a reachset; none, some, or all
			// could have been poisoned by a different path than what we're on
			// right now. (Or we could be at depth 0)
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range w.ex {
						prs[ex] = struct{}{}
					}
				}
			}

			// Now, recurse until done, or a false bubbles up, indicating the
			// path is poisoned.
			var clean bool
			for in := range w.in {
				// It's possible, albeit weird, for a package to import itself.
				// If we try to visit self, though, then it erroneously poisons
				// the path, as it would be interpreted as grey. In reality,
				// this becomes a no-op, so just skip it.
				if in == pkg {
					continue
				}

				clean = dfe(in, path)
				if !clean {
					// Path is poisoned. Our reachmap was already deleted by the
					// path we're returning from; mark ourselves black, then
					// bubble up the poison. This is OK to do early, before
					// exploring all internal imports, because the outer loop
					// visits all internal packages anyway.
					//
					// In fact, stopping early is preferable - white subpackages
					// won't have to iterate pointlessly through a parent path
					// with no reachset.
					colors[pkg] = black
					return false
				}
			}

			// Fully done with this pkg; no transitive problems.
			colors[pkg] = black
			return true

		case grey:
			// grey means an import cycle; guaranteed badness right here. You'd
			// hope we never encounter it in a dependency (really? you published
			// that code?), but we have to defend against it.
			//
			// FIXME handle import cycles by dropping everything involved. (i
			// think we need to compute SCC, then drop *all* of them?)
			colors[pkg] = black
			poison(append(path, pkg)) // poison self and parents

		case black:
			// black means we're done with the package. If it has an entry in
			// allreachsets, it completed successfully. If not, it was poisoned,
			// and we need to bubble the poison back up.
			rs, exists := allreachsets[pkg]
			if !exists {
				// just poison parents; self was necessarily already poisoned
				poison(path)
				return false
			}

			// It's good; pull over all of the external imports from its
			// reachset into all non-poisoned parent reachsets
			for _, ppkg := range path {
				if prs, exists := allreachsets[ppkg]; exists {
					for ex := range rs {
						prs[ex] = struct{}{}
					}
				}
			}
			return true

		default:
			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
		}

		// Only the grey (import cycle) case falls through to here, reporting
		// poison to its caller after dropping the cycle members.
		return false
	}

	// Run the depth-first exploration.
	//
	// Don't bother computing graph sources, this straightforward loop works
	// comparably well, and fits nicely with an escape hatch in the dfe.
	var path []string
	for pkg := range workmap {
		dfe(pkg, path)
	}

	if len(allreachsets) == 0 {
		return nil
	}

	// Flatten allreachsets into the final reachlist.
	//
	// NOTE(review): basedir is trimmed using the OS path separator, but the
	// map keys are slash-separated import paths; currently moot because the
	// only caller passes basedir == "" - confirm before changing that.
	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
	rm := make(map[string][]string)
	for pkg, rs := range allreachsets {
		rlen := len(rs)
		if rlen == 0 {
			rm[strings.TrimPrefix(pkg, rt)] = nil
			continue
		}

		edeps := make([]string, rlen)
		k := 0
		for opkg := range rs {
			edeps[k] = opkg
			k++
		}

		// Sort for deterministic output.
		sort.Strings(edeps)
		rm[strings.TrimPrefix(pkg, rt)] = edeps
	}

	return rm
}
// ListExternalImports computes a sorted, deduplicated list of all the external
// packages that are reachable through imports from all valid packages in a
// ReachMap, as computed by PackageTree.ExternalReach().
//
// Whether main packages and test imports were included is determined by the
// arguments that were passed to PackageTree.ExternalReach() when this ReachMap
// was built. "External" is defined as anything not prefixed, after path
// cleaning, by the PackageTree.ImportRoot. This includes stdlib.
//
// If an internal path is ignored, all of the external packages that it uniquely
// imports are omitted. Note, however, that no internal transitivity checks are
// made here - every non-ignored package in the tree is considered independently
// (with one set of exceptions, noted below). That means, given a PackageTree
// with root A and packages at A, A/foo, and A/bar, and the following import
// chain:
//
// A -> A/foo -> A/bar -> B/baz
//
// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
// returned, because this method visits ALL packages in the tree, not only those reachable
// from the root (or any other) packages. If your use case requires interrogating
// external imports with respect to only specific package entry points, you need
// ExternalReach() instead.
//
// Which packages are ignored is likewise fixed when the ReachMap is built; it
// is safe to pass a nil ignore map to PackageTree.ExternalReach().
//
// If an internal package has an error (that is, PackageOrErr is Err), it is excluded from
// consideration. Internal packages that transitively import the error package
// are also excluded. So, if:
//
// -> B/foo
// /
// A
// \
// -> A/bar -> B/baz
//
// And A/bar has some error in it, then both A and A/bar will be eliminated from
// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
// its errors, is ignored, however, then A will remain, and B/foo will be in the
// results.
//
// Finally, note that if a directory is named "testdata", or has a leading dot
// or underscore, it will not be directly analyzed as a source. This is in
// keeping with Go tooling conventions that such directories should be ignored.
// So, if:
//
// A -> B/foo
// A/.bar -> B/baz
// A/_qux -> B/baz
// A/testdata -> B/baz
//
// Then B/foo will be returned, but B/baz will not, because all three of the
// packages that import it are in directories with disallowed names.
//
// HOWEVER, in keeping with the Go compiler, if one of those packages in a
// disallowed directory is imported by a package in an allowed directory, then
// it *will* be used. That is, while tools like go list will ignore a directory
// named .foo, you can still import from .foo. Thus, it must be included. So,
// if:
//
// -> B/foo
// /
// A
// \
// -> A/.bar -> B/baz
//
// A is legal, and it imports A/.bar, so the results will include B/baz.
func (rm ReachMap) ListExternalImports() []string {
	// disallowed reports whether any element of the import path begins
	// with a dot or underscore, or is "testdata". Packages under such
	// paths must not be treated as import sources themselves.
	disallowed := func(ipath string) bool {
		for _, elem := range strings.Split(ipath, "/") {
			if elem == "testdata" || strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") {
				return true
			}
		}
		return false
	}

	// Union the reach lists of every allowed package.
	set := make(map[string]struct{})
	for pkg, reach := range rm {
		if disallowed(pkg) {
			continue
		}
		for _, imp := range reach {
			set[imp] = struct{}{}
		}
	}

	if len(set) == 0 {
		return nil
	}

	// Flatten into a sorted slice for a deterministic result.
	out := make([]string, 0, len(set))
	for imp := range set {
		out = append(out, imp)
	}
	sort.Strings(out)
	return out
}
// checkPrefixSlash reports whether prefix is a path-style prefix of s:
// either s equals prefix exactly, or s begins with prefix followed by a
// path separator (so "foo" is not treated as a prefix of "foobar").
func checkPrefixSlash(s, prefix string) bool {
	if s == prefix {
		return true
	}
	return strings.HasPrefix(s, ensureTrailingSlash(prefix))
}
// ensureTrailingSlash returns s terminated by exactly one OS path
// separator, whether or not s already ended with one.
func ensureTrailingSlash(s string) string {
	sep := string(os.PathSeparator)
	return strings.TrimSuffix(s, sep) + sep
}
// dedupeStrings merges two string slices. When both are non-empty, the
// result is the sorted union with duplicates removed. When at most one
// is non-empty, that slice is returned as-is — unsorted, not deduped,
// and aliasing the input (this matches the historical behavior that
// callers rely on).
func dedupeStrings(s1, s2 []string) (r []string) {
	switch {
	case len(s1) == 0:
		if len(s2) > 0 {
			r = s2
		}
	case len(s2) == 0:
		r = s1
	default:
		seen := make(map[string]bool, len(s1)+len(s2))
		for _, v := range s1 {
			seen[v] = true
		}
		for _, v := range s2 {
			seen[v] = true
		}
		for v := range seen {
			r = append(r, v)
		}
		sort.Strings(r)
	}
	return
}
// uniq sorts a (if it is not already sorted) and removes adjacent
// duplicates in place, returning the deduplicated prefix. A nil input
// yields an empty, non-nil slice. Note that a leading empty string is
// dropped, because the comparison seed is the zero-value string — this
// preserves the function's historical behavior.
func uniq(a []string) []string {
	if a == nil {
		return []string{}
	}
	if !sort.StringsAreSorted(a) {
		sort.Strings(a)
	}
	prev, n := "", 0
	for _, v := range a {
		if v == prev {
			continue
		}
		a[n] = v
		n++
		prev = v
	}
	return a[:n]
}
|
// Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main_test
import (
"fmt"
"net"
"os/exec"
"strings"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containernetworking/plugins/pkg/testutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
// Integration tests for the loopback CNI plugin. Each test executes the
// compiled plugin binary (pathToLoPlugin, provided by the suite setup) in
// a fresh network namespace, driving it through the standard CNI
// environment variables and a JSON network config on stdin.
var _ = Describe("Loopback", func() {
	var (
		networkNS ns.NetNS  // fresh network namespace per test
		command   *exec.Cmd // plugin process under test
		environ   []string  // CNI_* environment passed to the plugin
	)
	BeforeEach(func() {
		command = exec.Command(pathToLoPlugin)
		var err error
		networkNS, err = testutils.NewNS()
		Expect(err).NotTo(HaveOccurred())
		// Minimal CNI invocation environment; CNI_COMMAND is added per test.
		environ = []string{
			fmt.Sprintf("CNI_CONTAINERID=%s", "dummy"),
			fmt.Sprintf("CNI_NETNS=%s", networkNS.Path()),
			fmt.Sprintf("CNI_IFNAME=%s", "lo"),
			fmt.Sprintf("CNI_ARGS=%s", "none"),
			fmt.Sprintf("CNI_PATH=%s", "/some/test/path"),
		}
		// Network configuration is delivered on the plugin's stdin.
		command.Stdin = strings.NewReader(`{ "name": "loopback-test", "cniVersion": "0.1.0" }`)
	})
	AfterEach(func() {
		Expect(networkNS.Close()).To(Succeed())
		Expect(testutils.UnmountNS(networkNS)).To(Succeed())
	})
	Context("when given a network namespace", func() {
		It("sets the lo device to UP", func() {
			command.Env = append(environ, fmt.Sprintf("CNI_COMMAND=%s", "ADD"))
			session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			// ADD must print a JSON result and exit 0.
			Eventually(session).Should(gbytes.Say(`{.*}`))
			Eventually(session).Should(gexec.Exit(0))
			var lo *net.Interface
			err = networkNS.Do(func(ns.NetNS) error {
				var err error
				lo, err = net.InterfaceByName("lo")
				return err
			})
			Expect(err).NotTo(HaveOccurred())
			Expect(lo.Flags & net.FlagUp).To(Equal(net.FlagUp))
		})
		It("sets the lo device to DOWN", func() {
			command.Env = append(environ, fmt.Sprintf("CNI_COMMAND=%s", "DEL"))
			session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
			Expect(err).NotTo(HaveOccurred())
			// DEL prints nothing but must still exit 0.
			Eventually(session).Should(gbytes.Say(``))
			Eventually(session).Should(gexec.Exit(0))
			var lo *net.Interface
			err = networkNS.Do(func(ns.NetNS) error {
				var err error
				lo, err = net.InterfaceByName("lo")
				return err
			})
			Expect(err).NotTo(HaveOccurred())
			Expect(lo.Flags & net.FlagUp).NotTo(Equal(net.FlagUp))
		})
	})
})
loopback: increase test coverage to 1.0.0 and older spec versions
Signed-off-by: Dan Williams <aeade43d0f8ae14e7c44fa81fe17c1635ae376fe@redhat.com>
// Copyright 2016 CNI authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main_test
import (
"fmt"
"net"
"os/exec"
"strings"
"github.com/containernetworking/plugins/pkg/ns"
"github.com/containernetworking/plugins/pkg/testutils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
// generateConfig builds a minimal CNI network configuration for the
// given CNI spec version, returned as a reader suitable for use as the
// plugin's stdin.
func generateConfig(cniVersion string) *strings.Reader {
	conf := fmt.Sprintf(`{ "name": "loopback-test", "cniVersion": "%s" }`, cniVersion)
	return strings.NewReader(conf)
}
// Integration tests for the loopback CNI plugin, run once per supported
// CNI spec version. Each test executes the compiled plugin binary
// (pathToLoPlugin, provided by the suite setup) in a fresh network
// namespace, driving it through the standard CNI environment variables
// and a version-specific JSON network config on stdin.
var _ = Describe("Loopback", func() {
	var (
		networkNS ns.NetNS  // fresh network namespace per test
		command   *exec.Cmd // plugin process under test
		environ   []string  // CNI_* environment passed to the plugin
	)
	BeforeEach(func() {
		command = exec.Command(pathToLoPlugin)
		var err error
		networkNS, err = testutils.NewNS()
		Expect(err).NotTo(HaveOccurred())
		// Minimal CNI invocation environment; CNI_COMMAND and the config
		// (which embeds the spec version) are added per test.
		environ = []string{
			fmt.Sprintf("CNI_CONTAINERID=%s", "dummy"),
			fmt.Sprintf("CNI_NETNS=%s", networkNS.Path()),
			fmt.Sprintf("CNI_IFNAME=%s", "lo"),
			fmt.Sprintf("CNI_ARGS=%s", "none"),
			fmt.Sprintf("CNI_PATH=%s", "/some/test/path"),
		}
	})
	AfterEach(func() {
		Expect(networkNS.Close()).To(Succeed())
		Expect(testutils.UnmountNS(networkNS)).To(Succeed())
	})
	for _, ver := range testutils.AllSpecVersions {
		// Redefine ver inside for scope so real value is picked up by each dynamically defined It()
		// See Ginkgo's "Patterns for dynamically generating tests" documentation.
		ver := ver
		Context("when given a network namespace", func() {
			It(fmt.Sprintf("[%s] sets the lo device to UP", ver), func() {
				command.Stdin = generateConfig(ver)
				command.Env = append(environ, fmt.Sprintf("CNI_COMMAND=%s", "ADD"))
				session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
				Expect(err).NotTo(HaveOccurred())
				// ADD must print a JSON result and exit 0.
				Eventually(session).Should(gbytes.Say(`{.*}`))
				Eventually(session).Should(gexec.Exit(0))
				var lo *net.Interface
				err = networkNS.Do(func(ns.NetNS) error {
					var err error
					lo, err = net.InterfaceByName("lo")
					return err
				})
				Expect(err).NotTo(HaveOccurred())
				Expect(lo.Flags & net.FlagUp).To(Equal(net.FlagUp))
			})
			It(fmt.Sprintf("[%s] sets the lo device to DOWN", ver), func() {
				command.Stdin = generateConfig(ver)
				command.Env = append(environ, fmt.Sprintf("CNI_COMMAND=%s", "DEL"))
				session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter)
				Expect(err).NotTo(HaveOccurred())
				// DEL prints nothing but must still exit 0.
				Eventually(session).Should(gbytes.Say(``))
				Eventually(session).Should(gexec.Exit(0))
				var lo *net.Interface
				err = networkNS.Do(func(ns.NetNS) error {
					var err error
					lo, err = net.InterfaceByName("lo")
					return err
				})
				Expect(err).NotTo(HaveOccurred())
				Expect(lo.Flags & net.FlagUp).NotTo(Equal(net.FlagUp))
			})
		})
	}
})
|
// Package rundeck provides a client for interacting with a Rundeck instance
// via its HTTP API.
//
// Instantiate a Client with the NewClient function to get started.
//
// At present this package uses Rundeck API version 13.
package rundeck
import (
"crypto/tls"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"bytes"
)
// ClientConfig is used with NewClient to specify initialization settings.
type ClientConfig struct {
	// The base URL of the Rundeck instance. The API path is resolved
	// relative to this URL; a missing trailing slash may drop the final
	// path segment during resolution — NOTE(review): confirm callers
	// always pass a trailing slash.
	BaseURL string
	// The API auth token generated from user settings in the Rundeck UI.
	// Sent on every request via the X-Rundeck-Auth-Token header.
	AuthToken string
	// Don't fail if the server uses SSL with an un-verifiable certificate.
	// This is not recommended except during development/debugging.
	AllowUnverifiedSSL bool
}
// Client is a Rundeck API client interface.
type Client struct {
	httpClient *http.Client // transport used for all API requests
	apiURL     *url.URL     // resolved base URL of the versioned API ("api/13/")
	authToken  string       // sent via the X-Rundeck-Auth-Token header
}
// NewClient returns a configured Rundeck client.
func NewClient(config *ClientConfig) (*Client, error) {
	baseURL, err := url.Parse(config.BaseURL)
	if err != nil {
		return nil, fmt.Errorf("Invalid base URL: %s", err.Error())
	}

	// Every request goes through the versioned API prefix, resolved
	// relative to the configured base URL.
	apiPath, _ := url.Parse("api/13/")

	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: config.AllowUnverifiedSSL,
		},
	}

	return &Client{
		httpClient: &http.Client{Transport: transport},
		apiURL:     baseURL.ResolveReference(apiPath),
		authToken:  config.AuthToken,
	}, nil
}
// request performs a single API request against the Rundeck server.
//
// method is the HTTP method; pathParts are joined with "/" and resolved
// relative to the client's API base URL; query (may be empty) is added
// to the URL; reqBody (may be nil) is XML-marshalled into the request
// body; the XML response payload is unmarshalled into result.
//
// A non-200 response is returned as an error: a rich Error decoded from
// the server's XML error payload when one is present, or a plain
// HTTP-status error otherwise.
func (c *Client) request(method string, pathParts []string, query map[string]string, reqBody interface{}, result interface{}) error {
	req := &http.Request{
		Method: method,
		Header: http.Header{},
	}
	req.Header.Add("User-Agent", "Go-Rundeck-API")
	req.Header.Add("X-Rundeck-Auth-Token", c.authToken)
	req.Header.Add("Accept", "application/xml")
	urlPath := &url.URL{
		Path: strings.Join(pathParts, "/"),
	}
	reqURL := c.apiURL.ResolveReference(urlPath)
	req.URL = reqURL
	if len(query) > 0 {
		urlQuery := url.Values{}
		for k, v := range query {
			urlQuery.Add(k, v)
		}
		reqURL.RawQuery = urlQuery.Encode()
	}
	if reqBody != nil {
		reqBodyBytes, err := xml.Marshal(reqBody)
		if err != nil {
			return err
		}
		req.Body = ioutil.NopCloser(bytes.NewBuffer(reqBodyBytes))
		req.ContentLength = int64(len(reqBodyBytes))
		req.Header.Add("Content-Type", "application/xml")
	}
	res, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection and preventing its reuse by the transport.
	defer res.Body.Close()
	resBodyBytes, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	if res.StatusCode != 200 {
		if strings.HasPrefix(res.Header.Get("Content-Type"), "text/xml") {
			var richErr Error
			err = xml.Unmarshal(resBodyBytes, &richErr)
			if err != nil {
				// Fix: %i is not a Go format verb; %d prints the status code.
				return fmt.Errorf("HTTP Error %d with error decoding XML body: %s", res.StatusCode, err.Error())
			}
			return richErr
		}
		return fmt.Errorf("HTTP Error %d", res.StatusCode)
	}
	err = xml.Unmarshal(resBodyBytes, result)
	if err != nil {
		err = fmt.Errorf("Error decoding response XML payload: %s", err.Error())
	}
	return err
}
// get performs a GET request against the given API path, decoding the
// XML response payload into result. See request for parameter semantics.
func (c *Client) get(pathParts []string, query map[string]string, result interface{}) error {
	return c.request("GET", pathParts, query, nil, result)
}
Support more response codes than just 200 OK.
The Rundeck API returns 201 Created for newly-created objects, and it
returns 204 No Content when deleting objects. To support both of these
use-cases we accept all 2xx responses as successful, and then we allow
the caller to decide (by passing nil as the "result") to not require
a response payload.
We assume that only 200 and 201 responses return response payloads. If
a different response code is returned but the client wanted a body then
that is considered an error.
// Package rundeck provides a client for interacting with a Rundeck instance
// via its HTTP API.
//
// Instantiate a Client with the NewClient function to get started.
//
// At present this package uses Rundeck API version 13.
package rundeck
import (
"crypto/tls"
"encoding/xml"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"
"bytes"
)
// ClientConfig is used with NewClient to specify initialization settings.
type ClientConfig struct {
	// The base URL of the Rundeck instance. The API path is resolved
	// relative to this URL; a missing trailing slash may drop the final
	// path segment during resolution — NOTE(review): confirm callers
	// always pass a trailing slash.
	BaseURL string
	// The API auth token generated from user settings in the Rundeck UI.
	// Sent on every request via the X-Rundeck-Auth-Token header.
	AuthToken string
	// Don't fail if the server uses SSL with an un-verifiable certificate.
	// This is not recommended except during development/debugging.
	AllowUnverifiedSSL bool
}
// Client is a Rundeck API client interface.
type Client struct {
	httpClient *http.Client // transport used for all API requests
	apiURL     *url.URL     // resolved base URL of the versioned API ("api/13/")
	authToken  string       // sent via the X-Rundeck-Auth-Token header
}
// NewClient returns a configured Rundeck client.
func NewClient(config *ClientConfig) (*Client, error) {
	baseURL, err := url.Parse(config.BaseURL)
	if err != nil {
		return nil, fmt.Errorf("Invalid base URL: %s", err.Error())
	}

	// Every request goes through the versioned API prefix, resolved
	// relative to the configured base URL.
	apiPath, _ := url.Parse("api/13/")

	transport := &http.Transport{
		TLSClientConfig: &tls.Config{
			InsecureSkipVerify: config.AllowUnverifiedSSL,
		},
	}

	return &Client{
		httpClient: &http.Client{Transport: transport},
		apiURL:     baseURL.ResolveReference(apiPath),
		authToken:  config.AuthToken,
	}, nil
}
// request performs a single API request against the Rundeck server.
//
// method is the HTTP method; pathParts are joined with "/" and resolved
// relative to the client's API base URL; query (may be empty) is added
// to the URL; reqBody (may be nil) is XML-marshalled into the request
// body. Any 2xx status is treated as success. result may be nil when no
// response payload is expected (e.g. 204 No Content on deletes); when
// result is non-nil, only 200 and 201 responses are assumed to carry an
// XML payload, and anything else is an error.
func (c *Client) request(method string, pathParts []string, query map[string]string, reqBody interface{}, result interface{}) error {
	req := &http.Request{
		Method: method,
		Header: http.Header{},
	}
	req.Header.Add("User-Agent", "Go-Rundeck-API")
	req.Header.Add("X-Rundeck-Auth-Token", c.authToken)
	req.Header.Add("Accept", "application/xml")
	urlPath := &url.URL{
		Path: strings.Join(pathParts, "/"),
	}
	reqURL := c.apiURL.ResolveReference(urlPath)
	req.URL = reqURL
	if len(query) > 0 {
		urlQuery := url.Values{}
		for k, v := range query {
			urlQuery.Add(k, v)
		}
		reqURL.RawQuery = urlQuery.Encode()
	}
	if reqBody != nil {
		reqBodyBytes, err := xml.Marshal(reqBody)
		if err != nil {
			return err
		}
		req.Body = ioutil.NopCloser(bytes.NewBuffer(reqBodyBytes))
		req.ContentLength = int64(len(reqBodyBytes))
		req.Header.Add("Content-Type", "application/xml")
	}
	res, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	// Fix: the response body was never closed, leaking the underlying
	// connection and preventing its reuse by the transport.
	defer res.Body.Close()
	resBodyBytes, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	if res.StatusCode < 200 || res.StatusCode >= 300 {
		if strings.HasPrefix(res.Header.Get("Content-Type"), "text/xml") {
			var richErr Error
			err = xml.Unmarshal(resBodyBytes, &richErr)
			if err != nil {
				// Fix: %i is not a Go format verb; %d prints the status code.
				return fmt.Errorf("HTTP Error %d with error decoding XML body: %s", res.StatusCode, err.Error())
			}
			return richErr
		}
		return fmt.Errorf("HTTP Error %d", res.StatusCode)
	}
	if result != nil {
		if res.StatusCode != 200 && res.StatusCode != 201 {
			return fmt.Errorf("Server did not return an XML payload")
		}
		err = xml.Unmarshal(resBodyBytes, result)
		if err != nil {
			return fmt.Errorf("Error decoding response XML payload: %s", err.Error())
		}
	}
	return nil
}
// get performs a GET request against the given API path, decoding the
// XML response payload into result. See request for parameter semantics.
func (c *Client) get(pathParts []string, query map[string]string, result interface{}) error {
	return c.request("GET", pathParts, query, nil, result)
}
|
package runner
import (
"bufio"
"errors"
"fmt"
"github.com/lhchavez/quark/common"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
)
var (
	// minijailPath is the installation root of minijail; it is expected
	// to contain bin/minijail0, the chroots (root, root-compilers, ...)
	// and the per-language seccomp policy scripts.
	minijailPath string = "/var/lib/minijail"
)
// Preloads an input so that the contestant's program has to wait less time.
type inputPreloader struct {
file *os.File
fileSize int64
mapping []byte
checksum uint8
}
func newInputPreloader(filePath string) (*inputPreloader, error) {
if filePath == "/dev/null" {
return nil, nil
}
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
info, err := file.Stat()
if err != nil {
return nil, err
}
preloader := &inputPreloader{
file: file,
fileSize: info.Size(),
}
mapping, err := syscall.Mmap(
int(preloader.file.Fd()),
0,
int(preloader.fileSize),
syscall.PROT_READ,
syscall.MAP_SHARED,
)
if err == nil {
pageSize := os.Getpagesize()
preloader.mapping = mapping
for i := 0; i < int(preloader.fileSize); i += pageSize {
preloader.checksum += preloader.mapping[i]
}
} else {
// mmap failed, so just read all the file.
io.Copy(ioutil.Discard, preloader.file)
}
return preloader, nil
}
func (preloader *inputPreloader) release() {
if preloader.mapping != nil {
syscall.Munmap(preloader.mapping)
}
preloader.file.Close()
}
// RunMetadata represents the results of an execution.
type RunMetadata struct {
	// Verdict is the judgment: "OK", "RTE", "TLE", "OLE", "MLE", "RFE",
	// or "JE" (judge error).
	Verdict string `json:"verdict"`
	// ExitStatus is the process exit code; -1 when unknown.
	ExitStatus int `json:"exit_status,omitempty"`
	// Time is the user CPU time, in seconds.
	Time float64 `json:"time"`
	// WallTime is the elapsed wall-clock time, in seconds.
	WallTime float64 `json:"wall_time"`
	// Memory is the peak memory usage — assumed KiB, matching the
	// comparison against Limits.MemoryLimit; TODO confirm units.
	Memory int64 `json:"memory"`
	// Signal, when non-nil, names the signal that terminated the process.
	Signal *string `json:"signal,omitempty"`
	// Syscall, when non-nil, names the syscall the sandbox rejected.
	Syscall *string `json:"syscall,omitempty"`
}
// Sandbox compiles and runs untrusted programs under resource limits,
// reporting the outcome of each invocation as a RunMetadata.
type Sandbox interface {
	// Supported returns true if the sandbox is available in the system.
	Supported() bool

	// Compile performs a compilation in the specified language.
	Compile(
		ctx *common.Context,
		lang string,
		inputFiles []string,
		chdir, outputFile, errorFile, metaFile, target string,
		extraFlags []string,
	) (*RunMetadata, error)

	// Run uses a previously compiled program and runs it against a single test
	// case with the supplied settings.
	Run(
		ctx *common.Context,
		settings *common.ProblemSettings,
		lang, chdir, inputFile, outputFile, errorFile, metaFile, target string,
		originalInputFile, originalOutputFile, runMetaFile *string,
		extraParams []string,
		extraMountPoints map[string]string,
	) (*RunMetadata, error)
}
// MinijailSandbox is a Sandbox implementation backed by minijail0.
type MinijailSandbox struct{}

// Supported reports whether the minijail0 binary is present on disk.
func (*MinijailSandbox) Supported() bool {
	if _, err := os.Stat(path.Join(minijailPath, "bin/minijail0")); err != nil {
		return false
	}
	return true
}
// Compile runs the compiler for lang inside the minijail sandbox.
//
// inputFiles must all live under chdir (the directory bound as /home in
// the chroot); stdout/stderr are redirected to outputFile/errorFile, and
// minijail writes its execution summary to metaFile, which is parsed
// into the returned RunMetadata. target names the compilation output.
// A "JE" RunMetadata is returned on any setup failure.
func (*MinijailSandbox) Compile(
	ctx *common.Context,
	lang string,
	inputFiles []string,
	chdir, outputFile, errorFile, metaFile, target string,
	extraFlags []string,
) (*RunMetadata, error) {
	// Flags shared by every language: chroot, writable /home bind,
	// redirections, and the compile-time/output limits.
	commonParams := []string{
		path.Join(minijailPath, "bin/minijail0"),
		"-C", path.Join(minijailPath, "root-compilers"),
		"-d", "/home",
		"-b", chdir + ",/home,1",
		"-1", outputFile,
		"-2", errorFile,
		"-M", metaFile,
		"-t", strconv.Itoa(ctx.Config.Runner.CompileTimeLimit * 1000),
		"-O", strconv.Itoa(ctx.Config.Runner.CompileOutputLimit),
	}
	// Sources are passed to the compiler relative to the chroot's /home.
	inputFlags := make([]string, 0)
	for _, inputFile := range inputFiles {
		if !strings.HasPrefix(inputFile, chdir) {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, errors.New("file " + inputFile + " is not within the chroot")
		}
		rel, err := filepath.Rel(chdir, inputFile)
		if err != nil {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, err
		}
		inputFlags = append(inputFlags, rel)
	}
	// Per-language seccomp policy, extra binds, and compiler invocation.
	// Linker flags (if any) must come after the input files.
	var params []string
	linkerFlags := make([]string, 0)
	switch lang {
	case "java":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/javac"),
			"-b", path.Join(minijailPath, "root-openjdk,/usr/lib/jvm"),
			"-b", "/sys/,/sys",
			"--", "/usr/bin/javac", "-J-Xmx512M",
		}
	case "c":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/gcc", "-o", target, "-std=c11", "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "cpp":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/g++", "-o", target, "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "cpp11":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/g++", "-o", target, "-std=c++11", "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "pas":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/fpc"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/fpc", "-Tlinux", "-O2",
			"-Mobjfpc", "-Sc", "-Sh", fmt.Sprintf("-o%s", target),
		}
	case "py":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/pyc"),
			"-b", path.Join(minijailPath, "root-python") + ",/usr/lib/python2.7",
			"--", "/usr/bin/python", "-m", "py_compile",
		}
	case "rb":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ruby"),
			"-b", path.Join(minijailPath, "root-ruby") + ",/usr/lib/ruby",
			"--", "/usr/bin/ruby", "-wc",
		}
	case "kj":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/kcl"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/kcl", "-lj",
			"-o", fmt.Sprintf("%s.kx", target), "-c",
		}
	case "kp":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/kcl"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/kcl", "-lp",
			"-o", fmt.Sprintf("%s.kx", target), "-c",
		}
	case "hs":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ghc"),
			"-b", path.Join(minijailPath, "root-hs") + ",/usr/lib/ghc",
			"--", "/usr/lib/ghc/lib/ghc", "-B/usr/lib/ghc", "-O2", "-o", target,
		}
	}
	finalParams := make([]string, 0)
	finalParams = append(finalParams, commonParams...)
	finalParams = append(finalParams, params...)
	finalParams = append(finalParams, extraFlags...)
	finalParams = append(finalParams, inputFlags...)
	finalParams = append(finalParams, linkerFlags...)
	ctx.Log.Debug("invoking minijail", "params", finalParams)
	// minijail0 needs root; a failure here is only logged because the
	// .meta file (parsed below) is the authoritative outcome.
	if err := exec.Command("/usr/bin/sudo", finalParams...).Run(); err != nil {
		ctx.Log.Error(
			"Minijail execution failed",
			"err", err,
		)
	}
	metaFd, err := os.Open(metaFile)
	if err != nil {
		return &RunMetadata{
			Verdict:    "JE",
			ExitStatus: -1,
		}, err
	}
	defer metaFd.Close()
	return parseMetaFile(ctx, nil, lang, metaFd, false)
}
// Run executes a previously compiled program against one test case
// inside the minijail sandbox, applying the limits from settings.
//
// inputFile/outputFile/errorFile are bound to the program's stdin,
// stdout, and stderr; metaFile receives minijail's execution summary,
// which is parsed into the returned RunMetadata. The optional
// originalInputFile/originalOutputFile/runMetaFile are hard-linked into
// chdir (as data.in/data.out/meta.in) for validators that need them.
// A "JE" RunMetadata is returned on any setup failure.
func (*MinijailSandbox) Run(
	ctx *common.Context,
	settings *common.ProblemSettings,
	lang, chdir, inputFile, outputFile, errorFile, metaFile, target string,
	originalInputFile, originalOutputFile, runMetaFile *string,
	extraParams []string,
	extraMountPoints map[string]string,
) (*RunMetadata, error) {
	timeLimit := settings.Limits.TimeLimit
	if lang == "java" {
		// Give the JVM extra startup time.
		timeLimit += 1000
	}
	commonParams := []string{
		path.Join(minijailPath, "bin/minijail0"),
		"-C", path.Join(minijailPath, "root"),
		"-d", "/home",
		"-b", chdir + ",/home",
		"-0", inputFile,
		"-1", outputFile,
		"-2", errorFile,
		"-M", metaFile,
		"-t", strconv.FormatInt(timeLimit, 10),
		"-w", strconv.FormatInt(settings.Limits.ExtraWallTime, 10),
		"-O", strconv.FormatInt(settings.Limits.OutputLimit, 10),
		"-k", strconv.FormatInt(settings.Limits.StackLimit, 10),
	}
	// Each extra mount point becomes a "-b source,target" flag pair.
	extraMinijailFlags := make([]string, 2*len(extraMountPoints))
	i := 0
	for path, mountTarget := range extraMountPoints {
		extraMinijailFlags[i] = "-b"
		extraMinijailFlags[i+1] = fmt.Sprintf("%s,%s", path, mountTarget)
		i += 2
	}
	// Hard-link the optional auxiliary files into the chroot directory.
	type fileLink struct {
		sourceFile, targetFile string
	}
	fileLinks := []fileLink{}
	if originalInputFile != nil {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *originalInputFile,
			targetFile: path.Join(chdir, "data.in"),
		})
	}
	if originalOutputFile != nil && *originalOutputFile != "/dev/null" {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *originalOutputFile,
			targetFile: path.Join(chdir, "data.out"),
		})
	}
	if runMetaFile != nil {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *runMetaFile,
			targetFile: path.Join(chdir, "meta.in"),
		})
	}
	for _, fl := range fileLinks {
		// Remove a stale link from a previous run before re-linking.
		if _, err := os.Stat(fl.targetFile); err == nil {
			os.Remove(fl.targetFile)
		}
		if err := os.Link(fl.sourceFile, fl.targetFile); err != nil {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, err
		}
	}
	// 16MB + memory limit to prevent some RTE
	memoryLimit := (16*1024 + settings.Limits.MemoryLimit) * 1024
	// "640MB should be enough for anybody"
	hardLimit := strconv.FormatInt(min64(640*1024*1024, memoryLimit), 10)
	// Per-language seccomp policy, extra binds, and program invocation.
	var params []string
	switch lang {
	case "java":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/java"),
			"-b", path.Join(minijailPath, "root-openjdk,/usr/lib/jvm"),
			"-b", "/sys/,/sys",
			"--", "/usr/bin/java", fmt.Sprintf("-Xmx%d", memoryLimit), target,
		}
	case "c", "cpp", "cpp11":
		// A MemoryLimit of -1 selects the debug policy with no -m cap.
		if settings.Limits.MemoryLimit != -1 {
			params = []string{
				"-S", path.Join(minijailPath, "scripts/cpp"),
				"-m", hardLimit,
			}
		} else {
			params = []string{
				"-S", path.Join(minijailPath, "scripts/cpp-debug"),
			}
		}
		params = append(params, "--", fmt.Sprintf("./%s", target))
	case "pas":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/pas"),
			"-m", hardLimit, "--", "/usr/bin/ldwrapper", fmt.Sprintf("./%s", target),
		}
	case "py":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/py"),
			"-b", path.Join(minijailPath, "root-python") + ",/usr/lib/python2.7",
			"-m", hardLimit, "--", "/usr/bin/python", fmt.Sprintf("./%s.py", target),
		}
	case "rb":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ruby"),
			"-b", path.Join(minijailPath, "root-ruby") + ",/usr/lib/ruby",
			"-m", hardLimit, "--", "/usr/bin/ruby", fmt.Sprintf("./%s.rb", target),
		}
	case "kp", "kj":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/karel"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/karel", "/dev/stdin", "-oi", "-q",
			"-p2", fmt.Sprintf("./%s.kx", target),
		}
	case "hs":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/hs"),
			"-b", path.Join(minijailPath, "root-hs") + ",/usr/lib/ghc",
			"-m", hardLimit, "--", fmt.Sprintf("./%s", target),
		}
	}
	finalParams := make([]string, 0)
	finalParams = append(finalParams, commonParams...)
	finalParams = append(finalParams, extraMinijailFlags...)
	finalParams = append(finalParams, params...)
	finalParams = append(finalParams, extraParams...)
	ctx.Log.Debug("invoking minijail", "params", finalParams)
	// Warm the input file in the page cache so the contestant's program
	// spends less of its time budget waiting on I/O.
	preloader, err := newInputPreloader(inputFile)
	if err != nil {
		ctx.Log.Error("Failed to preload input", "file", inputFile, "err", err)
	} else if preloader != nil {
		// preloader might be nil, even with no error.
		preloader.release()
	}
	// minijail0 needs root; a failure here is only logged because the
	// .meta file (parsed below) is the authoritative outcome.
	if err := exec.Command("/usr/bin/sudo", finalParams...).Run(); err != nil {
		ctx.Log.Error(
			"Minijail execution failed",
			"err", err,
		)
	}
	metaFd, err := os.Open(metaFile)
	if err != nil {
		return &RunMetadata{
			Verdict:    "JE",
			ExitStatus: -1,
		}, err
	}
	defer metaFd.Close()
	return parseMetaFile(ctx, settings, lang, metaFd, lang == "c")
}
// parseMetaFile parses the .meta summary that minijail writes for an
// execution and maps it to a RunMetadata.
//
// settings may be nil (as for compilations); when present, it is used to
// downgrade the verdict to MLE on memory-limit overruns.
// allowNonZeroExitCode makes a non-zero exit status still count as "OK"
// (passed for C runs — NOTE(review): confirm rationale with callers).
func parseMetaFile(
	ctx *common.Context,
	settings *common.ProblemSettings,
	lang string,
	metaFile io.Reader,
	allowNonZeroExitCode bool,
) (*RunMetadata, error) {
	// Default to "judge error" until the file proves otherwise.
	meta := &RunMetadata{
		Verdict:    "JE",
		ExitStatus: -1,
	}
	scanner := bufio.NewScanner(metaFile)
	for scanner.Scan() {
		tokens := strings.SplitN(scanner.Text(), ":", 2)
		if len(tokens) != 2 {
			// Fix: a line without ":" previously panicked on tokens[1]
			// in the cases below. Skip malformed lines with a warning.
			ctx.Log.Warn("Malformed line in .meta file", "line", scanner.Text())
			continue
		}
		switch tokens[0] {
		case "status":
			meta.ExitStatus, _ = strconv.Atoi(tokens[1])
		case "time":
			// Times are reported in microseconds; store seconds.
			meta.Time, _ = strconv.ParseFloat(tokens[1], 64)
			meta.Time /= 1e6
		case "time-wall":
			meta.WallTime, _ = strconv.ParseFloat(tokens[1], 64)
			meta.WallTime /= 1e6
		case "mem":
			meta.Memory, _ = strconv.ParseInt(tokens[1], 10, 64)
		case "signal":
			meta.Signal = &tokens[1]
		case "signal_number":
			// Signal that could not be resolved to a symbolic name.
			stringSignal := fmt.Sprintf("SIGNAL %s", tokens[1])
			meta.Signal = &stringSignal
		case "syscall":
			meta.Syscall = &tokens[1]
		case "syscall_number":
			stringSyscall := fmt.Sprintf("SYSCALL %s", tokens[1])
			meta.Syscall = &stringSyscall
		default:
			ctx.Log.Warn("Unknown field in .meta file", "tokens", tokens)
		}
	}
	if err := scanner.Err(); err != nil {
		return meta, err
	}
	// Derive the verdict: a terminating signal takes precedence over the
	// exit status.
	if meta.Signal != nil {
		switch *meta.Signal {
		case "SIGILL", "SIGSYS":
			meta.Verdict = "RFE"
		case "SIGABRT", "SIGFPE", "SIGKILL", "SIGPIPE", "SIGBUS", "SIGSEGV":
			meta.Verdict = "RTE"
		case "SIGALRM", "SIGXCPU":
			meta.Verdict = "TLE"
		case "SIGXFSZ":
			meta.Verdict = "OLE"
		default:
			ctx.Log.Error("Received odd signal", "signal", *meta.Signal)
			meta.Verdict = "RTE"
		}
	} else if meta.ExitStatus == 0 || allowNonZeroExitCode {
		meta.Verdict = "OK"
	} else {
		meta.Verdict = "RTE"
	}
	if lang == "java" {
		// Discount the JVM's estimated baseline memory usage.
		meta.Memory = max64(0, meta.Memory-ctx.Config.Runner.JavaVmEstimatedSize)
	}
	if settings != nil &&
		settings.Limits.MemoryLimit > 0 &&
		meta.Memory > settings.Limits.MemoryLimit &&
		(lang != "java" || meta.ExitStatus != 0) {
		meta.Verdict = "MLE"
		meta.Memory = settings.Limits.MemoryLimit
	}
	return meta, nil
}
// min64 returns the smaller of a and b.
func min64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// max64 returns the larger of a and b.
func max64(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
Parse minijail's system time output
package runner
import (
"bufio"
"errors"
"fmt"
"github.com/lhchavez/quark/common"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
)
var (
	// minijailPath is the installation root of minijail; it is expected
	// to contain bin/minijail0, the chroots (root, root-compilers, ...)
	// and the per-language seccomp policy scripts.
	minijailPath string = "/var/lib/minijail"
)
// Preloads an input so that the contestant's program has to wait less time.
type inputPreloader struct {
file *os.File
fileSize int64
mapping []byte
checksum uint8
}
func newInputPreloader(filePath string) (*inputPreloader, error) {
if filePath == "/dev/null" {
return nil, nil
}
file, err := os.Open(filePath)
if err != nil {
return nil, err
}
info, err := file.Stat()
if err != nil {
return nil, err
}
preloader := &inputPreloader{
file: file,
fileSize: info.Size(),
}
mapping, err := syscall.Mmap(
int(preloader.file.Fd()),
0,
int(preloader.fileSize),
syscall.PROT_READ,
syscall.MAP_SHARED,
)
if err == nil {
pageSize := os.Getpagesize()
preloader.mapping = mapping
for i := 0; i < int(preloader.fileSize); i += pageSize {
preloader.checksum += preloader.mapping[i]
}
} else {
// mmap failed, so just read all the file.
io.Copy(ioutil.Discard, preloader.file)
}
return preloader, nil
}
func (preloader *inputPreloader) release() {
if preloader.mapping != nil {
syscall.Munmap(preloader.mapping)
}
preloader.file.Close()
}
// RunMetadata represents the results of an execution.
type RunMetadata struct {
	// Verdict is the judgment: "OK", "RTE", "TLE", "OLE", "MLE", "RFE",
	// or "JE" (judge error).
	Verdict string `json:"verdict"`
	// ExitStatus is the process exit code; -1 when unknown.
	ExitStatus int `json:"exit_status,omitempty"`
	// Time is the user CPU time, in seconds.
	Time float64 `json:"time"`
	// SystemTime is the system (kernel) CPU time reported by minijail.
	SystemTime float64 `json:"sys_time"`
	// WallTime is the elapsed wall-clock time, in seconds.
	WallTime float64 `json:"wall_time"`
	// Memory is the peak memory usage — assumed KiB, matching the
	// comparison against Limits.MemoryLimit; TODO confirm units.
	Memory int64 `json:"memory"`
	// Signal, when non-nil, names the signal that terminated the process.
	Signal *string `json:"signal,omitempty"`
	// Syscall, when non-nil, names the syscall the sandbox rejected.
	Syscall *string `json:"syscall,omitempty"`
}
// Sandbox compiles and runs untrusted programs under resource limits,
// reporting the outcome of each invocation as a RunMetadata.
type Sandbox interface {
	// Supported returns true if the sandbox is available in the system.
	Supported() bool

	// Compile performs a compilation in the specified language.
	Compile(
		ctx *common.Context,
		lang string,
		inputFiles []string,
		chdir, outputFile, errorFile, metaFile, target string,
		extraFlags []string,
	) (*RunMetadata, error)

	// Run uses a previously compiled program and runs it against a single test
	// case with the supplied settings.
	Run(
		ctx *common.Context,
		settings *common.ProblemSettings,
		lang, chdir, inputFile, outputFile, errorFile, metaFile, target string,
		originalInputFile, originalOutputFile, runMetaFile *string,
		extraParams []string,
		extraMountPoints map[string]string,
	) (*RunMetadata, error)
}
// MinijailSandbox is a Sandbox implementation backed by minijail0.
type MinijailSandbox struct{}

// Supported reports whether the minijail0 binary is present on disk.
func (*MinijailSandbox) Supported() bool {
	if _, err := os.Stat(path.Join(minijailPath, "bin/minijail0")); err != nil {
		return false
	}
	return true
}
// Compile runs the language's compiler inside a minijail chroot
// ("root-compilers"), bind-mounting chdir read-write as /home. stdout,
// stderr and the sandbox's .meta output go to outputFile, errorFile and
// metaFile respectively. It returns the metadata parsed from metaFile; a
// "JE" (judge error) RunMetadata is returned when the invocation cannot
// even be set up.
func (*MinijailSandbox) Compile(
	ctx *common.Context,
	lang string,
	inputFiles []string,
	chdir, outputFile, errorFile, metaFile, target string,
	extraFlags []string,
) (*RunMetadata, error) {
	// Flags shared by every language: chroot (-C), working dir (-d),
	// writable bind mount (-b ...,1), stdio redirection (-1/-2), meta file
	// (-M), time limit in ms (-t) and output limit (-O).
	commonParams := []string{
		path.Join(minijailPath, "bin/minijail0"),
		"-C", path.Join(minijailPath, "root-compilers"),
		"-d", "/home",
		"-b", chdir + ",/home,1",
		"-1", outputFile,
		"-2", errorFile,
		"-M", metaFile,
		"-t", strconv.Itoa(ctx.Config.Runner.CompileTimeLimit * 1000),
		"-O", strconv.Itoa(ctx.Config.Runner.CompileOutputLimit),
	}
	// Every input file must live under chdir; it is passed to the compiler
	// relative to chdir, since chdir is what appears as /home in the chroot.
	inputFlags := make([]string, 0)
	for _, inputFile := range inputFiles {
		if !strings.HasPrefix(inputFile, chdir) {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, errors.New("file " + inputFile + " is not within the chroot")
		}
		rel, err := filepath.Rel(chdir, inputFile)
		if err != nil {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, err
		}
		inputFlags = append(inputFlags, rel)
	}
	// Per-language invocation: seccomp policy script (-S), optional extra
	// bind mounts, then the compiler command after "--".
	var params []string
	linkerFlags := make([]string, 0)
	switch lang {
	case "java":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/javac"),
			"-b", path.Join(minijailPath, "root-openjdk,/usr/lib/jvm"),
			"-b", "/sys/,/sys",
			"--", "/usr/bin/javac", "-J-Xmx512M",
		}
	case "c":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/gcc", "-o", target, "-std=c11", "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "cpp":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/g++", "-o", target, "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "cpp11":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/gcc"),
			"--", "/usr/bin/g++", "-o", target, "-std=c++11", "-O2",
		}
		linkerFlags = append(linkerFlags, "-lm")
	case "pas":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/fpc"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/fpc", "-Tlinux", "-O2",
			"-Mobjfpc", "-Sc", "-Sh", fmt.Sprintf("-o%s", target),
		}
	case "py":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/pyc"),
			"-b", path.Join(minijailPath, "root-python") + ",/usr/lib/python2.7",
			"--", "/usr/bin/python", "-m", "py_compile",
		}
	case "rb":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ruby"),
			"-b", path.Join(minijailPath, "root-ruby") + ",/usr/lib/ruby",
			"--", "/usr/bin/ruby", "-wc",
		}
	case "kj":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/kcl"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/kcl", "-lj",
			"-o", fmt.Sprintf("%s.kx", target), "-c",
		}
	case "kp":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/kcl"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/kcl", "-lp",
			"-o", fmt.Sprintf("%s.kx", target), "-c",
		}
	case "hs":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ghc"),
			"-b", path.Join(minijailPath, "root-hs") + ",/usr/lib/ghc",
			"--", "/usr/lib/ghc/lib/ghc", "-B/usr/lib/ghc", "-O2", "-o", target,
		}
	}
	// Order matters here: minijail flags first, then the compiler command,
	// caller-supplied flags, the input files, and the linker flags last.
	finalParams := make([]string, 0)
	finalParams = append(finalParams, commonParams...)
	finalParams = append(finalParams, params...)
	finalParams = append(finalParams, extraFlags...)
	finalParams = append(finalParams, inputFlags...)
	finalParams = append(finalParams, linkerFlags...)
	ctx.Log.Debug("invoking minijail", "params", finalParams)
	// A compiler failure is reported through the .meta file, so the exec
	// error itself is only logged, not returned.
	if err := exec.Command("/usr/bin/sudo", finalParams...).Run(); err != nil {
		ctx.Log.Error(
			"Minijail execution failed",
			"err", err,
		)
	}
	metaFd, err := os.Open(metaFile)
	if err != nil {
		return &RunMetadata{
			Verdict:    "JE",
			ExitStatus: -1,
		}, err
	}
	defer metaFd.Close()
	return parseMetaFile(ctx, nil, lang, metaFd, false)
}
// Run executes a previously compiled program inside the "root" minijail
// chroot against a single test case. inputFile/outputFile/errorFile are
// wired to the program's stdio, and the sandbox's .meta output is written
// to metaFile. originalInputFile/originalOutputFile/runMetaFile, when
// non-nil, are hard-linked into chdir as data.in/data.out/meta.in so the
// program (e.g. a validator) can read them. Returns the metadata parsed
// from metaFile; a "JE" RunMetadata is returned when setup fails.
func (*MinijailSandbox) Run(
	ctx *common.Context,
	settings *common.ProblemSettings,
	lang, chdir, inputFile, outputFile, errorFile, metaFile, target string,
	originalInputFile, originalOutputFile, runMetaFile *string,
	extraParams []string,
	extraMountPoints map[string]string,
) (*RunMetadata, error) {
	timeLimit := settings.Limits.TimeLimit
	if lang == "java" {
		// Give the JVM extra startup time.
		timeLimit += 1000
	}
	commonParams := []string{
		path.Join(minijailPath, "bin/minijail0"),
		"-C", path.Join(minijailPath, "root"),
		"-d", "/home",
		"-b", chdir + ",/home",
		"-0", inputFile,
		"-1", outputFile,
		"-2", errorFile,
		"-M", metaFile,
		"-t", strconv.FormatInt(timeLimit, 10),
		"-w", strconv.FormatInt(settings.Limits.ExtraWallTime, 10),
		"-O", strconv.FormatInt(settings.Limits.OutputLimit, 10),
		"-k", strconv.FormatInt(settings.Limits.StackLimit, 10),
	}
	// Translate each extra mount point into a "-b src,dst" flag pair.
	// NOTE: the loop variable was previously named `path`, shadowing the
	// imported "path" package inside the loop body; renamed to avoid that.
	extraMinijailFlags := make([]string, 2*len(extraMountPoints))
	i := 0
	for mountPoint, mountTarget := range extraMountPoints {
		extraMinijailFlags[i] = "-b"
		extraMinijailFlags[i+1] = fmt.Sprintf("%s,%s", mountPoint, mountTarget)
		i += 2
	}
	type fileLink struct {
		sourceFile, targetFile string
	}
	fileLinks := []fileLink{}
	if originalInputFile != nil {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *originalInputFile,
			targetFile: path.Join(chdir, "data.in"),
		})
	}
	if originalOutputFile != nil && *originalOutputFile != "/dev/null" {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *originalOutputFile,
			targetFile: path.Join(chdir, "data.out"),
		})
	}
	if runMetaFile != nil {
		fileLinks = append(fileLinks, fileLink{
			sourceFile: *runMetaFile,
			targetFile: path.Join(chdir, "meta.in"),
		})
	}
	// Replace any stale link from a previous run before creating the new one.
	for _, fl := range fileLinks {
		if _, err := os.Stat(fl.targetFile); err == nil {
			os.Remove(fl.targetFile)
		}
		if err := os.Link(fl.sourceFile, fl.targetFile); err != nil {
			return &RunMetadata{
				Verdict:    "JE",
				ExitStatus: -1,
			}, err
		}
	}
	// 16MB + memory limit to prevent some RTE
	memoryLimit := (16*1024 + settings.Limits.MemoryLimit) * 1024
	// "640MB should be enough for anybody"
	hardLimit := strconv.FormatInt(min64(640*1024*1024, memoryLimit), 10)
	var params []string
	switch lang {
	case "java":
		// The JVM manages its own heap, so the limit is passed via -Xmx
		// instead of minijail's -m.
		params = []string{
			"-S", path.Join(minijailPath, "scripts/java"),
			"-b", path.Join(minijailPath, "root-openjdk,/usr/lib/jvm"),
			"-b", "/sys/,/sys",
			"--", "/usr/bin/java", fmt.Sprintf("-Xmx%d", memoryLimit), target,
		}
	case "c", "cpp", "cpp11":
		if settings.Limits.MemoryLimit != -1 {
			params = []string{
				"-S", path.Join(minijailPath, "scripts/cpp"),
				"-m", hardLimit,
			}
		} else {
			params = []string{
				"-S", path.Join(minijailPath, "scripts/cpp-debug"),
			}
		}
		params = append(params, "--", fmt.Sprintf("./%s", target))
	case "pas":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/pas"),
			"-m", hardLimit, "--", "/usr/bin/ldwrapper", fmt.Sprintf("./%s", target),
		}
	case "py":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/py"),
			"-b", path.Join(minijailPath, "root-python") + ",/usr/lib/python2.7",
			"-m", hardLimit, "--", "/usr/bin/python", fmt.Sprintf("./%s.py", target),
		}
	case "rb":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/ruby"),
			"-b", path.Join(minijailPath, "root-ruby") + ",/usr/lib/ruby",
			"-m", hardLimit, "--", "/usr/bin/ruby", fmt.Sprintf("./%s.rb", target),
		}
	case "kp", "kj":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/karel"),
			"--", "/usr/bin/ldwrapper", "/usr/bin/karel", "/dev/stdin", "-oi", "-q",
			"-p2", fmt.Sprintf("./%s.kx", target),
		}
	case "hs":
		params = []string{
			"-S", path.Join(minijailPath, "scripts/hs"),
			"-b", path.Join(minijailPath, "root-hs") + ",/usr/lib/ghc",
			"-m", hardLimit, "--", fmt.Sprintf("./%s", target),
		}
	}
	finalParams := make([]string, 0)
	finalParams = append(finalParams, commonParams...)
	finalParams = append(finalParams, extraMinijailFlags...)
	finalParams = append(finalParams, params...)
	finalParams = append(finalParams, extraParams...)
	ctx.Log.Debug("invoking minijail", "params", finalParams)
	preloader, err := newInputPreloader(inputFile)
	if err != nil {
		ctx.Log.Error("Failed to preload input", "file", inputFile, "err", err)
	} else if preloader != nil {
		// preloader might be nil, even with no error.
		preloader.release()
	}
	// Runtime failures are reported through the .meta file, so the exec
	// error itself is only logged, not returned.
	if err := exec.Command("/usr/bin/sudo", finalParams...).Run(); err != nil {
		ctx.Log.Error(
			"Minijail execution failed",
			"err", err,
		)
	}
	metaFd, err := os.Open(metaFile)
	if err != nil {
		return &RunMetadata{
			Verdict:    "JE",
			ExitStatus: -1,
		}, err
	}
	defer metaFd.Close()
	// C programs are allowed a non-zero exit status without being judged RTE.
	return parseMetaFile(ctx, settings, lang, metaFd, lang == "c")
}
// parseMetaFile decodes minijail's "key:value"-per-line .meta output into a
// RunMetadata and derives the verdict: RFE/RTE/TLE/OLE from the signal, OK
// for a clean exit (or any exit when allowNonZeroExitCode is set), RTE
// otherwise, and MLE when the memory limit in settings was exceeded.
// settings may be nil (compilation). Times in the file are in microseconds
// and are converted to seconds.
func parseMetaFile(
	ctx *common.Context,
	settings *common.ProblemSettings,
	lang string,
	metaFile io.Reader,
	allowNonZeroExitCode bool,
) (*RunMetadata, error) {
	meta := &RunMetadata{
		Verdict:    "JE",
		ExitStatus: -1,
	}
	scanner := bufio.NewScanner(metaFile)
	for scanner.Scan() {
		tokens := strings.SplitN(scanner.Text(), ":", 2)
		// BUG FIX: a line without ':' yields a single token, and the old
		// code indexed tokens[1] unconditionally in the matching cases,
		// panicking on malformed meta files. Skip such lines instead.
		if len(tokens) != 2 {
			ctx.Log.Warn("Malformed line in .meta file", "tokens", tokens)
			continue
		}
		switch tokens[0] {
		case "status":
			meta.ExitStatus, _ = strconv.Atoi(tokens[1])
		case "time":
			meta.Time, _ = strconv.ParseFloat(tokens[1], 64)
			meta.Time /= 1e6
		case "time-sys":
			meta.SystemTime, _ = strconv.ParseFloat(tokens[1], 64)
			meta.SystemTime /= 1e6
		case "time-wall":
			meta.WallTime, _ = strconv.ParseFloat(tokens[1], 64)
			meta.WallTime /= 1e6
		case "mem":
			meta.Memory, _ = strconv.ParseInt(tokens[1], 10, 64)
		case "signal":
			meta.Signal = &tokens[1]
		case "signal_number":
			stringSignal := fmt.Sprintf("SIGNAL %s", tokens[1])
			meta.Signal = &stringSignal
		case "syscall":
			meta.Syscall = &tokens[1]
		case "syscall_number":
			stringSyscall := fmt.Sprintf("SYSCALL %s", tokens[1])
			meta.Syscall = &stringSyscall
		default:
			ctx.Log.Warn("Unknown field in .meta file", "tokens", tokens)
		}
	}
	if err := scanner.Err(); err != nil {
		return meta, err
	}
	if meta.Signal != nil {
		switch *meta.Signal {
		case "SIGILL", "SIGSYS":
			// Forbidden syscall / illegal instruction => restricted function.
			meta.Verdict = "RFE"
		case "SIGABRT", "SIGFPE", "SIGKILL", "SIGPIPE", "SIGBUS", "SIGSEGV":
			meta.Verdict = "RTE"
		case "SIGALRM", "SIGXCPU":
			meta.Verdict = "TLE"
		case "SIGXFSZ":
			meta.Verdict = "OLE"
		default:
			ctx.Log.Error("Received odd signal", "signal", *meta.Signal)
			meta.Verdict = "RTE"
		}
	} else if meta.ExitStatus == 0 || allowNonZeroExitCode {
		meta.Verdict = "OK"
	} else {
		meta.Verdict = "RTE"
	}
	if lang == "java" {
		// Discount the fixed JVM overhead from the reported memory.
		meta.Memory = max64(0, meta.Memory-ctx.Config.Runner.JavaVmEstimatedSize)
	}
	if settings != nil &&
		settings.Limits.MemoryLimit > 0 &&
		meta.Memory > settings.Limits.MemoryLimit &&
		(lang != "java" || meta.ExitStatus != 0) {
		meta.Verdict = "MLE"
		meta.Memory = settings.Limits.MemoryLimit
	}
	return meta, nil
}
// min64 returns the smaller of a and b.
func min64(a, b int64) int64 {
	if b < a {
		return b
	}
	return a
}
// max64 returns the larger of a and b.
func max64(a, b int64) int64 {
	if b > a {
		return b
	}
	return a
}
|
package linda
import (
"github.com/sirupsen/logrus"
"strconv"
"sync"
"errors"
)
// Package-level state guarded by initMutex; Init/Close toggle `initialized`.
var (
	broker      Broker
	saver       Saver
	initMutex   sync.Mutex
	initialized bool
	config      *Config
	quit        chan bool

	// ErrNotInitialized is returned by Run when Init has not been called.
	// BUG FIX: it was errors.New("") — an empty message gives callers that
	// log the error no information at all.
	ErrNotInitialized = errors.New("you must init linda first")
)
// Open linda with config
// get instance of broker
// Open linda with config
// get instance of broker
//
// Init stores the config, broker and saver in package-level state and marks
// the package initialized. Calling it again is a no-op. It currently always
// returns nil.
func Init(c Config, b Broker, s Saver) error {
	initMutex.Lock()
	defer initMutex.Unlock()
	if !initialized {
		logrus.Debugf("init linda...")
		config = &c
		quit = make(chan bool)
		// init the broker
		broker = b
		// init the saver
		saver = s
		// set initialized true
		initialized = true
	}
	return nil
}
// Close linda with close broker and saver
// Close linda with close broker and saver
//
// Close shuts down the broker and saver, closes the quit channel and marks
// the package uninitialized. It is a no-op if Init was never called.
func Close() {
	initMutex.Lock()
	defer initMutex.Unlock()
	if initialized {
		logrus.Debugf("close linda...")
		broker.Close()
		saver.Close()
		close(quit)
		initialized = false
	}
}
// Quit signals shutdown by sending on the quit channel.
// NOTE(review): nothing visible in this file receives from quit (Close only
// closes it), so this send blocks until the channel is closed — and a send
// on a closed channel panics. Verify the intended consumer elsewhere.
func Quit() {
	quit <- true
}
// Run linda, it also call init function self
// Run executes the worker loop. Init must have been called beforehand,
// otherwise ErrNotInitialized is returned. Close is always invoked when the
// loop ends.
func Run() error {
	if !initialized {
		return ErrNotInitialized
	}
	defer Close()
	return run()
}
// run wires one poller feeding a shared jobs channel into config.WorkerNum
// workers and blocks until the WaitGroup drains.
func run() error {
	// poller
	poller, err := newPoller()
	if err != nil {
		return err
	}
	jobs := poller.poll(config.Queue, config.Timeout, config.Interval)
	// workers
	var monitor sync.WaitGroup
	for i := 0; i < config.WorkerNum; i++ {
		worker, err := newWorker(strconv.Itoa(i))
		if err != nil {
			// NOTE(review): returning here abandons workers already started;
			// presumably they exit when jobs is closed — confirm in worker.work.
			return err
		}
		worker.work(jobs, &monitor)
	}
	monitor.Wait()
	return nil
}
add linda init error message
package linda
import (
"github.com/sirupsen/logrus"
"strconv"
"sync"
"errors"
)
// Package-level state guarded by initMutex; Init/Close toggle `initialized`.
var (
	broker      Broker
	saver       Saver
	initMutex   sync.Mutex
	initialized bool
	config      *Config
	quit        chan bool
	// ErrNotInitialized is returned by Run when Init has not been called.
	ErrNotInitialized = errors.New("you must init linda first")
)
// Open linda with config
// get instance of broker
// Init stores the config, broker and saver in package-level state and marks
// linda as initialized. Subsequent calls are no-ops. It always returns nil.
func Init(c Config, b Broker, s Saver) error {
	initMutex.Lock()
	defer initMutex.Unlock()
	if initialized {
		// Already set up; nothing to do.
		return nil
	}
	logrus.Debugf("init linda...")
	config = &c
	quit = make(chan bool)
	broker = b
	saver = s
	initialized = true
	return nil
}
// Close linda with close broker and saver
// Close shuts down the broker and saver, closes the quit channel and marks
// linda as uninitialized. Calling it before Init is a no-op.
func Close() {
	initMutex.Lock()
	defer initMutex.Unlock()
	if !initialized {
		return
	}
	logrus.Debugf("close linda...")
	broker.Close()
	saver.Close()
	close(quit)
	initialized = false
}
// Quit signals shutdown by sending on the quit channel.
// NOTE(review): no receiver for quit is visible in this file (Close only
// closes the channel), so this send blocks until Close runs and panics if
// the channel is already closed — confirm the intended consumer.
func Quit() {
	quit <- true
}
// Run linda, it also call init function self
// Run executes the linda worker loop, returning ErrNotInitialized when Init
// has not been called. Close always runs when the loop finishes.
func Run() error {
	if !initialized {
		return ErrNotInitialized
	}
	defer Close()
	return run()
}
// run wires one poller feeding a shared jobs channel into config.WorkerNum
// workers and blocks until the WaitGroup drains.
func run() error {
	// poller
	poller, err := newPoller()
	if err != nil {
		return err
	}
	jobs := poller.poll(config.Queue, config.Timeout, config.Interval)
	// workers
	var monitor sync.WaitGroup
	for i := 0; i < config.WorkerNum; i++ {
		worker, err := newWorker(strconv.Itoa(i))
		if err != nil {
			// NOTE(review): returning here abandons workers already started;
			// presumably they exit when jobs is closed — confirm in worker.work.
			return err
		}
		worker.work(jobs, &monitor)
	}
	monitor.Wait()
	return nil
}
|
package api
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/matzoe/xunlei/cookiejar"
)
// Sentinel errors for the package; they are assigned in init below.
// Only ReuseSessionErr is exported.
var noSuchTaskErr error
var invalidResponseErr error
var unexpectedErr error
var taskNotCompletedErr error
var invalidLoginErr error
var loginFailedErr error
var ReuseSessionErr error
var btTaskAlreadyErr error
var taskNoRedownCapErr error

// defaultConn is the shared HTTP client (with its cookie jar) used by every
// request in this package; the embedded mutex serializes calls to Do.
var defaultConn struct {
	*http.Client
	sync.Mutex
}
// init builds the shared HTTP client with a cookie jar and creates the
// package's sentinel errors.
func init() {
	jar, _ := cookiejar.New(nil)
	// BUG FIX: use a keyed composite literal. The old positional form
	// &http.Client{nil, nil, jar} stops compiling (or silently mis-assigns)
	// whenever http.Client gains a field — it has had four since Go 1.3 —
	// and is flagged by `go vet`.
	defaultConn.Client = &http.Client{Jar: jar}
	// The zero value of sync.Mutex is ready to use, so the old explicit
	// `defaultConn.Mutex = sync.Mutex{}` assignment was removed.
	noSuchTaskErr = errors.New("No such TaskId in list.")
	invalidResponseErr = errors.New("Invalid response.")
	unexpectedErr = errors.New("Unexpected error.")
	taskNotCompletedErr = errors.New("Task not completed.")
	invalidLoginErr = errors.New("Invalid login account.")
	loginFailedErr = errors.New("Login failed.")
	// Also fixes the "exipred" typo in the message; callers compare the
	// sentinel by identity, not by text.
	ReuseSessionErr = errors.New("Previous session expired.")
	btTaskAlreadyErr = errors.New("Bt task already exists.")
	taskNoRedownCapErr = errors.New("Task not capable for restart.")
}
// get performs a GET on dest through the shared client, retrying for as long
// as the request fails with io.EOF (a dropped keep-alive connection), and
// returns the decoded response body.
func get(dest string) ([]byte, error) {
	log.Println("==>", dest)
	req, err := http.NewRequest("GET", dest, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	var resp *http.Response
	for {
		defaultConn.Lock()
		resp, err = defaultConn.Do(req)
		defaultConn.Unlock()
		if err != io.EOF {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return readBody(resp)
}
// post performs a form-encoded POST on dest through the shared client,
// retrying for as long as the request fails with io.EOF (a dropped
// keep-alive connection), and returns the decoded response body.
func post(dest string, data string) ([]byte, error) {
	log.Println("==>", dest)
	req, err := http.NewRequest("POST", dest, strings.NewReader(data))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	// A dropped keep-alive connection surfaces as io.EOF; retry the request.
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return readBody(resp)
}
// Login authenticates against login.xunlei.com with the account id and the
// pre-hashed password. It fetches a verification code via the check
// endpoint's "check_result" cookie, posts the credentials, stores the
// resulting userid in M.Uid and finally warms up the lixian session.
func Login(id, passhash string) (err error) {
	var vcode string
	if len(id) == 0 {
		err = invalidLoginErr
		return
	}
	loginUrl := fmt.Sprintf("http://login.xunlei.com/check?u=%s&cachetime=%d", id, current_timestamp())
	u, _ := url.Parse("http://xunlei.com/")
loop:
	// Re-request until the server hands back a usable check_result cookie.
	// NOTE(review): if the cookie never appears/lengthens this loop spins
	// issuing requests with no backoff or attempt cap.
	if _, err = get(loginUrl); err != nil {
		return
	}
	cks := defaultConn.Client.Jar.Cookies(u)
	for i, _ := range cks {
		if cks[i].Name == "check_result" {
			if len(cks[i].Value) < 3 {
				goto loop
			}
			// The code follows a two-character prefix in the cookie value.
			vcode = cks[i].Value[2:]
			vcode = strings.ToUpper(vcode)
			log.Println("verify_code:", vcode)
			break
		}
	}
	v := url.Values{}
	v.Set("u", id)
	v.Set("p", hashPass(passhash, vcode))
	v.Set("verifycode", vcode)
	if _, err = post("http://login.xunlei.com/sec2login/", v.Encode()); err != nil {
		return
	}
	M.Uid = getCookie("http://xunlei.com", "userid")
	log.Printf("uid: %s\n", M.Uid)
	if len(M.Uid) == 0 {
		err = loginFailedErr
		return
	}
	// Warm up the lixian session; a short body indicates failure.
	var r []byte
	if r, err = get(fmt.Sprintf("%slogin?cachetime=%d&from=0", DOMAIN_LIXIAN, current_timestamp())); err != nil || len(r) < 512 {
		err = unexpectedErr
	}
	return
}
// SaveSession persists the client's cookie jar to cookieFile so a later
// process can resume without logging in again.
func SaveSession(cookieFile string) error {
	return defaultConn.Client.Jar.(*cookiejar.Jar).Save(cookieFile)
}
// ResumeSession loads cookies from cookieFile (when non-empty) and verifies
// the session is still valid, returning ReuseSessionErr when it is not.
// NOTE(review): the original load error is discarded and replaced by a
// generic "Invalid cookie file." message.
func ResumeSession(cookieFile string) (err error) {
	if cookieFile != "" {
		if err = defaultConn.Client.Jar.(*cookiejar.Jar).Load(cookieFile); err != nil {
			err = errors.New("Invalid cookie file.")
			return
		}
	}
	if !IsOn() {
		err = ReuseSessionErr
	}
	return
}
// IsOn reports whether the current cookie session is still authenticated by
// probing the task-list endpoint; a redirect-to-error page means the session
// timed out. On success it also backfills M.Uid from the cookie.
func IsOn() bool {
	uid := getCookie("http://xunlei.com", "userid")
	if len(uid) == 0 {
		return false
	}
	r, _ := get(fmt.Sprintf("%suser_task?userid=%s&st=0", DOMAIN_LIXIAN, uid))
	if ok, _ := regexp.Match(`top.location='http://cloud.vip.xunlei.com/task.html\?error=`, r); ok {
		// log.Println("previous login timeout")
		return false
	}
	if len(M.Uid) == 0 {
		M.Uid = uid
	}
	return true
}
// getCookie returns the value of the named cookie stored for uri, or the
// empty string when no such cookie exists.
func getCookie(uri, name string) string {
	u, _ := url.Parse(uri)
	for _, ck := range defaultConn.Client.Jar.Cookies(u) {
		if ck.Name == name {
			return ck.Value
		}
	}
	return ""
}
// GetTasks fetches the mixed (all-status) task list, unescapes the task
// names, replaces the cached "normal" group in M and returns the tasks.
// NOTE(review): this, GetCompletedTasks and GetIncompletedTasks share
// near-identical decode logic that could be factored into one helper.
func GetTasks() ([]*Task, error) {
	b, err := tasklist_nofresh(_STATUS_mixed, 1)
	if err != nil {
		return nil, err
	}
	var resp _task_resp
	err = json.Unmarshal(b, &resp)
	if err != nil {
		return nil, err
	}
	ts := make([]*Task, 0, len(resp.Info.Tasks))
	for i, _ := range resp.Info.Tasks {
		resp.Info.Tasks[i].TaskName = unescapeName(resp.Info.Tasks[i].TaskName)
		ts = append(ts, &resp.Info.Tasks[i])
	}
	M.invalidateGroup(_FLAG_normal)
	M.pushTasks(ts)
	return ts, err
}
// GetCompletedTasks fetches the completed task list, unescapes the task
// names, caches the tasks in M and returns them.
func GetCompletedTasks() ([]*Task, error) {
	b, err := tasklist_nofresh(_STATUS_completed, 1)
	if err != nil {
		return nil, err
	}
	var resp _task_resp
	err = json.Unmarshal(b, &resp)
	if err != nil {
		return nil, err
	}
	ts := make([]*Task, 0, len(resp.Info.Tasks))
	for i, _ := range resp.Info.Tasks {
		resp.Info.Tasks[i].TaskName = unescapeName(resp.Info.Tasks[i].TaskName)
		ts = append(ts, &resp.Info.Tasks[i])
	}
	M.pushTasks(ts)
	return ts, err
}
// GetIncompletedTasks fetches the still-downloading task list, unescapes the
// task names, caches the tasks in M and returns them.
func GetIncompletedTasks() ([]*Task, error) {
	b, err := tasklist_nofresh(_STATUS_downloading, 1)
	if err != nil {
		return nil, err
	}
	var resp _task_resp
	err = json.Unmarshal(b, &resp)
	if err != nil {
		return nil, err
	}
	ts := make([]*Task, 0, len(resp.Info.Tasks))
	for i, _ := range resp.Info.Tasks {
		resp.Info.Tasks[i].TaskName = unescapeName(resp.Info.Tasks[i].TaskName)
		ts = append(ts, &resp.Info.Tasks[i])
	}
	M.pushTasks(ts)
	return ts, err
}
// GetGdriveId returns the session's gdriveid, fetching the task list once to
// populate M.Gid (plus M.Account / M.AccountInfo) when it is not yet cached.
func GetGdriveId() (gid string, err error) {
	if len(M.Gid) == 0 {
		var b []byte
		b, err = tasklist_nofresh(_STATUS_mixed, 1)
		if err != nil {
			return
		}
		var resp _task_resp
		err = json.Unmarshal(b, &resp)
		if err != nil {
			return
		}
		// The gdriveid travels in the user's Cookie field of this response.
		M.Gid = resp.Info.User.Cookie
		M.Account = &resp.Info.User
		M.AccountInfo = &resp.UserInfo
	}
	gid = M.Gid
	log.Println("gdriveid:", gid)
	return
}
// tasklist_nofresh fetches one page of the task list for the given status
// id and extracts the JSON payload from the rebuild(...) JSONP wrapper.
func tasklist_nofresh(tid, page int) ([]byte, error) {
	/*
	   tid:
	   1 downloading
	   2 completed
	   4 downloading|completed|expired
	   11 deleted - not used now?
	   13 expired - not used now?
	*/
	// Unknown status ids fall back to the mixed listing.
	if tid != 4 && tid != 1 && tid != 2 {
		tid = 4
	}
	uri := fmt.Sprintf(SHOWTASK_UNFRESH, tid, page, _page_size, page)
	r, err := get(uri)
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`rebuild\((\{.*\})\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return nil, invalidResponseErr
	}
	return s[1], nil
}
// readExpired fetches the expired-tasks HTML page, passing the filter state
// and page size through the lx_nf_all / pagenum cookies the site expects.
// Like get/post it retries indefinitely on io.EOF.
func readExpired() ([]byte, error) {
	uri := fmt.Sprintf(EXPIRE_HOME, M.Uid)
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "lx_nf_all", Value: url.QueryEscape(_expired_ck)})
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: _page_size})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return readBody(resp)
}
// GetExpiredTasks scrapes the expired-tasks page, replaces the cached
// "expired" group in M and returns the parsed tasks.
func GetExpiredTasks() ([]*Task, error) {
	r, err := readExpired()
	// flag "4" marks the parsed tasks as expired.
	ts, _ := parseHistory(r, "4")
	M.invalidateGroup(_FLAG_expired)
	M.pushTasks(ts)
	return ts, err
}
// GetDeletedTasks scrapes every page of the deleted-tasks history (following
// the "next" link until it disappears), refreshes the deleted/purged groups
// in M and returns all parsed tasks.
func GetDeletedTasks() ([]*Task, error) {
	j := 0
	next := true
	var err error
	var r []byte
	var ts []*Task
	tss := make([]*Task, 0, 10)
	for next {
		j++
		r, err = readHistory(j)
		// flag "1" marks the parsed tasks as deleted; next reports whether
		// another page exists.
		ts, next = parseHistory(r, "1")
		tss = append(tss, ts...)
	}
	M.invalidateGroup(_FLAG_deleted)
	M.invalidateGroup(_FLAG_purged)
	M.pushTasks(tss)
	return tss, err
}
// readHistory fetches one page of the deleted-task history HTML (the home
// page when page <= 0), passing the filter state and page size through the
// lx_nf_all / pagenum cookies. Like get/post it retries indefinitely on
// io.EOF.
func readHistory(page int) ([]byte, error) {
	var uri string
	if page > 0 {
		uri = fmt.Sprintf(HISTORY_PAGE, M.Uid, page)
	} else {
		uri = fmt.Sprintf(HISTORY_HOME, M.Uid)
	}
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "lx_nf_all", Value: url.QueryEscape(_deleted_ck)})
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: _page_size})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return readBody(resp)
}
// parseHistory extracts tasks from the hidden <input> fields of a history
// HTML page, tagging each with flag ty ("1" deleted, "4" expired). The
// second return value reports whether a "next page" link is present.
func parseHistory(in []byte, ty string) ([]*Task, bool) {
	// Capture groups: 1 task id, 2 download status, 3 dflag, 4 cid,
	// 5 url, 6 task name, 7 task type.
	es := `<input id="d_status(\d+)"[^<>]+value="(.*)" />\s+<input id="dflag\d+"[^<>]+value="(.*)" />\s+<input id="dcid\d+"[^<>]+value="(.*)" />\s+<input id="f_url\d+"[^<>]+value="(.*)" />\s+<input id="taskname\d+"[^<>]+value="(.*)" />\s+<input id="d_tasktype\d+"[^<>]+value="(.*)" />`
	exp := regexp.MustCompile(es)
	s := exp.FindAllSubmatch(in, -1)
	ret := make([]*Task, len(s))
	for i, _ := range s {
		b, _ := strconv.Atoi(string(s[i][7]))
		ret[i] = &Task{Id: string(s[i][1]), DownloadStatus: string(s[i][2]), Cid: string(s[i][4]), URL: string(s[i][5]), TaskName: unescapeName(string(s[i][6])), TaskType: byte(b), Flag: ty}
	}
	exp = regexp.MustCompile(`<li class="next"><a href="([^"]+)">[^<>]*</a></li>`)
	return ret, exp.FindSubmatch(in) != nil
}
// DelayTask extends the lifetime of a single task and logs the remaining
// live time reported by the server.
func DelayTask(taskid string) error {
	uri := fmt.Sprintf(TASKDELAY_URL, taskid+"_1", "task", current_timestamp())
	r, err := get(uri)
	if err != nil {
		return err
	}
	exp := regexp.MustCompile(`^task_delay_resp\((.*}),\[.*\]\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return invalidResponseErr
	}
	var resp struct {
		K struct {
			Llt string `json:"left_live_time"`
		} `json:"0"`
		Result byte `json:"result"`
	}
	// NOTE(review): the unmarshal error is ignored; a malformed payload just
	// logs an empty live time.
	json.Unmarshal(s[1], &resp)
	log.Printf("%s: %s\n", taskid, resp.K.Llt)
	return nil
}
// redownload asks the server to restart the given tasks, encoding each
// task's fields as repeated form arrays.
func redownload(tasks []*Task) error {
	form := make([]string, 0, len(tasks)+2)
	for i, _ := range tasks {
		// NOTE(review): this OR-filter skips a task when it is expired, OR
		// not failed, OR not pending — i.e. only tasks that are both failed
		// and pending (and not expired) get restarted. If the intent was
		// "skip anything not restartable" an AND may have been meant;
		// confirm against Task.failed()/pending() semantics.
		if tasks[i].expired() || !tasks[i].failed() || !tasks[i].pending() {
			continue
		}
		v := url.Values{}
		v.Add("id[]", tasks[i].Id)
		v.Add("url[]", tasks[i].URL)
		v.Add("cid[]", tasks[i].Cid)
		v.Add("download_status[]", tasks[i].DownloadStatus)
		v.Add("taskname[]", tasks[i].TaskName)
		form = append(form, v.Encode())
	}
	if len(form) == 0 {
		return errors.New("No tasks need to restart.")
	}
	form = append(form, "type=1")
	form = append(form, "interfrom=task")
	uri := fmt.Sprintf(REDOWNLOAD_URL, current_timestamp())
	r, err := post(uri, strings.Join(form, "&"))
	if err != nil {
		return err
	}
	log.Printf("%s\n", r)
	return nil
}
// FillBtList retrieves the complete file list of a bt task, paging through
// fillBtList and concatenating the records. On an io.ErrUnexpectedEOF for
// the first page it retries once with a smaller page size of 100.
func FillBtList(taskid, infohash string) (*bt_list, error) {
	var pgsize = _bt_page_size
retry:
	m, err := fillBtList(taskid, infohash, 1, pgsize)
	if err == io.ErrUnexpectedEOF && pgsize == _bt_page_size {
		pgsize = "100"
		goto retry
	}
	if err != nil {
		return nil, err
	}
	var list = bt_list{}
	list.BtNum = m.BtNum
	list.Id = m.Id
	list.InfoId = m.InfoId
	if len(m.Record) > 0 {
		list.Record = append(list.Record, m.Record...)
	}
	total, _ := strconv.Atoi(list.BtNum)
	size, _ := strconv.Atoi(pgsize)
	pageNum := total/size + 1
	next := 2
	for next <= pageNum {
		m, err = fillBtList(taskid, infohash, next, pgsize)
		if err == nil {
			if len(m.Record) > 0 {
				list.Record = append(list.Record, m.Record...)
			}
			next++
		} else {
			// NOTE(review): on error `next` is not advanced, so a page that
			// keeps failing makes this loop retry forever with no backoff.
			log.Println("err in fillBtList()")
		}
	}
	return &list, nil
}
// fillBtList fetches one page of a bt task's file list, unwraps the
// fill_bt_list(...) JSONP payload and normalizes the file names (backslash
// separators to slashes, HTML unescaping). A server-side alert('...') is
// surfaced as an error.
func fillBtList(taskid, infohash string, page int, pgsize string) (*_bt_list, error) {
	uri := fmt.Sprintf(FILLBTLIST_URL, taskid, infohash, page, M.Uid, "task", current_timestamp())
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: pgsize})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	r, err := readBody(resp)
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`fill_bt_list\({"Result":(.*)}\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		// The server reports failures as a JavaScript alert; relay its text.
		exp = regexp.MustCompile(`alert\('(.*)'\);.*`)
		s = exp.FindSubmatch(r)
		if s != nil {
			return nil, errors.New(string(s[1]))
		}
		return nil, invalidResponseErr
	}
	var bt_list _bt_list
	json.Unmarshal(s[1], &bt_list)
	exp = regexp.MustCompile(`\\`)
	for i, _ := range bt_list.Record {
		bt_list.Record[i].FileName = exp.ReplaceAllLiteralString(bt_list.Record[i].FileName, `/`)
		bt_list.Record[i].FileName = unescapeName(bt_list.Record[i].FileName)
	}
	return &bt_list, nil
}
// AddTask creates a new download task for req, dispatching on its scheme.
// supported uri schemes:
// 'ed2k', 'http', 'https', 'ftp', 'bt', 'magnet', 'thunder', 'Flashget', 'qqdl'
// A bare 40-character hex/alnum string is treated as a bt info-hash.
func AddTask(req string) error {
	ttype := _TASK_TYPE
	if strings.HasPrefix(req, "magnet:") || strings.Contains(req, "get_torrent?userid=") {
		ttype = _TASK_TYPE_MAGNET
	} else if strings.HasPrefix(req, "ed2k://") {
		ttype = _TASK_TYPE_ED2K
	} else if strings.HasPrefix(req, "bt://") || strings.HasSuffix(req, ".torrent") {
		ttype = _TASK_TYPE_BT
	} else if ok, _ := regexp.MatchString(`^[a-zA-Z0-9]{40,40}$`, req); ok {
		ttype = _TASK_TYPE_BT
		req = "bt://" + req
	}
	switch ttype {
	case _TASK_TYPE, _TASK_TYPE_ED2K:
		return addSimpleTask(req)
	case _TASK_TYPE_BT:
		return addBtTask(req)
	case _TASK_TYPE_MAGNET:
		return addMagnetTask(req)
	case _TASK_TYPE_INVALID:
		fallthrough
	default:
		return unexpectedErr
	}
	// The panic() that used to follow this switch was unreachable (every
	// case, including default, returns) and has been removed; `go vet`
	// flags such dead code.
}
// AddBatchTasks submits several plain URLs as one batch. When old task ids
// are supplied the batch is flagged as a re-add from the history page,
// otherwise as a fresh submission from the task page.
func AddBatchTasks(urls []string, oids ...string) error {
	// TODO: filter urls
	v := url.Values{}
	for i := 0; i < len(urls); i++ {
		v.Add("cid[]", "")
		v.Add("url[]", url.QueryEscape(urls[i]))
	}
	v.Add("class_id", "0")
	if len(oids) > 0 {
		// One "0," database marker per old task id.
		var b bytes.Buffer
		for i := 0; i < len(oids); i++ {
			b.WriteString("0,")
		}
		v.Add("batch_old_taskid", strings.Join(oids, ","))
		v.Add("batch_old_database", b.String())
		v.Add("interfrom", "history")
	} else {
		v.Add("batch_old_taskid", "0,")
		v.Add("batch_old_database", "0,")
		v.Add("interfrom", "task")
	}
	tm := current_timestamp()
	uri := fmt.Sprintf(BATCHTASKCOMMIT_URL, tm, tm)
	r, err := post(uri, v.Encode())
	// NOTE(review): the raw response is printed to stdout rather than logged.
	fmt.Printf("%s\n", r)
	return err
}
// addSimpleTask submits a single http/https/ftp/ed2k download. The flow is
// two requests: a "check" call that yields the task precreate info, then the
// commit call. An optional old task id marks the submission as a re-add from
// the history page.
func addSimpleTask(uri string, oid ...string) error {
	var from string
	if len(oid) > 0 {
		from = "history"
	} else {
		from = "task"
	}
	dest := fmt.Sprintf(TASKCHECK_URL, url.QueryEscape(uri), from, current_random(), current_timestamp())
	r, err := get(dest)
	if err == nil {
		// NOTE(review): this := shadows the outer err inside the block; all
		// paths in here return explicitly, so the final `return err` below
		// only ever reports the outer get() error — correct but confusing.
		task_pre, err := getTaskPre(r)
		if err != nil {
			return err
		}
		var t_type string
		if strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "ftp://") || strings.HasPrefix(uri, "https://") {
			t_type = strconv.Itoa(_TASK_TYPE)
		} else if strings.HasPrefix(uri, "ed2k://") {
			t_type = strconv.Itoa(_TASK_TYPE_ED2K)
		} else {
			return errors.New("Invalid protocol scheme.")
		}
		v := url.Values{}
		v.Add("callback", "ret_task")
		v.Add("uid", M.Uid)
		v.Add("cid", task_pre.Cid)
		v.Add("gcid", task_pre.GCid)
		v.Add("size", task_pre.SizeCost)
		v.Add("goldbean", task_pre.Goldbean)
		v.Add("silverbean", task_pre.Silverbean)
		v.Add("t", task_pre.FileName)
		v.Add("url", uri)
		v.Add("type", t_type)
		if len(oid) > 0 {
			v.Add("o_taskid", oid[0])
			v.Add("o_page", "history")
		} else {
			v.Add("o_page", "task")
			v.Add("o_taskid", "0")
		}
		dest = TASKCOMMIT_URL + v.Encode()
		r, err = get(dest)
		if err != nil {
			return err
		}
		// A ret_task(...) callback in the body signals success.
		if ok, _ := regexp.Match(`ret_task\(.*\)`, r); ok {
			return nil
		} else {
			return invalidResponseErr
		}
	}
	return err
}
// addBtTask adds a bt task: a "bt://<infohash>" uri is resolved through the
// server's get_torrent endpoint as a magnet-style task, anything else is
// treated as a local .torrent file to upload.
func addBtTask(uri string) error {
	if strings.HasPrefix(uri, "bt://") {
		return addMagnetTask(fmt.Sprintf(GETTORRENT_URL, M.Uid, uri[5:]))
	}
	return addTorrentTask(uri)
}
// addMagnetTask resolves a magnet link (or get_torrent URL) through the
// queryUrl endpoint and commits the resulting bt task. An optional old task
// id marks the submission as a re-add from the history page.
// btTaskAlreadyErr is returned when the server reports a duplicate.
func addMagnetTask(link string, oid ...string) error {
	uri := fmt.Sprintf(URLQUERY_URL, url.QueryEscape(link), current_random())
	r, err := get(uri)
	if err != nil {
		return err
	}
	exp := regexp.MustCompile(`queryUrl\((1,.*)\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		// queryUrl(-1, '<infohash>') means the task already exists.
		if ok, _ := regexp.Match(`queryUrl\(-1,'[0-9A-Za-z]{40,40}'.*`, r); ok {
			return btTaskAlreadyErr
		}
		return invalidResponseErr
	}
	if task := evalParse(s[1]); task != nil {
		v := url.Values{}
		v.Add("uid", M.Uid)
		v.Add("btname", task.Name)
		v.Add("cid", task.InfoId)
		v.Add("tsize", task.Size)
		findex := strings.Join(task.Index, "_")
		size := strings.Join(task.Sizes, "_")
		v.Add("findex", findex)
		v.Add("size", size)
		if len(oid) > 0 {
			v.Add("from", "history")
			v.Add("o_taskid", oid[0])
			v.Add("o_page", "history")
		} else {
			v.Add("from", "task")
		}
		dest := fmt.Sprintf(BTTASKCOMMIT_URL, current_timestamp())
		r, err = post(dest, v.Encode())
		// BUG FIX: the response was previously matched without checking the
		// POST error, so a failed request (nil r) was misreported as an
		// invalid response instead of the transport error.
		if err != nil {
			return err
		}
		exp = regexp.MustCompile(`jsonp.*\(\{"id":"(\d+)","avail_space":"\d+".*\}\)`)
		s = exp.FindSubmatch(r)
		if s == nil {
			return invalidResponseErr
		}
	} else {
		return invalidResponseErr
	}
	return nil
}
// addTorrentTask uploads a local .torrent file as a multipart form and
// commits the resulting bt task with every contained file selected. A
// response carrying edit_bt_list instead of btResult means the task already
// exists (btTaskAlreadyErr).
func addTorrentTask(filename string) (err error) {
	var file *os.File
	if file, err = os.Open(filename); err != nil {
		return
	}
	defer file.Close()
	// if _, err = taipei.GetMetaInfo(filename); err != nil {
	// 	return
	// }
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	var part io.Writer
	if part, err = writer.CreateFormFile("filepath", filename); err != nil {
		return
	}
	// NOTE(review): the io.Copy error (and the writer.Close that would
	// finalize the multipart body) are not checked/performed here; verify
	// the server tolerates the unterminated form.
	io.Copy(part, file)
	writer.WriteField("random", current_random())
	writer.WriteField("interfrom", "task")

	dest := TORRENTUPLOAD_URL
	log.Println("==>", dest)
	req, err := http.NewRequest("POST", dest, bytes.NewReader(body.Bytes()))
	if err != nil {
		return
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return
	}
	defer resp.Body.Close()
	// NOTE(review): the readBody error is not checked before matching r.
	r, err := readBody(resp)
	exp := regexp.MustCompile(`<script>document\.domain="xunlei\.com";var btResult =(\{.+\});var btRtcode = 0</script>`)
	s := exp.FindSubmatch(r)
	if s != nil {
		var result _btup_result
		json.Unmarshal(s[1], &result)
		v := url.Values{}
		v.Add("uid", M.Uid)
		v.Add("btname", result.Name) // TODO: filter illegal char
		v.Add("cid", result.InfoId)
		v.Add("tsize", strconv.Itoa(result.Size))
		findex := make([]string, 0, len(result.List))
		size := make([]string, 0, len(result.List))
		for i := 0; i < len(result.List); i++ {
			findex = append(findex, result.List[i].Id)
			size = append(size, result.List[i].Size)
		}
		v.Add("findex", strings.Join(findex, "_"))
		v.Add("size", strings.Join(size, "_"))
		v.Add("from", "0")
		dest = fmt.Sprintf(BTTASKCOMMIT_URL, current_timestamp())
		r, err = post(dest, v.Encode())
		exp = regexp.MustCompile(`jsonp.*\(\{"id":"(\d+)","avail_space":"\d+".*\}\)`)
		s = exp.FindSubmatch(r)
		if s == nil {
			return invalidResponseErr
		}
		// tasklist_nofresh(4, 1)
		// FillBtList(string(s[1]))
		return nil
	}
	exp = regexp.MustCompile(`parent\.edit_bt_list\((\{.*\}),'`)
	s = exp.FindSubmatch(r)
	if s == nil {
		return errors.New("Add bt task failed.")
	}
	// var result _btup_result
	// json.Unmarshal(s[1], &result)
	return btTaskAlreadyErr
}
// ProcessTaskDaemon starts a goroutine that refreshes task progress either
// when poked through ch or every 60 seconds. On a timer-driven failure it
// sleeps briefly and re-queues itself via ch.
func ProcessTaskDaemon(ch chan byte, callback func(*Task)) {
	// Seed the task cache if it is empty.
	if len(M.Tasks) == 0 {
		GetIncompletedTasks()
	}
	go func() {
		for {
			select {
			case <-ch:
				err := process_task(M.Tasks, callback)
				if err != nil {
					log.Println("error in ProcessTask():", err)
				}
			case <-time.After(60 * time.Second):
				err := process_task(M.Tasks, callback)
				if err != nil {
					log.Println("error in ProcessTask():", err)
					time.Sleep(5 * time.Second)
					ch <- 1
				}
			}
		}
	}()
}
// ProcessTask refreshes progress for all cached tasks once, invoking
// callback for each updated task.
func ProcessTask(callback func(*Task)) error {
	return process_task(M.Tasks, callback)
}
// process_task queries the server for progress on every active (normal,
// downloading) task in tasks, splitting the ids into the bt/non-bt lists the
// endpoint expects, applies each reported update to the cached task and
// invokes callback for it.
func process_task(tasks map[string]*Task, callback func(*Task)) error {
	l := len(tasks)
	if l == 0 {
		return errors.New("No tasks in progress.")
	}
	ct := current_timestamp()
	uri := fmt.Sprintf(TASKPROCESS_URL, ct, ct)
	v := url.Values{}
	list := make([]string, 0, l)
	nm_list := make([]string, 0, l)
	bt_list := make([]string, 0, l)
	for i, _ := range tasks {
		if tasks[i].status() == _FLAG_normal && tasks[i].DownloadStatus == "1" {
			list = append(list, tasks[i].Id)
			// TaskType 0 is a bt task.
			if tasks[i].TaskType == 0 {
				bt_list = append(bt_list, tasks[i].Id)
			} else {
				nm_list = append(nm_list, tasks[i].Id)
			}
		}
	}
	v.Add("list", strings.Join(list, ","))
	v.Add("nm_list", strings.Join(nm_list, ","))
	v.Add("bt_list", strings.Join(bt_list, ","))
	v.Add("uid", M.Uid)
	v.Add("interfrom", "task")

	var r []byte
	var err error
	if r, err = post(uri, v.Encode()); err != nil {
		return err
	}
	exp := regexp.MustCompile(`jsonp\d+\(\{"Process":(.*)\}\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return invalidResponseErr
	}
	var res _ptask_resp
	err = json.Unmarshal(s[1], &res)
	if err != nil {
		return err
	}
	for i, _ := range res.List {
		task := tasks[res.List[i].Id]
		// BUG FIX: the server may report an id we are not tracking; the map
		// lookup then yields nil and task.update would dereference it. Skip
		// unknown ids instead of panicking.
		if task == nil {
			continue
		}
		task.update(&res.List[i])
		if callback != nil {
			callback(task)
		}
	}
	return nil
}
// GetTorrentByHash downloads the raw torrent file for the given info-hash.
// A server-side alert('...') in the body indicates failure.
func GetTorrentByHash(hash string) ([]byte, error) {
	uri := fmt.Sprintf(GETTORRENT_URL, M.Uid, strings.ToUpper(hash))
	r, err := get(uri)
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`alert\('(.*)'\)`)
	s := exp.FindSubmatch(r)
	if s != nil {
		log.Printf("%s\n", s[1])
		return nil, invalidResponseErr
	}
	return r, nil
}
// GetTorrentFileByHash downloads the torrent identified by hash and writes
// it to file (mode 0644). It refuses to overwrite an existing file.
func GetTorrentFileByHash(hash, file string) error {
	// os.Stat succeeds iff the path exists. The original also tested
	// `stat != nil`, which is always false when err != nil — dead code.
	if _, err := os.Stat(file); err == nil {
		return errors.New("Target file already exists.")
	}
	r, err := GetTorrentByHash(hash)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, r, 0644)
}
// PauseTasks pauses the tasks with the given ids via the remote API. The id
// list is joined comma-separated with a trailing comma, as the endpoint
// expects. Returns invalidResponseErr when the reply is not the expected
// `pause_task_resp()` callback.
func PauseTasks(ids []string) error {
	tids := strings.Join(ids, ",")
	tids += ","
	uri := fmt.Sprintf(TASKPAUSE_URL, tids, M.Uid, current_timestamp())
	r, err := get(uri)
	if err != nil {
		return err
	}
	// bytes.Equal is the idiomatic (and cheaper) form of
	// bytes.Compare(...) != 0 for a pure equality check (staticcheck S1004).
	if !bytes.Equal(r, []byte("pause_task_resp()")) {
		return invalidResponseErr
	}
	return nil
}
// DelayAllTasks asks the server to extend the lifetime of all tasks at
// once. The raw response is only logged; any transport error is returned.
func DelayAllTasks() error {
	r, err := get(DELAYONCE_URL)
	if err != nil {
		return err
	}
	log.Printf("%s\n", r)
	return nil
}
// ReAddTasks re-submits expired or deleted tasks from ts. Non-BT tasks are
// re-added singly (Readd) or as one batch (AddBatchTasks); BT tasks are
// re-created from their stored torrent via addMagnetTask. Errors are
// logged, not returned.
func ReAddTasks(ts map[string]*Task) {
	nbt := make([]*Task, 0, len(ts))
	bt := make([]*Task, 0, len(ts))
	for i, _ := range ts {
		if ts[i].expired() || ts[i].deleted() {
			if ts[i].IsBt() {
				bt = append(bt, ts[i])
			} else {
				nbt = append(nbt, ts[i])
			}
		}
	}
	if len(nbt) == 1 {
		if err := nbt[0].Readd(); err != nil {
			log.Println(err)
		}
	} else if len(nbt) > 1 {
		urls, ids := extractTasks(nbt)
		if err := AddBatchTasks(urls, ids...); err != nil {
			log.Println(err)
		}
	}
	for i, _ := range bt {
		if err := addMagnetTask(fmt.Sprintf(GETTORRENT_URL, M.Uid, bt[i].Cid), bt[i].Id); err != nil {
			log.Println(err)
		}
	}
}
// RenameTask renames the task identified by taskid to newname.
// Returns noSuchTaskErr when the task is not present in the local cache.
func RenameTask(taskid, newname string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return rename_task(taskid, newname, task.TaskType)
}
// rename_task performs the rename API call. tasktype 0 marks a BT task
// (bt=1 in the form); anything else is a normal task.
func rename_task(taskid, newname string, tasktype byte) error {
	v := url.Values{}
	v.Add("taskid", taskid)
	if tasktype == 0 {
		v.Add("bt", "1")
	} else {
		v.Add("bt", "0")
	}
	v.Add("filename", newname)
	r, err := get(RENAME_URL + v.Encode())
	if err != nil {
		return err
	}
	// The reply is a JSON object wrapped in one leading and one trailing
	// delimiter byte; guard the slice bounds and the decode error instead
	// of ignoring them (the original could panic on a short reply and
	// silently accept garbage).
	if len(r) < 2 {
		return invalidResponseErr
	}
	var resp struct {
		Result   int    `json:"result"`
		TaskId   int    `json:"taskid"`
		FileName string `json:"filename"`
	}
	if err = json.Unmarshal(r[1:len(r)-1], &resp); err != nil {
		return err
	}
	if resp.Result != 0 {
		return fmt.Errorf("error in rename task: %d", resp.Result)
	}
	log.Println(resp.TaskId, "=>", resp.FileName)
	return nil
}
// DeleteTasks is a placeholder for batch task deletion.
// TODO: not implemented — currently a no-op that always returns nil.
func DeleteTasks(ids []string) error {
	return nil
}
// DeleteTask removes the task identified by taskid.
// Returns noSuchTaskErr when the id is not in the local cache.
func DeleteTask(taskid string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return task.Remove()
}
// PurgeTask permanently removes the task identified by taskid.
// Returns noSuchTaskErr when the id is not in the local cache.
func PurgeTask(taskid string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return task.Purge()
}
// ResumeTasks is a placeholder for resuming tasks matching pattern.
// TODO: not implemented — currently a no-op that always returns nil.
func ResumeTasks(pattern string) error {
	return nil
}
add response status in log
package api
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"sync"
"time"
"github.com/matzoe/xunlei/cookiejar"
)
var noSuchTaskErr error
var invalidResponseErr error
var unexpectedErr error
var taskNotCompletedErr error
var invalidLoginErr error
var loginFailedErr error
var ReuseSessionErr error
var btTaskAlreadyErr error
var taskNoRedownCapErr error
var defaultConn struct {
*http.Client
sync.Mutex
}
// init wires up the shared HTTP client (with a persistent cookie jar) and
// the package's sentinel errors.
func init() {
	jar, _ := cookiejar.New(nil)
	// Keyed literal: the original unkeyed &http.Client{nil, nil, jar} breaks
	// whenever http.Client's fields change and is flagged by `go vet`
	// (composites). Only Jar needs setting; the zero values of the other
	// fields are the defaults anyway.
	defaultConn.Client = &http.Client{Jar: jar}
	// The zero sync.Mutex is ready to use — no explicit assignment needed.
	noSuchTaskErr = errors.New("No such TaskId in list.")
	invalidResponseErr = errors.New("Invalid response.")
	unexpectedErr = errors.New("Unexpected error.")
	taskNotCompletedErr = errors.New("Task not completed.")
	invalidLoginErr = errors.New("Invalid login account.")
	loginFailedErr = errors.New("Login failed.")
	ReuseSessionErr = errors.New("Previous session expired.") // fixed typo "exipred"
	btTaskAlreadyErr = errors.New("Bt task already exists.")
	taskNoRedownCapErr = errors.New("Task not capable for restart.")
}
// get performs an HTTP GET on dest through the shared client, serializing
// access with defaultConn's mutex, and returns the (decompressed) body.
// Requests interrupted by io.EOF are retried.
//
// NOTE(review): the goto-based retry is unbounded — a peer that always
// drops the connection loops forever. Consider capping attempts.
func get(dest string) ([]byte, error) {
	log.Println("==>", dest)
	req, err := http.NewRequest("GET", dest, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	// Log the response status line (e.g. "200 OK") for debugging.
	log.Println(resp.Status)
	return readBody(resp)
}
// post issues an application/x-www-form-urlencoded POST of data to dest,
// retrying on io.EOF (connection dropped by the peer), and returns the
// (decompressed) response body.
func post(dest string, data string) ([]byte, error) {
	log.Println("==>", dest)
retry:
	// Build a fresh request on every attempt: the body reader is consumed
	// by a failed Do, so re-submitting the old request would send an empty
	// (or truncated) body on retry.
	req, err := http.NewRequest("POST", dest, strings.NewReader(data))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
	return readBody(resp)
}
// Login authenticates against the Xunlei login service using the account id
// and a pre-hashed password. It first fetches a verify code (retrying until
// the check_result cookie carries one), posts the credentials, then primes
// the lixian session. On success M.Uid is populated from the userid cookie.
//
// NOTE(review): the goto-loop retry for the verify code is unbounded.
func Login(id, passhash string) (err error) {
	var vcode string
	if len(id) == 0 {
		err = invalidLoginErr
		return
	}
	loginUrl := fmt.Sprintf("http://login.xunlei.com/check?u=%s&cachetime=%d", id, current_timestamp())
	u, _ := url.Parse("http://xunlei.com/")
loop:
	if _, err = get(loginUrl); err != nil {
		return
	}
	cks := defaultConn.Client.Jar.Cookies(u)
	for i, _ := range cks {
		if cks[i].Name == "check_result" {
			if len(cks[i].Value) < 3 {
				goto loop
			}
			// Cookie format is "<n>:<CODE>"; strip the 2-char prefix.
			vcode = cks[i].Value[2:]
			vcode = strings.ToUpper(vcode)
			log.Println("verify_code:", vcode)
			break
		}
	}
	v := url.Values{}
	v.Set("u", id)
	v.Set("p", hashPass(passhash, vcode))
	v.Set("verifycode", vcode)
	if _, err = post("http://login.xunlei.com/sec2login/", v.Encode()); err != nil {
		return
	}
	M.Uid = getCookie("http://xunlei.com", "userid")
	log.Printf("uid: %s\n", M.Uid)
	if len(M.Uid) == 0 {
		err = loginFailedErr
		return
	}
	var r []byte
	// A healthy session page is expected to be at least 512 bytes long.
	if r, err = get(fmt.Sprintf("%slogin?cachetime=%d&from=0", DOMAIN_LIXIAN, current_timestamp())); err != nil || len(r) < 512 {
		err = unexpectedErr
	}
	return
}
// SaveSession persists the current cookie jar to cookieFile so a later run
// can resume the login via ResumeSession.
func SaveSession(cookieFile string) error {
	return defaultConn.Client.Jar.(*cookiejar.Jar).Save(cookieFile)
}
// ResumeSession restores the cookie jar from cookieFile (when non-empty)
// and verifies the session is still live; returns ReuseSessionErr when the
// previous login has expired.
func ResumeSession(cookieFile string) (err error) {
	if cookieFile != "" {
		if err = defaultConn.Client.Jar.(*cookiejar.Jar).Load(cookieFile); err != nil {
			err = errors.New("Invalid cookie file.")
			return
		}
	}
	if !IsOn() {
		err = ReuseSessionErr
	}
	return
}
// IsOn reports whether the stored session is still authenticated: the
// userid cookie must exist and the task page must not redirect to the
// error location. It also backfills M.Uid when unset.
func IsOn() bool {
	uid := getCookie("http://xunlei.com", "userid")
	if len(uid) == 0 {
		return false
	}
	r, _ := get(fmt.Sprintf("%suser_task?userid=%s&st=0", DOMAIN_LIXIAN, uid))
	// The page embeds a JS redirect to an error URL when the session died.
	if ok, _ := regexp.Match(`top.location='http://cloud.vip.xunlei.com/task.html\?error=`, r); ok {
		// log.Println("previous login timeout")
		return false
	}
	if len(M.Uid) == 0 {
		M.Uid = uid
	}
	return true
}
// getCookie returns the value of the named cookie stored for uri,
// or the empty string when no such cookie exists.
func getCookie(uri, name string) string {
	u, _ := url.Parse(uri)
	for _, ck := range defaultConn.Client.Jar.Cookies(u) {
		if ck.Name == name {
			return ck.Value
		}
	}
	return ""
}
// fetchTasks retrieves one page of the task list for the given status
// bucket, decodes it, unescapes the task names, and returns the tasks.
// Extracted because GetTasks/GetCompletedTasks/GetIncompletedTasks shared
// this body verbatim.
func fetchTasks(status int) ([]*Task, error) {
	b, err := tasklist_nofresh(status, 1)
	if err != nil {
		return nil, err
	}
	var resp _task_resp
	if err = json.Unmarshal(b, &resp); err != nil {
		return nil, err
	}
	ts := make([]*Task, 0, len(resp.Info.Tasks))
	for i := range resp.Info.Tasks {
		resp.Info.Tasks[i].TaskName = unescapeName(resp.Info.Tasks[i].TaskName)
		ts = append(ts, &resp.Info.Tasks[i])
	}
	return ts, nil
}

// GetTasks fetches the mixed (downloading|completed|expired) task list,
// replaces the "normal" group in the local cache, and returns the tasks.
func GetTasks() ([]*Task, error) {
	ts, err := fetchTasks(_STATUS_mixed)
	if err != nil {
		return nil, err
	}
	M.invalidateGroup(_FLAG_normal)
	M.pushTasks(ts)
	return ts, nil
}

// GetCompletedTasks fetches completed tasks and merges them into the cache.
func GetCompletedTasks() ([]*Task, error) {
	ts, err := fetchTasks(_STATUS_completed)
	if err != nil {
		return nil, err
	}
	M.pushTasks(ts)
	return ts, nil
}

// GetIncompletedTasks fetches in-progress tasks and merges them into the
// cache.
func GetIncompletedTasks() ([]*Task, error) {
	ts, err := fetchTasks(_STATUS_downloading)
	if err != nil {
		return nil, err
	}
	M.pushTasks(ts)
	return ts, nil
}
// GetGdriveId returns the cached gdrive id, fetching it (together with the
// account records) from the task-list endpoint on first use.
func GetGdriveId() (gid string, err error) {
	if len(M.Gid) == 0 {
		var b []byte
		b, err = tasklist_nofresh(_STATUS_mixed, 1)
		if err != nil {
			return
		}
		var resp _task_resp
		err = json.Unmarshal(b, &resp)
		if err != nil {
			return
		}
		// Cache the id and the account/user records for later use.
		M.Gid = resp.Info.User.Cookie
		M.Account = &resp.Info.User
		M.AccountInfo = &resp.UserInfo
	}
	gid = M.Gid
	log.Println("gdriveid:", gid)
	return
}
// tasklist_nofresh fetches one page of the remote task list for the given
// status bucket and returns the raw JSON payload extracted from the
// rebuild({...}) callback wrapper.
func tasklist_nofresh(tid, page int) ([]byte, error) {
	/*
	   tid:
	   1 downloading
	   2 completed
	   4 downloading|completed|expired
	   11 deleted - not used now?
	   13 expired - not used now?
	*/
	// Unsupported buckets fall back to the mixed view (4).
	if tid != 4 && tid != 1 && tid != 2 {
		tid = 4
	}
	uri := fmt.Sprintf(SHOWTASK_UNFRESH, tid, page, _page_size, page)
	r, err := get(uri)
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`rebuild\((\{.*\})\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return nil, invalidResponseErr
	}
	return s[1], nil
}
// readExpired fetches the "expired tasks" HTML page. It duplicates get()
// because the endpoint requires extra cookies (lx_nf_all, pagenum) on the
// request. io.EOF triggers an (unbounded) retry, like get().
func readExpired() ([]byte, error) {
	uri := fmt.Sprintf(EXPIRE_HOME, M.Uid)
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "lx_nf_all", Value: url.QueryEscape(_expired_ck)})
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: _page_size})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
	return readBody(resp)
}
// GetExpiredTasks scrapes the expired-task page, replaces the local
// "expired" group with the result, and returns it.
func GetExpiredTasks() ([]*Task, error) {
	r, err := readExpired()
	ts, _ := parseHistory(r, "4")
	M.invalidateGroup(_FLAG_expired)
	M.pushTasks(ts)
	return ts, err
}
// GetDeletedTasks scrapes every page of the deletion history, replaces the
// local "deleted" and "purged" groups with the result, and returns it.
func GetDeletedTasks() ([]*Task, error) {
	j := 0
	next := true
	var err error
	var r []byte
	var ts []*Task
	tss := make([]*Task, 0, 10)
	for next {
		j++
		r, err = readHistory(j)
		// parseHistory's second result reports whether a "next" link exists.
		ts, next = parseHistory(r, "1")
		tss = append(tss, ts...)
	}
	M.invalidateGroup(_FLAG_deleted)
	M.invalidateGroup(_FLAG_purged)
	M.pushTasks(tss)
	return tss, err
}
// readHistory fetches one page of the deletion-history listing (page <= 0
// means the landing page). Like readExpired it builds the request manually
// because the endpoint expects the lx_nf_all and pagenum cookies.
func readHistory(page int) ([]byte, error) {
	var uri string
	if page > 0 {
		uri = fmt.Sprintf(HISTORY_PAGE, M.Uid, page)
	} else {
		uri = fmt.Sprintf(HISTORY_HOME, M.Uid)
	}
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "lx_nf_all", Value: url.QueryEscape(_deleted_ck)})
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: _page_size})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	log.Println(resp.Status)
	return readBody(resp)
}
// parseHistory extracts tasks from a history/expired HTML page by matching
// the hidden <input> fields rendered for each task. ty becomes each task's
// Flag ("1" deleted, "4" expired). The second result reports whether the
// page has a "next" pagination link.
func parseHistory(in []byte, ty string) ([]*Task, bool) {
	es := `<input id="d_status(\d+)"[^<>]+value="(.*)" />\s+<input id="dflag\d+"[^<>]+value="(.*)" />\s+<input id="dcid\d+"[^<>]+value="(.*)" />\s+<input id="f_url\d+"[^<>]+value="(.*)" />\s+<input id="taskname\d+"[^<>]+value="(.*)" />\s+<input id="d_tasktype\d+"[^<>]+value="(.*)" />`
	exp := regexp.MustCompile(es)
	s := exp.FindAllSubmatch(in, -1)
	ret := make([]*Task, len(s))
	for i, _ := range s {
		// Submatch 7 is d_tasktype; conversion errors yield TaskType 0.
		b, _ := strconv.Atoi(string(s[i][7]))
		ret[i] = &Task{Id: string(s[i][1]), DownloadStatus: string(s[i][2]), Cid: string(s[i][4]), URL: string(s[i][5]), TaskName: unescapeName(string(s[i][6])), TaskType: byte(b), Flag: ty}
	}
	exp = regexp.MustCompile(`<li class="next"><a href="([^"]+)">[^<>]*</a></li>`)
	return ret, exp.FindSubmatch(in) != nil
}
// DelayTask asks the server to extend the lifetime of a single task and
// logs the new "left_live_time" it reports.
func DelayTask(taskid string) error {
	uri := fmt.Sprintf(TASKDELAY_URL, taskid+"_1", "task", current_timestamp())
	r, err := get(uri)
	if err != nil {
		return err
	}
	exp := regexp.MustCompile(`^task_delay_resp\((.*}),\[.*\]\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return invalidResponseErr
	}
	var resp struct {
		K struct {
			Llt string `json:"left_live_time"`
		} `json:"0"`
		Result byte `json:"result"`
	}
	// Surface decode failures instead of silently logging zero values
	// (the original discarded this error).
	if err = json.Unmarshal(s[1], &resp); err != nil {
		return err
	}
	log.Printf("%s: %s\n", taskid, resp.K.Llt)
	return nil
}
// redownload asks the server to restart the given tasks; tasks that do not
// qualify are skipped. Returns an error when nothing qualified.
//
// NOTE(review): the filter `expired() || !failed() || !pending()` only
// keeps tasks that are simultaneously failed AND pending — possibly meant
// to be `expired() || (!failed() && !pending())`. Confirm against the
// status semantics before changing.
func redownload(tasks []*Task) error {
	form := make([]string, 0, len(tasks)+2)
	for i, _ := range tasks {
		if tasks[i].expired() || !tasks[i].failed() || !tasks[i].pending() {
			continue
		}
		v := url.Values{}
		v.Add("id[]", tasks[i].Id)
		v.Add("url[]", tasks[i].URL)
		v.Add("cid[]", tasks[i].Cid)
		v.Add("download_status[]", tasks[i].DownloadStatus)
		v.Add("taskname[]", tasks[i].TaskName)
		form = append(form, v.Encode())
	}
	if len(form) == 0 {
		return errors.New("No tasks need to restart.")
	}
	form = append(form, "type=1")
	form = append(form, "interfrom=task")
	uri := fmt.Sprintf(REDOWNLOAD_URL, current_timestamp())
	r, err := post(uri, strings.Join(form, "&"))
	if err != nil {
		return err
	}
	log.Printf("%s\n", r)
	return nil
}
// FillBtList fetches the complete file listing of a BT task by paging
// through fillBtList. A first-page io.ErrUnexpectedEOF is retried once with
// a larger page size (100).
func FillBtList(taskid, infohash string) (*bt_list, error) {
	var pgsize = _bt_page_size
retry:
	m, err := fillBtList(taskid, infohash, 1, pgsize)
	if err == io.ErrUnexpectedEOF && pgsize == _bt_page_size {
		pgsize = "100"
		goto retry
	}
	if err != nil {
		return nil, err
	}
	var list = bt_list{}
	list.BtNum = m.BtNum
	list.Id = m.Id
	list.InfoId = m.InfoId
	if len(m.Record) > 0 {
		list.Record = append(list.Record, m.Record...)
	}
	total, _ := strconv.Atoi(list.BtNum)
	size, _ := strconv.Atoi(pgsize)
	pageNum := total/size + 1
	// Fetch pages 2..pageNum. Bound consecutive failures per page: the
	// original never advanced `next` on error and so looped forever when a
	// page kept failing.
	const maxRetries = 3
	fails := 0
	next := 2
	for next <= pageNum {
		m, err = fillBtList(taskid, infohash, next, pgsize)
		if err != nil {
			log.Println("err in fillBtList()")
			fails++
			if fails >= maxRetries {
				// Give up on this page; return what we have plus the error.
				return &list, err
			}
			continue
		}
		fails = 0
		if len(m.Record) > 0 {
			list.Record = append(list.Record, m.Record...)
		}
		next++
	}
	return &list, nil
}
// fillBtList fetches a single page of a BT task's file listing. The page
// size travels in a "pagenum" cookie, so the request is built manually
// instead of going through get(). File names get backslashes normalized to
// slashes and HTML entities unescaped before returning.
func fillBtList(taskid, infohash string, page int, pgsize string) (*_bt_list, error) {
	uri := fmt.Sprintf(FILLBTLIST_URL, taskid, infohash, page, M.Uid, "task", current_timestamp())
	log.Println("==>", uri)
	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
	req.AddCookie(&http.Cookie{Name: "pagenum", Value: pgsize})
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return nil, err
	}
	log.Println(resp.Status)
	r, err := readBody(resp)
	resp.Body.Close()
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`fill_bt_list\({"Result":(.*)}\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		// A server-side failure is reported as alert('message').
		exp = regexp.MustCompile(`alert\('(.*)'\);.*`)
		s = exp.FindSubmatch(r)
		if s != nil {
			return nil, errors.New(string(s[1]))
		}
		return nil, invalidResponseErr
	}
	var bt_list _bt_list
	json.Unmarshal(s[1], &bt_list)
	exp = regexp.MustCompile(`\\`)
	for i, _ := range bt_list.Record {
		bt_list.Record[i].FileName = exp.ReplaceAllLiteralString(bt_list.Record[i].FileName, `/`)
		bt_list.Record[i].FileName = unescapeName(bt_list.Record[i].FileName)
	}
	return &bt_list, nil
}
// AddTask creates a download task for req, dispatching on its scheme.
// Supported uri schemes:
// 'ed2k', 'http', 'https', 'ftp', 'bt', 'magnet', 'thunder', 'Flashget', 'qqdl'.
// A bare 40-character alphanumeric string is treated as a BT info-hash.
func AddTask(req string) error {
	ttype := _TASK_TYPE
	if strings.HasPrefix(req, "magnet:") || strings.Contains(req, "get_torrent?userid=") {
		ttype = _TASK_TYPE_MAGNET
	} else if strings.HasPrefix(req, "ed2k://") {
		ttype = _TASK_TYPE_ED2K
	} else if strings.HasPrefix(req, "bt://") || strings.HasSuffix(req, ".torrent") {
		ttype = _TASK_TYPE_BT
	} else if ok, _ := regexp.MatchString(`^[a-zA-Z0-9]{40,40}$`, req); ok {
		ttype = _TASK_TYPE_BT
		req = "bt://" + req
	}
	// Every branch returns, so the trailing panic(unexpectedErr.Error())
	// the original carried after this switch was unreachable; removed.
	switch ttype {
	case _TASK_TYPE, _TASK_TYPE_ED2K:
		return addSimpleTask(req)
	case _TASK_TYPE_BT:
		return addBtTask(req)
	case _TASK_TYPE_MAGNET:
		return addMagnetTask(req)
	case _TASK_TYPE_INVALID:
		fallthrough
	default:
		return unexpectedErr
	}
}
// AddBatchTasks submits several URLs as one batch task-creation request.
// When oids are given the batch is treated as a re-add from history and
// each new task is linked to its old task id.
//
// NOTE(review): urls pass through url.QueryEscape and are then encoded
// again by Values.Encode — verify the server really expects double-encoded
// URLs here.
func AddBatchTasks(urls []string, oids ...string) error {
	// TODO: filter urls
	v := url.Values{}
	for i := 0; i < len(urls); i++ {
		v.Add("cid[]", "")
		v.Add("url[]", url.QueryEscape(urls[i]))
	}
	v.Add("class_id", "0")
	if len(oids) > 0 {
		// batch_old_database is "0," repeated once per old task id.
		var b bytes.Buffer
		for i := 0; i < len(oids); i++ {
			b.WriteString("0,")
		}
		v.Add("batch_old_taskid", strings.Join(oids, ","))
		v.Add("batch_old_database", b.String())
		v.Add("interfrom", "history")
	} else {
		v.Add("batch_old_taskid", "0,")
		v.Add("batch_old_database", "0,")
		v.Add("interfrom", "task")
	}
	tm := current_timestamp()
	uri := fmt.Sprintf(BATCHTASKCOMMIT_URL, tm, tm)
	r, err := post(uri, v.Encode())
	fmt.Printf("%s\n", r)
	return err
}
// addSimpleTask commits a plain http/https/ftp/ed2k download. It first asks
// the task-check endpoint for the file's metadata (cid/gcid/size/...), then
// posts the commit. Passing an oid marks this as a re-add from history.
func addSimpleTask(uri string, oid ...string) error {
	var from string
	if len(oid) > 0 {
		from = "history"
	} else {
		from = "task"
	}
	dest := fmt.Sprintf(TASKCHECK_URL, url.QueryEscape(uri), from, current_random(), current_timestamp())
	r, err := get(dest)
	if err == nil {
		// NOTE: `:=` shadows the outer err inside this block; the trailing
		// `return err` at the bottom only ever returns the get() error.
		task_pre, err := getTaskPre(r)
		if err != nil {
			return err
		}
		var t_type string
		if strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "ftp://") || strings.HasPrefix(uri, "https://") {
			t_type = strconv.Itoa(_TASK_TYPE)
		} else if strings.HasPrefix(uri, "ed2k://") {
			t_type = strconv.Itoa(_TASK_TYPE_ED2K)
		} else {
			return errors.New("Invalid protocol scheme.")
		}
		v := url.Values{}
		v.Add("callback", "ret_task")
		v.Add("uid", M.Uid)
		v.Add("cid", task_pre.Cid)
		v.Add("gcid", task_pre.GCid)
		v.Add("size", task_pre.SizeCost)
		v.Add("goldbean", task_pre.Goldbean)
		v.Add("silverbean", task_pre.Silverbean)
		v.Add("t", task_pre.FileName)
		v.Add("url", uri)
		v.Add("type", t_type)
		if len(oid) > 0 {
			v.Add("o_taskid", oid[0])
			v.Add("o_page", "history")
		} else {
			v.Add("o_page", "task")
			v.Add("o_taskid", "0")
		}
		dest = TASKCOMMIT_URL + v.Encode()
		r, err = get(dest)
		if err != nil {
			return err
		}
		// Success is signalled by a ret_task(...) callback in the reply.
		if ok, _ := regexp.Match(`ret_task\(.*\)`, r); ok {
			return nil
		} else {
			return invalidResponseErr
		}
	}
	return err
}
func addBtTask(uri string) error {
if strings.HasPrefix(uri, "bt://") {
return addMagnetTask(fmt.Sprintf(GETTORRENT_URL, M.Uid, uri[5:]))
}
return addTorrentTask(uri)
}
// addMagnetTask resolves a magnet link (or remote torrent URL) via the
// query endpoint and commits the resulting BT task. oid, when given, links
// the new task to an old history entry.
func addMagnetTask(link string, oid ...string) error {
	uri := fmt.Sprintf(URLQUERY_URL, url.QueryEscape(link), current_random())
	r, err := get(uri)
	if err != nil {
		return err
	}
	exp := regexp.MustCompile(`queryUrl\((1,.*)\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		// queryUrl(-1,'<infohash>'...) means the task already exists.
		if ok, _ := regexp.Match(`queryUrl\(-1,'[0-9A-Za-z]{40,40}'.*`, r); ok {
			return btTaskAlreadyErr
		}
		return invalidResponseErr
	}
	if task := evalParse(s[1]); task != nil {
		v := url.Values{}
		v.Add("uid", M.Uid)
		v.Add("btname", task.Name)
		v.Add("cid", task.InfoId)
		v.Add("tsize", task.Size)
		// File indices and sizes are underscore-joined lists.
		findex := strings.Join(task.Index, "_")
		size := strings.Join(task.Sizes, "_")
		v.Add("findex", findex)
		v.Add("size", size)
		if len(oid) > 0 {
			v.Add("from", "history")
			v.Add("o_taskid", oid[0])
			v.Add("o_page", "history")
		} else {
			v.Add("from", "task")
		}
		dest := fmt.Sprintf(BTTASKCOMMIT_URL, current_timestamp())
		r, err = post(dest, v.Encode())
		exp = regexp.MustCompile(`jsonp.*\(\{"id":"(\d+)","avail_space":"\d+".*\}\)`)
		s = exp.FindSubmatch(r)
		if s == nil {
			return invalidResponseErr
		}
	} else {
		return invalidResponseErr
	}
	return nil
}
// addTorrentTask uploads a local .torrent file as a multipart form and
// commits the resulting BT task. When the server replies with
// parent.edit_bt_list(...) the torrent was already added and
// btTaskAlreadyErr is returned.
//
// NOTE(review): the multipart writer is never Close()d, so the body lacks
// the terminating boundary — confirm the server tolerates this. readBody's
// error is also not checked before r is used below.
func addTorrentTask(filename string) (err error) {
	var file *os.File
	if file, err = os.Open(filename); err != nil {
		return
	}
	defer file.Close()
	// if _, err = taipei.GetMetaInfo(filename); err != nil {
	// return
	// }
	body := &bytes.Buffer{}
	writer := multipart.NewWriter(body)
	var part io.Writer
	if part, err = writer.CreateFormFile("filepath", filename); err != nil {
		return
	}
	io.Copy(part, file)
	writer.WriteField("random", current_random())
	writer.WriteField("interfrom", "task")
	dest := TORRENTUPLOAD_URL
	log.Println("==>", dest)
	req, err := http.NewRequest("POST", dest, bytes.NewReader(body.Bytes()))
	if err != nil {
		return
	}
	req.Header.Set("Content-Type", writer.FormDataContentType())
	req.Header.Add("User-Agent", user_agent)
	req.Header.Add("Accept-Encoding", "gzip, deflate")
retry:
	defaultConn.Lock()
	resp, err := defaultConn.Do(req)
	defaultConn.Unlock()
	if err == io.EOF {
		goto retry
	}
	if err != nil {
		return
	}
	log.Println(resp.Status)
	r, err := readBody(resp)
	resp.Body.Close()
	// The upload result is embedded in a <script> as `var btResult = {...}`.
	exp := regexp.MustCompile(`<script>document\.domain="xunlei\.com";var btResult =(\{.+\});var btRtcode = 0</script>`)
	s := exp.FindSubmatch(r)
	if s != nil {
		var result _btup_result
		json.Unmarshal(s[1], &result)
		v := url.Values{}
		v.Add("uid", M.Uid)
		v.Add("btname", result.Name) // TODO: filter illegal char
		v.Add("cid", result.InfoId)
		v.Add("tsize", strconv.Itoa(result.Size))
		findex := make([]string, 0, len(result.List))
		size := make([]string, 0, len(result.List))
		for i := 0; i < len(result.List); i++ {
			findex = append(findex, result.List[i].Id)
			size = append(size, result.List[i].Size)
		}
		v.Add("findex", strings.Join(findex, "_"))
		v.Add("size", strings.Join(size, "_"))
		v.Add("from", "0")
		dest = fmt.Sprintf(BTTASKCOMMIT_URL, current_timestamp())
		r, err = post(dest, v.Encode())
		exp = regexp.MustCompile(`jsonp.*\(\{"id":"(\d+)","avail_space":"\d+".*\}\)`)
		s = exp.FindSubmatch(r)
		if s == nil {
			return invalidResponseErr
		}
		// tasklist_nofresh(4, 1)
		// FillBtList(string(s[1]))
		return nil
	}
	exp = regexp.MustCompile(`parent\.edit_bt_list\((\{.*\}),'`)
	s = exp.FindSubmatch(r)
	if s == nil {
		return errors.New("Add bt task failed.")
	}
	// var result _btup_result
	// json.Unmarshal(s[1], &result)
	return btTaskAlreadyErr
}
// ProcessTaskDaemon starts a background goroutine that polls download
// progress for the in-memory task set (M.Tasks): on demand via ch, or
// every 60 seconds; a failed timed poll sleeps 5s and re-queues via ch.
//
// NOTE(review): `ch <- 1` blocks forever on an unbuffered, unread channel;
// there is also no way to stop the goroutine.
func ProcessTaskDaemon(ch chan byte, callback func(*Task)) {
	// Seed the task map on first use so the poller has something to process.
	if len(M.Tasks) == 0 {
		GetIncompletedTasks()
	}
	go func() {
		for {
			select {
			case <-ch: // explicit poll request
				err := process_task(M.Tasks, callback)
				if err != nil {
					log.Println("error in ProcessTask():", err)
				}
			case <-time.After(60 * time.Second): // periodic poll
				err := process_task(M.Tasks, callback)
				if err != nil {
					log.Println("error in ProcessTask():", err)
					time.Sleep(5 * time.Second)
					// Schedule an immediate retry via the request channel.
					ch <- 1
				}
			}
		}
	}()
}
// ProcessTask runs a single progress poll over the global task map,
// invoking callback for every task whose state was refreshed.
func ProcessTask(callback func(*Task)) error {
	return process_task(M.Tasks, callback)
}
// process_task polls the remote progress endpoint for the given tasks and
// applies the returned state locally, invoking callback (if non-nil) for
// every updated task. Only "normal"-group tasks with DownloadStatus "1"
// are queried.
func process_task(tasks map[string]*Task, callback func(*Task)) error {
	l := len(tasks)
	if l == 0 {
		return errors.New("No tasks in progress.")
	}
	ct := current_timestamp()
	uri := fmt.Sprintf(TASKPROCESS_URL, ct, ct)
	v := url.Values{}
	list := make([]string, 0, l)
	nm_list := make([]string, 0, l)
	bt_list := make([]string, 0, l)
	for i, _ := range tasks {
		if tasks[i].status() == _FLAG_normal && tasks[i].DownloadStatus == "1" {
			list = append(list, tasks[i].Id)
			// TaskType 0 marks a BT task; everything else is "normal".
			if tasks[i].TaskType == 0 {
				bt_list = append(bt_list, tasks[i].Id)
			} else {
				nm_list = append(nm_list, tasks[i].Id)
			}
		}
	}
	v.Add("list", strings.Join(list, ","))
	v.Add("nm_list", strings.Join(nm_list, ","))
	v.Add("bt_list", strings.Join(bt_list, ","))
	v.Add("uid", M.Uid)
	v.Add("interfrom", "task")
	var r []byte
	var err error
	if r, err = post(uri, v.Encode()); err != nil {
		return err
	}
	// Response is a JSONP callback: jsonpNNN({"Process":{...}}).
	exp := regexp.MustCompile(`jsonp\d+\(\{"Process":(.*)\}\)`)
	s := exp.FindSubmatch(r)
	if s == nil {
		return invalidResponseErr
	}
	var res _ptask_resp
	err = json.Unmarshal(s[1], &res)
	if err != nil {
		return err
	}
	for i, _ := range res.List {
		// NOTE(review): tasks[Id] is nil for unknown server ids; update
		// would then panic — confirm.
		task := tasks[res.List[i].Id]
		task.update(&res.List[i])
		if callback != nil {
			callback(task)
		}
	}
	return nil
}
// GetTorrentByHash downloads the raw torrent data for the given info-hash.
// A reply of the form alert('...') indicates a server-side error; it is
// logged and invalidResponseErr is returned.
func GetTorrentByHash(hash string) ([]byte, error) {
	uri := fmt.Sprintf(GETTORRENT_URL, M.Uid, strings.ToUpper(hash))
	r, err := get(uri)
	if err != nil {
		return nil, err
	}
	exp := regexp.MustCompile(`alert\('(.*)'\)`)
	s := exp.FindSubmatch(r)
	if s != nil {
		log.Printf("%s\n", s[1])
		return nil, invalidResponseErr
	}
	return r, nil
}
// GetTorrentFileByHash downloads the torrent identified by hash and writes
// it to file (mode 0644). It refuses to overwrite an existing file.
func GetTorrentFileByHash(hash, file string) error {
	// os.Stat succeeds iff the path exists. The original also tested
	// `stat != nil`, which is always false when err != nil — dead code.
	if _, err := os.Stat(file); err == nil {
		return errors.New("Target file already exists.")
	}
	r, err := GetTorrentByHash(hash)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(file, r, 0644)
}
// PauseTasks pauses the tasks with the given ids via the remote API. The id
// list is joined comma-separated with a trailing comma, as the endpoint
// expects.
func PauseTasks(ids []string) error {
	tids := strings.Join(ids, ",")
	tids += ","
	uri := fmt.Sprintf(TASKPAUSE_URL, tids, M.Uid, current_timestamp())
	r, err := get(uri)
	if err != nil {
		return err
	}
	// bytes.Equal is the idiomatic (and cheaper) form of
	// bytes.Compare(...) != 0 for a pure equality check (staticcheck S1004).
	if !bytes.Equal(r, []byte("pause_task_resp()")) {
		return invalidResponseErr
	}
	return nil
}
// DelayAllTasks asks the server to extend the lifetime of all tasks at
// once. The raw response is only logged; any transport error is returned.
func DelayAllTasks() error {
	r, err := get(DELAYONCE_URL)
	if err != nil {
		return err
	}
	log.Printf("%s\n", r)
	return nil
}
// ReAddTasks re-submits expired or deleted tasks from ts: non-BT tasks
// singly (Readd) or in one batch (AddBatchTasks), BT tasks via
// addMagnetTask from their stored torrent. Errors are logged, not returned.
func ReAddTasks(ts map[string]*Task) {
	nbt := make([]*Task, 0, len(ts))
	bt := make([]*Task, 0, len(ts))
	for i, _ := range ts {
		if ts[i].expired() || ts[i].deleted() {
			if ts[i].IsBt() {
				bt = append(bt, ts[i])
			} else {
				nbt = append(nbt, ts[i])
			}
		}
	}
	if len(nbt) == 1 {
		if err := nbt[0].Readd(); err != nil {
			log.Println(err)
		}
	} else if len(nbt) > 1 {
		urls, ids := extractTasks(nbt)
		if err := AddBatchTasks(urls, ids...); err != nil {
			log.Println(err)
		}
	}
	for i, _ := range bt {
		if err := addMagnetTask(fmt.Sprintf(GETTORRENT_URL, M.Uid, bt[i].Cid), bt[i].Id); err != nil {
			log.Println(err)
		}
	}
}
// RenameTask renames the task identified by taskid to newname.
// Returns noSuchTaskErr when the task is not present in the local cache.
func RenameTask(taskid, newname string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return rename_task(taskid, newname, task.TaskType)
}
// rename_task performs the rename API call. tasktype 0 marks a BT task
// (bt=1 in the form); anything else is a normal task.
func rename_task(taskid, newname string, tasktype byte) error {
	v := url.Values{}
	v.Add("taskid", taskid)
	if tasktype == 0 {
		v.Add("bt", "1")
	} else {
		v.Add("bt", "0")
	}
	v.Add("filename", newname)
	r, err := get(RENAME_URL + v.Encode())
	if err != nil {
		return err
	}
	// The reply is a JSON object wrapped in one leading and one trailing
	// delimiter byte; guard the slice bounds and the decode error instead
	// of ignoring them (the original could panic on a short reply and
	// silently accept garbage).
	if len(r) < 2 {
		return invalidResponseErr
	}
	var resp struct {
		Result   int    `json:"result"`
		TaskId   int    `json:"taskid"`
		FileName string `json:"filename"`
	}
	if err = json.Unmarshal(r[1:len(r)-1], &resp); err != nil {
		return err
	}
	if resp.Result != 0 {
		return fmt.Errorf("error in rename task: %d", resp.Result)
	}
	log.Println(resp.TaskId, "=>", resp.FileName)
	return nil
}
// DeleteTasks is a placeholder for batch task deletion.
// TODO: not implemented — currently a no-op that always returns nil.
func DeleteTasks(ids []string) error {
	return nil
}
// DeleteTask removes the task identified by taskid.
// Returns noSuchTaskErr when the id is not in the local cache.
func DeleteTask(taskid string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return task.Remove()
}
// PurgeTask permanently removes the task identified by taskid.
// Returns noSuchTaskErr when the id is not in the local cache.
func PurgeTask(taskid string) error {
	task := M.getTaskbyId(taskid)
	if task == nil {
		return noSuchTaskErr
	}
	return task.Purge()
}
// ResumeTasks is a placeholder for resuming tasks matching pattern.
// TODO: not implemented — currently a no-op that always returns nil.
func ResumeTasks(pattern string) error {
	return nil
}
|
// Package api defines data types representing core collectd data types.
package api // import "collectd.org/api"
import (
"time"
)
// Value represents either a Gauge or a Derive. It is Go's equivalent to the C
// union value_t. If a function accepts a Value, you may pass in either a Gauge
// or a Derive. Passing in any other type may or may not panic.
type Value interface {
	Type() string
}

// Gauge represents a gauge metric value, such as a temperature.
// This is Go's equivalent to the C type "gauge_t".
type Gauge float64

// Type returns "gauge".
func (v Gauge) Type() string { return "gauge" }

// Derive represents a counter metric value, such as bytes sent over the
// network. When the counter wraps around (overflows) or is reset, this is
// interpreted as a (huge) negative rate, which is discarded.
// This is Go's equivalent to the C type "derive_t".
type Derive int64

// Type returns "derive".
func (v Derive) Type() string { return "derive" }

// Counter represents a counter metric value, such as bytes sent over the
// network. When a counter value is smaller than the previous value, a wrap
// around (overflow) is assumed. This causes huge spikes in case a counter is
// reset. Only use Counter for very specific cases. If in doubt, use Derive
// instead.
// This is Go's equivalent to the C type "counter_t".
type Counter uint64

// Type returns "counter".
func (v Counter) Type() string { return "counter" }

// Identifier identifies one metric.
type Identifier struct {
	Host                   string
	Plugin, PluginInstance string
	Type, TypeInstance     string
}

// ValueList represents one (set of) data point(s) of one metric. It is Go's
// equivalent of the C type value_list_t.
type ValueList struct {
	Identifier
	Time     time.Time
	Interval time.Duration
	Values   []Value
}

// Writer are objects accepting a ValueList for writing, for example to the
// network.
type Writer interface {
	Write(vl ValueList) error
}

// String returns the identifier in collectd's canonical
// "host/plugin[-instance]/type[-instance]" form.
func (id Identifier) String() string {
	plugin := id.Plugin
	if id.PluginInstance != "" {
		plugin += "-" + id.PluginInstance
	}
	typ := id.Type
	if id.TypeInstance != "" {
		typ += "-" + id.TypeInstance
	}
	return id.Host + "/" + plugin + "/" + typ
}
api: Implement the "Dispatcher" type, which multiplextes writes.
// Package api defines data types representing core collectd data types.
package api // import "collectd.org/api"
import (
"log"
"time"
)
// Value represents either a Gauge or a Derive. It is Go's equivalent to the C
// union value_t. If a function accepts a Value, you may pass in either a Gauge
// or a Derive. Passing in any other type may or may not panic.
type Value interface {
	Type() string
}

// Gauge represents a gauge metric value, such as a temperature.
// This is Go's equivalent to the C type "gauge_t".
type Gauge float64

// Type returns "gauge".
func (v Gauge) Type() string { return "gauge" }

// Derive represents a counter metric value, such as bytes sent over the
// network. When the counter wraps around (overflows) or is reset, this is
// interpreted as a (huge) negative rate, which is discarded.
// This is Go's equivalent to the C type "derive_t".
type Derive int64

// Type returns "derive".
func (v Derive) Type() string { return "derive" }

// Counter represents a counter metric value, such as bytes sent over the
// network. When a counter value is smaller than the previous value, a wrap
// around (overflow) is assumed. This causes huge spikes in case a counter is
// reset. Only use Counter for very specific cases. If in doubt, use Derive
// instead.
// This is Go's equivalent to the C type "counter_t".
type Counter uint64

// Type returns "counter".
func (v Counter) Type() string { return "counter" }

// Identifier identifies one metric.
type Identifier struct {
	Host                   string
	Plugin, PluginInstance string
	Type, TypeInstance     string
}

// ValueList represents one (set of) data point(s) of one metric. It is Go's
// equivalent of the C type value_list_t.
type ValueList struct {
	Identifier
	Time     time.Time
	Interval time.Duration
	Values   []Value
}

// Writer are objects accepting a ValueList for writing, for example to the
// network.
type Writer interface {
	Write(vl ValueList) error
}

// String returns the identifier in collectd's canonical
// "host/plugin[-instance]/type[-instance]" form.
func (id Identifier) String() string {
	plugin := id.Plugin
	if id.PluginInstance != "" {
		plugin += "-" + id.PluginInstance
	}
	typ := id.Type
	if id.TypeInstance != "" {
		typ += "-" + id.TypeInstance
	}
	return id.Host + "/" + plugin + "/" + typ
}
// Dispatcher implements a multiplexer for Writer, i.e. each ValueList
// written to it is copied and written to each registered Writer.
type Dispatcher struct {
	writers []Writer
}

// Add adds a Writer to the Dispatcher.
func (d *Dispatcher) Add(w Writer) {
	d.writers = append(d.writers, w)
}

// Len returns the number of Writers belonging to the Dispatcher.
func (d *Dispatcher) Len() int {
	return len(d.writers)
}

// Write starts a new Goroutine for each Writer which creates a copy of the
// ValueList and then calls the Writer with the copy. It returns nil
// immediately; Writer errors are only logged.
func (d *Dispatcher) Write(vl ValueList) error {
	for _, w := range d.writers {
		// Pass w as an argument: before Go 1.22 the loop variable is shared
		// across iterations, so capturing it in the closure would make
		// every goroutine (potentially) observe only the last writer.
		go func(w Writer) {
			vlCopy := vl
			vlCopy.Values = make([]Value, len(vl.Values))
			copy(vlCopy.Values, vl.Values)
			if err := w.Write(vlCopy); err != nil {
				log.Printf("%T.Write(): %v", w, err)
			}
		}(w)
	}
	return nil
}
|
package main
import (
"crypto/x509"
"encoding/pem"
"fmt"
"net/http"
"github.com/russellhaering/gosaml2"
"github.com/russellhaering/goxmldsig"
)
var rawIdpCertificate = `
-----BEGIN CERTIFICATE-----
MIIDpDCCAoygAwIBAgIGAVLIBhAwMA0GCSqGSIb3DQEBBQUAMIGSMQswCQYDVQQGEwJVUzETMBEG
A1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEU
MBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi0xMTY4MDcxHDAaBgkqhkiG9w0BCQEW
DWluZm9Ab2t0YS5jb20wHhcNMTYwMjA5MjE1MjA2WhcNMjYwMjA5MjE1MzA2WjCBkjELMAkGA1UE
BhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNV
BAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtMTE2ODA3MRwwGgYJ
KoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
mtjBOZ8MmhUyi8cGk4dUY6Fj1MFDt/q3FFiaQpLzu3/q5lRVUNUBbAtqQWwY10dzfZguHOuvA5p5
QyiVDvUhe+XkVwN2R2WfArQJRTPnIcOaHrxqQf3o5cCIG21ZtysFHJSo8clPSOe+0VsoRgcJ1aF4
2rODwgqRRZdO9Wh3502XlJ799DJQ23IC7XasKEsGKzJqhlRrfd/FyIuZT0sFHDKRz5snSJhm9gpN
uQlCmk7ONZ1sXqtt+nBIfWIqeoYQubPW7pT5GTc7wouWq4TCjHJiK9k2HiyNxW0E3JX08swEZi2+
LVDjgLzNc4lwjSYIj3AOtPZs8s606oBdIBni4wIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQBMxSkJ
TxkXxsoKNW0awJNpWRbU81QpheMFfENIzLam4Itc/5kSZAaSy/9e2QKfo4jBo/MMbCq2vM9TyeJQ
DJpRaioUTd2lGh4TLUxAxCxtUk/pascL+3Nn936LFmUCLxaxnbeGzPOXAhscCtU1H0nFsXRnKx5a
cPXYSKFZZZktieSkww2Oi8dg2DYaQhGQMSFMVqgVfwEu4bvCRBvdSiNXdWGCZQmFVzBZZ/9rOLzP
pvTFTPnpkavJm81FLlUhiE/oFgKlCDLWDknSpXAI0uZGERcwPca6xvIMh86LjQKjbVci9FYDStXC
qRnqQ+TccSu/B6uONFsDEngGcXSKfB+a
-----END CERTIFICATE-----
`
// main wires up a minimal SAML service provider against an Okta developer
// IdP: it parses the hard-coded IdP signing certificate, serves the
// assertion-consumer callback on :8080, and prints the URL that starts a
// login. Errors are fatal (panic) since this is example code.
func main() {
	// Load the identity provider's signing certificate. In this case we are using
	// an Okta developer account.
	block, _ := pem.Decode([]byte(rawIdpCertificate))
	idpCert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	certStore := dsig.MemoryX509CertificateStore{
		Roots: []*x509.Certificate{idpCert},
	}
	// We sign the AuthnRequest with a random key because Okta doesn't seem
	// to verify these.
	randomKeyStore := dsig.RandomKeyStoreForTest()
	sp := &saml2.SAMLServiceProvider{
		IdentityProviderSSOURL:      "https://dev-116807.oktapreview.com/app/scaleftdev116807_test_1/exk659aytfMeNI49v0h7/sso/saml",
		IdentityProviderIssuer:      "http://www.okta.com/exk659aytfMeNI49v0h7",
		AssertionConsumerServiceURL: "http://localhost:8080/v1/_saml_callback",
		SignAuthnRequests:           true,
		AudienceURI:                 "123",
		IDPCertificateStore:         &certStore,
		SPKeyStore:                  randomKeyStore,
	}
	http.HandleFunc("/v1/_saml_callback", func(rw http.ResponseWriter, req *http.Request) {
		err := req.ParseForm()
		if err != nil {
			panic(err)
		}
		// Validate and decode the POSTed SAMLResponse assertion.
		assertionInfo, err := sp.RetrieveAssertionInfo(req.FormValue("SAMLResponse"))
		if err != nil {
			panic(err)
		}
		fmt.Fprintf(rw, "%v\n", assertionInfo)
	})
	authUrl, err := sp.BuildAuthURL("")
	if err != nil {
		panic(err)
	}
	println(authUrl)
	err = http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err)
	}
}
Update the example code to use http://idp.oktadev.com
package main
import (
"crypto/x509"
"fmt"
"net/http"
"io/ioutil"
"encoding/base64"
"encoding/xml"
saml2 "github.com/russellhaering/gosaml2"
"github.com/russellhaering/gosaml2/types"
dsig "github.com/russellhaering/goxmldsig"
)
// main fetches IdP metadata from idp.oktadev.com, builds a SAML service
// provider from it, and serves the assertion consumer endpoint on :8080.
func main() {
	res, err := http.Get("http://idp.oktadev.com/metadata")
	if err != nil {
		panic(err)
	}
	rawMetadata, err := ioutil.ReadAll(res.Body)
	// Close the body once it has been fully read so the underlying
	// connection can be reused; the original leaked it.
	res.Body.Close()
	if err != nil {
		panic(err)
	}
	metadata := &types.EntityDescriptor{}
	err = xml.Unmarshal(rawMetadata, metadata)
	if err != nil {
		panic(err)
	}
	certStore := dsig.MemoryX509CertificateStore{
		Roots: []*x509.Certificate{},
	}
	// Collect every signing certificate the IdP advertises in its metadata.
	for _, kd := range metadata.IDPSSODescriptor.KeyDescriptors {
		certData, err := base64.StdEncoding.DecodeString(kd.KeyInfo.X509Data.X509Certificate.Data)
		if err != nil {
			panic(err)
		}
		idpCert, err := x509.ParseCertificate(certData)
		if err != nil {
			panic(err)
		}
		certStore.Roots = append(certStore.Roots, idpCert)
	}
	// We sign the AuthnRequest with a random key because Okta doesn't seem
	// to verify these.
	randomKeyStore := dsig.RandomKeyStoreForTest()
	sp := &saml2.SAMLServiceProvider{
		IdentityProviderSSOURL:      metadata.IDPSSODescriptor.SingleSignOnService.Location,
		IdentityProviderIssuer:      "http://example.com/saml/acs/example",
		AssertionConsumerServiceURL: "http://localhost:8080/v1/_saml_callback",
		SignAuthnRequests:           true,
		AudienceURI:                 "http://example.com/saml/acs/example",
		IDPCertificateStore:         &certStore,
		SPKeyStore:                  randomKeyStore,
	}
	http.HandleFunc("/v1/_saml_callback", func(rw http.ResponseWriter, req *http.Request) {
		if err := req.ParseForm(); err != nil {
			rw.WriteHeader(http.StatusBadRequest)
			return
		}
		assertionInfo, err := sp.RetrieveAssertionInfo(req.FormValue("SAMLResponse"))
		if err != nil {
			// Validation failed: reject without leaking details.
			rw.WriteHeader(http.StatusForbidden)
			return
		}
		fmt.Fprintf(rw, "NameID: %s\n", assertionInfo.NameID)
		fmt.Fprintf(rw, "Assertions:\n")
		for key, val := range assertionInfo.Values {
			fmt.Fprintf(rw, " %s: %+v\n", key, val)
		}
		fmt.Fprintf(rw, "\n")
		fmt.Fprintf(rw, "Warnings:\n")
		fmt.Fprintf(rw, "%+v\n", assertionInfo.WarningInfo)
	})
	println("Visit this URL To Authenticate:")
	authURL, err := sp.BuildAuthURL("")
	if err != nil {
		panic(err)
	}
	println(authURL)
	println("Supply:")
	fmt.Printf(" SP ACS URL : %s\n", sp.AssertionConsumerServiceURL)
	err = http.ListenAndServe(":8080", nil)
	if err != nil {
		panic(err)
	}
}
|
package models
// Registration maps one row of the "registrations" table: a surrogate
// primary key plus a foreign key into the "faces" table.
type Registration struct {
	Id     int `name:"id" type:"int" null:"NOT NULL" extra:"PRIMARY"`
	FaceId int `name:"face_id" type:"int" null:"NOT NULL" extra:"REFERENCES" refTable:"faces" refField:"id" refFieldShow:"id"`
}
// Registrations builds the model descriptor for the "registrations" table.
func (c *ModelManager) Registrations() *RegistrationModel {
	model := new(RegistrationModel)
	model.TableName = "registrations"
	model.Caption = "Регистрации"
	// Columns must mirror the fields of Registration; the previous
	// "param_values_id" column (caption "Значение") had no matching struct
	// field, so it is removed here.
	model.Columns = []string{"id", "face_id"}
	model.ColNames = []string{"ID", "Лицо"}
	model.Fields = new(Registration)
	model.WherePart = make(map[string]interface{}, 0)
	model.OrderBy = "id"
	model.Limit = "ALL"
	model.Offset = 0
	model.Sub = false
	model.SubTable = nil
	model.SubField = ""
	return model
}
// RegistrationModel is the Entity-backed model for the "registrations" table.
type RegistrationModel struct {
	Entity
}
registration entity: fix columns
remove param_values_id column
package models
// Registration maps one row of the "registrations" table: a surrogate
// primary key plus a foreign key into the "faces" table.
type Registration struct {
	Id     int `name:"id" type:"int" null:"NOT NULL" extra:"PRIMARY"`
	FaceId int `name:"face_id" type:"int" null:"NOT NULL" extra:"REFERENCES" refTable:"faces" refField:"id" refFieldShow:"id"`
}
// Registrations constructs the model descriptor for the "registrations"
// table, pre-configured with its columns, captions and default query
// settings.
func (c *ModelManager) Registrations() *RegistrationModel {
	m := new(RegistrationModel)
	m.TableName = "registrations"
	m.Caption = "Регистрации"
	// Column list mirrors the fields of Registration.
	m.Columns = []string{"id", "face_id"}
	m.ColNames = []string{"ID", "Лицо"}
	m.Fields = new(Registration)
	// Default query settings: no filter, order by id, no paging.
	m.WherePart = make(map[string]interface{}, 0)
	m.OrderBy = "id"
	m.Limit = "ALL"
	m.Offset = 0
	// Not a sub-model of any parent table.
	m.Sub = false
	m.SubTable = nil
	m.SubField = ""
	return m
}
// RegistrationModel is the Entity-backed model for the "registrations" table.
type RegistrationModel struct {
	Entity
}
|
package targetworkloadservice
import (
"context"
"fmt"
"strings"
"sync"
"github.com/pkg/errors"
util "github.com/rancher/rancher/pkg/controllers/user/workload"
"github.com/rancher/types/apis/core/v1"
"github.com/rancher/types/config"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
// This controller is responsible for monitoring services with targetWorkloadIds,
// locating corresponding pods, and marking them with the label to satisfy service selector
const (
	// WorkloadIDLabelPrefix prefixes the pod label that ties a pod to a
	// workload-targeting service (see getServiceSelector).
	WorkloadIDLabelPrefix = "workloadID"
)

// workloadServiceUUIDToDeploymentUUIDs maps a service key ("namespace/name")
// to the set of workload UUIDs that service targets. It is a sync.Map because
// it is written by the service controller and read by the pod controller
// concurrently.
var workloadServiceUUIDToDeploymentUUIDs sync.Map

// Controller reconciles services that carry a targetWorkloadIds annotation.
type Controller struct {
	pods            v1.PodInterface
	workloadLister  util.CommonController
	podLister       v1.PodLister
	namespaceLister v1.NamespaceLister
	serviceLister   v1.ServiceLister
	services        v1.ServiceInterface
}

// PodController propagates workload-service selector labels onto pods.
type PodController struct {
	pods           v1.PodInterface
	workloadLister util.CommonController
	serviceLister  v1.ServiceLister
	services       v1.ServiceInterface
}
// Register wires both controllers into the user context: the service handler
// reconciles services with targetWorkloadIds, and the pod handler labels
// newly observed pods that match targeted workloads.
func Register(ctx context.Context, workload *config.UserOnlyContext) {
	c := &Controller{
		pods:            workload.Core.Pods(""),
		workloadLister:  util.NewWorkloadController(workload, nil),
		podLister:       workload.Core.Pods("").Controller().Lister(),
		namespaceLister: workload.Core.Namespaces("").Controller().Lister(),
		serviceLister:   workload.Core.Services("").Controller().Lister(),
		services:        workload.Core.Services(""),
	}
	p := &PodController{
		workloadLister: util.NewWorkloadController(workload, nil),
		pods:           workload.Core.Pods(""),
		serviceLister:  workload.Core.Services("").Controller().Lister(),
		services:       workload.Core.Services(""),
	}
	workload.Core.Services("").AddHandler("workloadServiceController", c.sync)
	workload.Core.Pods("").AddHandler("podToWorkloadServiceController", p.sync)
}
// sync handles service events. On deletion it only drops the bookkeeping
// entry; otherwise it reconciles the service's target-workload pods.
//
// NOTE(review): pods labelled for this service keep their workloadID label
// after the service is deleted — confirm whether label cleanup is needed here.
func (c *Controller) sync(key string, obj *corev1.Service) error {
	if obj == nil {
		// delete from the workload map
		workloadServiceUUIDToDeploymentUUIDs.Delete(key)
		return nil
	}
	return c.reconcilePods(key, obj)
}
// reconcilePods makes the service select its target workloads' pods: it adds
// the service-specific selector key to the service (when missing) and labels
// the pods of every targeted workload accordingly.
func (c *Controller) reconcilePods(key string, obj *corev1.Service) error {
	// Only services annotated with target workload IDs are of interest.
	if obj.Annotations == nil {
		return nil
	}
	value, ok := obj.Annotations[util.WorkloadAnnotation]
	if !ok || value == "" {
		return nil
	}
	// A "noop" annotation disables reconciliation for this service.
	if noop, ok := obj.Annotations[util.WorkloadAnnotatioNoop]; ok && noop == "true" {
		return nil
	}
	workloadIDs := strings.Split(value, ",")
	if obj.Spec.Selector == nil {
		obj.Spec.Selector = map[string]string{}
	}
	// Ensure the service carries its own workloadID selector key.
	selectorToAdd := getServiceSelector(obj)
	var toUpdate *corev1.Service
	if _, present := obj.Spec.Selector[selectorToAdd]; !present {
		toUpdate = obj.DeepCopy()
		toUpdate.Spec.Selector[selectorToAdd] = "true"
	}
	if err := c.updatePods(key, obj, workloadIDs); err != nil {
		return err
	}
	if toUpdate == nil {
		return nil
	}
	_, err := c.services.Update(toUpdate)
	return err
}
// updatePods stamps the pods backing the given workloadIDs with the
// service's selector labels and records the service -> workload mapping in
// workloadServiceUUIDToDeploymentUUIDs under serviceName ("namespace/name").
func (c *Controller) updatePods(serviceName string, obj *corev1.Service, workloadIDs []string) error {
	var podsToUpdate []*corev1.Pod
	// NOTE(review): this outer `set` is built from the service selector but
	// never used afterwards (the loop below declares its own `set`) — looks
	// like dead code; confirm before removing.
	set := labels.Set{}
	for key, val := range obj.Spec.Selector {
		set[key] = val
	}
	// reset the map
	targetWorkloadUUIDs := make(map[string]bool)
	for _, workloadID := range workloadIDs {
		targetWorkload, err := c.workloadLister.GetByWorkloadID(workloadID)
		if err != nil {
			// Best-effort: skip workloads that cannot be resolved.
			logrus.Warnf("Failed to fetch workload [%s]: [%v]", workloadID, err)
			continue
		}
		// Add workload/deployment to the system map
		targetWorkloadUUID := fmt.Sprintf("%s/%s", targetWorkload.Namespace, targetWorkload.Name)
		targetWorkloadUUIDs[targetWorkloadUUID] = true
		// Find all the pods satisfying deployments' selectors
		set := labels.Set{}
		for key, val := range targetWorkload.SelectorLabels {
			set[key] = val
		}
		workloadSelector := labels.SelectorFromSet(set)
		pods, err := c.podLister.List(targetWorkload.Namespace, workloadSelector)
		if err != nil {
			return errors.Wrapf(err, "Failed to list pods for target workload [%s]", workloadID)
		}
		for _, pod := range pods {
			if pod.DeletionTimestamp != nil {
				continue
			}
			// NOTE(review): this `continue` only skips the current selector
			// key, not the pod — a pod missing several selector keys is
			// appended once per missing key. Confirm the duplication is
			// acceptable (Update below is idempotent but redundant).
			for svsSelectorKey, svcSelectorValue := range obj.Spec.Selector {
				if value, ok := pod.Labels[svsSelectorKey]; ok && value == svcSelectorValue {
					continue
				}
				podsToUpdate = append(podsToUpdate, pod)
			}
		}
		// Update the pods with the label
		// NOTE(review): podsToUpdate accumulates across workloadIDs, so pods
		// collected in earlier iterations are re-sent to the API on every
		// later iteration — confirm intended.
		for _, pod := range podsToUpdate {
			toUpdate := pod.DeepCopy()
			for svcSelectorKey, svcSelectorValue := range obj.Spec.Selector {
				toUpdate.Labels[svcSelectorKey] = svcSelectorValue
			}
			if _, err := c.pods.Update(toUpdate); err != nil {
				return errors.Wrapf(err, "Failed to update pod [%s] for target workload [%s]", pod.Name, workloadID)
			}
		}
	}
	workloadServiceUUIDToDeploymentUUIDs.Store(serviceName, targetWorkloadUUIDs)
	return nil
}
// getServiceSelector derives the pod-label key that ties pods to the given
// workload-targeting service.
func getServiceSelector(obj *corev1.Service) string {
	return WorkloadIDLabelPrefix + "_" + obj.Name
}
// sync labels a pod with the selector labels of every workload-targeting
// service whose target workloads match this pod.
func (c *PodController) sync(key string, obj *corev1.Pod) error {
	if obj == nil || obj.DeletionTimestamp != nil {
		return nil
	}
	// filter out deployments that are match for the pods
	workloads, err := c.workloadLister.GetWorkloadsMatchingLabels(obj.Namespace, obj.Labels)
	if err != nil {
		return err
	}
	// Collect the keys of all services targeting any of those workloads.
	workloadServiceUUIDToAdd := []string{}
	for _, d := range workloads {
		deploymentUUID := fmt.Sprintf("%s/%s", d.Namespace, d.Name)
		workloadServiceUUIDToDeploymentUUIDs.Range(func(k, v interface{}) bool {
			if _, ok := v.(map[string]bool)[deploymentUUID]; ok {
				workloadServiceUUIDToAdd = append(workloadServiceUUIDToAdd, k.(string))
			}
			return true
		})
	}
	// Merge the selector labels of every matching service.
	workloadServicesLabels := make(map[string]string)
	for _, workloadServiceUUID := range workloadServiceUUIDToAdd {
		// NOTE(review): only the name part of "namespace/name" is used; the
		// lookup always uses the pod's namespace — confirm services in other
		// namespaces can never appear here.
		splitted := strings.Split(workloadServiceUUID, "/")
		workload, err := c.serviceLister.Get(obj.Namespace, splitted[1])
		if err != nil {
			return err
		}
		for key, value := range workload.Spec.Selector {
			workloadServicesLabels[key] = value
		}
	}
	if len(workloadServicesLabels) == 0 {
		return nil
	}
	// Apply the merged labels to a copy of the pod and push the update.
	toUpdate := obj.DeepCopy()
	for key, value := range workloadServicesLabels {
		toUpdate.Labels[key] = value
	}
	_, err = c.pods.Update(toUpdate)
	if err != nil {
		return err
	}
	return nil
}
Cleanup targetWorkloadLabels from pods on service removal
package targetworkloadservice
import (
"context"
"fmt"
"strings"
"sync"
"reflect"
"github.com/pkg/errors"
util "github.com/rancher/rancher/pkg/controllers/user/workload"
"github.com/rancher/types/apis/core/v1"
"github.com/rancher/types/config"
"github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
)
// This controller is responsible for monitoring services with targetWorkloadIds,
// locating corresponding pods, and marking them with the label to satisfy service selector
const (
	// WorkloadIDLabelPrefix prefixes the pod label that ties a pod to a
	// workload-targeting service (see getServiceSelector).
	WorkloadIDLabelPrefix = "workloadID"
)

// workloadServiceUUIDToDeploymentUUIDs maps a service key ("namespace/name")
// to the set of workload UUIDs that service targets. It is a sync.Map because
// it is written by the service controller and read by the pod controller
// concurrently.
var workloadServiceUUIDToDeploymentUUIDs sync.Map

// Controller reconciles services that carry a targetWorkloadIds annotation.
type Controller struct {
	pods            v1.PodInterface
	workloadLister  util.CommonController
	podLister       v1.PodLister
	namespaceLister v1.NamespaceLister
	serviceLister   v1.ServiceLister
	services        v1.ServiceInterface
}

// PodController propagates workload-service selector labels onto pods and
// strips them again when the owning service disappears.
type PodController struct {
	pods           v1.PodInterface
	workloadLister util.CommonController
	serviceLister  v1.ServiceLister
	services       v1.ServiceInterface
}
// Register wires both controllers into the user context: the service handler
// reconciles services with targetWorkloadIds, and the pod handler labels
// newly observed pods that match targeted workloads.
func Register(ctx context.Context, workload *config.UserOnlyContext) {
	c := &Controller{
		pods:            workload.Core.Pods(""),
		workloadLister:  util.NewWorkloadController(workload, nil),
		podLister:       workload.Core.Pods("").Controller().Lister(),
		namespaceLister: workload.Core.Namespaces("").Controller().Lister(),
		serviceLister:   workload.Core.Services("").Controller().Lister(),
		services:        workload.Core.Services(""),
	}
	p := &PodController{
		workloadLister: util.NewWorkloadController(workload, nil),
		pods:           workload.Core.Pods(""),
		serviceLister:  workload.Core.Services("").Controller().Lister(),
		services:       workload.Core.Services(""),
	}
	workload.Core.Services("").AddHandler("workloadServiceController", c.sync)
	workload.Core.Pods("").AddHandler("podToWorkloadServiceController", p.sync)
}
// sync handles service events. When the service is being removed it
// re-enqueues every pod still carrying the service's workloadID label (so the
// pod controller strips the now-stale label) and drops the bookkeeping entry;
// otherwise it reconciles the service's target-workload pods.
func (c *Controller) sync(key string, obj *corev1.Service) error {
	if obj == nil || obj.DeletionTimestamp != nil {
		if _, ok := workloadServiceUUIDToDeploymentUUIDs.Load(key); ok {
			// update all pods having the label, so the label gets removed
			// key is "namespace/name" as produced by the handler framework.
			splitted := strings.Split(key, "/")
			namespace := splitted[0]
			serviceName := splitted[1]
			selectorToCheck := getServiceSelector(serviceName)
			pods, err := c.podLister.List(namespace, labels.SelectorFromSet(labels.Set{selectorToCheck: "true"}))
			if err != nil {
				return err
			}
			for _, pod := range pods {
				c.pods.Controller().Enqueue(namespace, pod.Name)
			}
		}
		// delete from the workload map
		workloadServiceUUIDToDeploymentUUIDs.Delete(key)
		return nil
	}
	return c.reconcilePods(key, obj)
}
// reconcilePods makes the service select its target workloads' pods: it adds
// the service-specific selector key to the service (when missing) and labels
// the pods of every targeted workload accordingly.
func (c *Controller) reconcilePods(key string, obj *corev1.Service) error {
	// Only services annotated with target workload IDs are of interest.
	if obj.Annotations == nil {
		return nil
	}
	value, ok := obj.Annotations[util.WorkloadAnnotation]
	if !ok || value == "" {
		return nil
	}
	// A "noop" annotation disables reconciliation for this service.
	if noop, ok := obj.Annotations[util.WorkloadAnnotatioNoop]; ok && noop == "true" {
		return nil
	}
	workloadIDs := strings.Split(value, ",")
	if obj.Spec.Selector == nil {
		obj.Spec.Selector = map[string]string{}
	}
	// Ensure the service carries its own workloadID selector key.
	selectorToAdd := getServiceSelector(obj.Name)
	var toUpdate *corev1.Service
	if _, present := obj.Spec.Selector[selectorToAdd]; !present {
		toUpdate = obj.DeepCopy()
		toUpdate.Spec.Selector[selectorToAdd] = "true"
	}
	if err := c.updatePods(key, obj, workloadIDs); err != nil {
		return err
	}
	if toUpdate == nil {
		return nil
	}
	_, err := c.services.Update(toUpdate)
	return err
}
// updatePods stamps the pods backing the given workloadIDs with the
// service's selector labels and records the service -> workload mapping in
// workloadServiceUUIDToDeploymentUUIDs under serviceName ("namespace/name").
func (c *Controller) updatePods(serviceName string, obj *corev1.Service, workloadIDs []string) error {
	var podsToUpdate []*corev1.Pod
	// NOTE(review): this outer `set` is built from the service selector but
	// never used afterwards (the loop below declares its own `set`) — looks
	// like dead code; confirm before removing.
	set := labels.Set{}
	for key, val := range obj.Spec.Selector {
		set[key] = val
	}
	// reset the map
	targetWorkloadUUIDs := make(map[string]bool)
	for _, workloadID := range workloadIDs {
		targetWorkload, err := c.workloadLister.GetByWorkloadID(workloadID)
		if err != nil {
			// Best-effort: skip workloads that cannot be resolved.
			logrus.Warnf("Failed to fetch workload [%s]: [%v]", workloadID, err)
			continue
		}
		// Add workload/deployment to the system map
		targetWorkloadUUID := fmt.Sprintf("%s/%s", targetWorkload.Namespace, targetWorkload.Name)
		targetWorkloadUUIDs[targetWorkloadUUID] = true
		// Find all the pods satisfying deployments' selectors
		set := labels.Set{}
		for key, val := range targetWorkload.SelectorLabels {
			set[key] = val
		}
		workloadSelector := labels.SelectorFromSet(set)
		pods, err := c.podLister.List(targetWorkload.Namespace, workloadSelector)
		if err != nil {
			return errors.Wrapf(err, "Failed to list pods for target workload [%s]", workloadID)
		}
		for _, pod := range pods {
			if pod.DeletionTimestamp != nil {
				continue
			}
			// NOTE(review): this `continue` only skips the current selector
			// key, not the pod — a pod missing several selector keys is
			// appended once per missing key. Confirm the duplication is
			// acceptable (Update below is idempotent but redundant).
			for svsSelectorKey, svcSelectorValue := range obj.Spec.Selector {
				if value, ok := pod.Labels[svsSelectorKey]; ok && value == svcSelectorValue {
					continue
				}
				podsToUpdate = append(podsToUpdate, pod)
			}
		}
		// Update the pods with the label
		// NOTE(review): podsToUpdate accumulates across workloadIDs, so pods
		// collected in earlier iterations are re-sent to the API on every
		// later iteration — confirm intended.
		for _, pod := range podsToUpdate {
			toUpdate := pod.DeepCopy()
			for svcSelectorKey, svcSelectorValue := range obj.Spec.Selector {
				toUpdate.Labels[svcSelectorKey] = svcSelectorValue
			}
			if _, err := c.pods.Update(toUpdate); err != nil {
				return errors.Wrapf(err, "Failed to update pod [%s] for target workload [%s]", pod.Name, workloadID)
			}
		}
	}
	workloadServiceUUIDToDeploymentUUIDs.Store(serviceName, targetWorkloadUUIDs)
	return nil
}
func getServiceSelector(serviceName string) string {
return fmt.Sprintf("%s_%s", WorkloadIDLabelPrefix, serviceName)
}
// sync labels a pod with the selector labels of every workload-targeting
// service whose target workloads match this pod, and strips workloadID
// labels that no longer correspond to any such service.
func (c *PodController) sync(key string, obj *corev1.Pod) error {
	if obj == nil || obj.DeletionTimestamp != nil {
		return nil
	}
	// filter out deployments that are match for the pods
	workloads, err := c.workloadLister.GetWorkloadsMatchingLabels(obj.Namespace, obj.Labels)
	if err != nil {
		return err
	}
	// Collect the keys of all services targeting any of those workloads.
	var workloadServiceUUIDToAdd []string
	for _, d := range workloads {
		deploymentUUID := fmt.Sprintf("%s/%s", d.Namespace, d.Name)
		workloadServiceUUIDToDeploymentUUIDs.Range(func(k, v interface{}) bool {
			if _, ok := v.(map[string]bool)[deploymentUUID]; ok {
				workloadServiceUUIDToAdd = append(workloadServiceUUIDToAdd, k.(string))
			}
			return true
		})
	}
	// Merge the selector labels of every matching service.
	workloadServicesLabels := make(map[string]string)
	for _, workloadServiceUUID := range workloadServiceUUIDToAdd {
		splitted := strings.Split(workloadServiceUUID, "/")
		workload, err := c.serviceLister.Get(obj.Namespace, splitted[1])
		if err != nil {
			return err
		}
		for key, value := range workload.Spec.Selector {
			workloadServicesLabels[key] = value
		}
	}
	toUpdate := obj.DeepCopy()
	// Rebuild the label set: drop workloadID-prefixed labels that are no
	// longer wanted, keep everything else. (Renamed from `labels`, which
	// shadowed the imported k8s.io/apimachinery/pkg/labels package.)
	newLabels := map[string]string{}
	for key, value := range toUpdate.Labels {
		if strings.HasPrefix(key, WorkloadIDLabelPrefix) {
			if _, ok := workloadServicesLabels[key]; !ok {
				continue
			}
		}
		newLabels[key] = value
	}
	// add new labels
	for key, value := range workloadServicesLabels {
		newLabels[key] = value
	}
	toUpdate.Labels = newLabels
	// Skip the API call when nothing actually changed.
	if reflect.DeepEqual(obj.Labels, newLabels) {
		return nil
	}
	_, err = c.pods.Update(toUpdate)
	if err != nil {
		return err
	}
	return nil
}
|
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
"encoding/json"
"fmt"
"net/url"
)
// URL is an alias of url.URL.
// It has custom json marshal methods that enable it to be used in K8s CRDs
// such that the CRD resource will have the URL but operator code can work
// with the url.URL struct directly.
type URL url.URL

// ParseURL attempts to parse the given string as a URL.
// Compatible with net/url.Parse except in the case of an empty string, where
// the resulting *URL will be nil with no error.
func ParseURL(u string) (*URL, error) {
	if len(u) == 0 {
		return nil, nil
	}
	parsed, err := url.Parse(u)
	if err != nil {
		return nil, err
	}
	return (*URL)(parsed), nil
}
// HTTP creates an http:// URL pointing to a known domain.
func HTTP(domain string) *URL {
	u := URL{Scheme: "http", Host: domain}
	return &u
}
// HTTPS creates an https:// URL pointing to a known domain.
func HTTPS(domain string) *URL {
	u := URL{Scheme: "https", Host: domain}
	return &u
}
// IsEmpty reports whether the URL is nil or equal to the zero URL.
func (u *URL) IsEmpty() bool {
	return u == nil || *u == URL{}
}
// MarshalJSON implements a custom json marshal method used when this type is
// marshaled using json.Marshal.
// json.Marshaler impl
//
// NOTE(review): %q applies Go-style quoting, which matches JSON escaping for
// the characters a serialized URL normally contains — confirm no URLs with
// exotic control characters flow through here.
func (u URL) MarshalJSON() ([]byte, error) {
	b := fmt.Sprintf("%q", u.String())
	return []byte(b), nil
}
// UnmarshalJSON implements the json unmarshal method used when this type is
// unmarshaled using json.Unmarshal.
// json.Unmarshaler impl
func (u *URL) UnmarshalJSON(b []byte) error {
	var ref string
	if err := json.Unmarshal(b, &ref); err != nil {
		return err
	}
	r, err := ParseURL(ref)
	if err != nil {
		return err
	}
	// ParseURL returns nil for the empty string; normalize to the zero URL.
	if r == nil {
		*u = URL{}
		return nil
	}
	*u = *r
	return nil
}
// String returns the full string representation of the URL; a nil receiver
// yields the empty string.
func (u *URL) String() string {
	if u == nil {
		return ""
	}
	return (*url.URL)(u).String()
}
// URL returns the URL as a *url.URL; a nil receiver yields an empty url.URL.
func (u *URL) URL() *url.URL {
	if u == nil {
		return &url.URL{}
	}
	// Copy into a fresh variable; the original named this `url`, which
	// shadowed the net/url package for the rest of the function body.
	uu := url.URL(*u)
	return &uu
}
// ResolveReference resolves ref against u, mirroring url.URL.ResolveReference,
// and returns the result as an *URL. A nil ref returns u unchanged.
func (u *URL) ResolveReference(ref *URL) *URL {
	if ref == nil {
		return u
	}
	// Convert to *url.URL, resolve, and convert back.
	resolved := (*url.URL)(u).ResolveReference((*url.URL)(ref))
	return (*URL)(resolved)
}
Provide a semantic equality func for apis.URL (#987)
url.URL has an unexported type (UserInfo) which causes semantic
equality to panic unless we add a custom equality function
/*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apis
import (
"encoding/json"
"fmt"
"net/url"
"k8s.io/apimachinery/pkg/api/equality"
)
// URL is an alias of url.URL.
// It has custom json marshal methods that enable it to be used in K8s CRDs
// such that the CRD resource will have the URL but operator code can work
// with the url.URL struct directly.
type URL url.URL

// ParseURL attempts to parse the given string as a URL.
// Compatible with net/url.Parse except in the case of an empty string, where
// the resulting *URL will be nil with no error.
func ParseURL(u string) (*URL, error) {
	if len(u) == 0 {
		return nil, nil
	}
	parsed, err := url.Parse(u)
	if err != nil {
		return nil, err
	}
	return (*URL)(parsed), nil
}
// HTTP creates an http:// URL pointing to a known domain.
func HTTP(domain string) *URL {
	u := URL{Scheme: "http", Host: domain}
	return &u
}
// HTTPS creates an https:// URL pointing to a known domain.
func HTTPS(domain string) *URL {
	u := URL{Scheme: "https", Host: domain}
	return &u
}
// IsEmpty reports whether the URL is nil or equal to the zero URL.
func (u *URL) IsEmpty() bool {
	return u == nil || *u == URL{}
}
// MarshalJSON implements a custom json marshal method used when this type is
// marshaled using json.Marshal.
// json.Marshaler impl
//
// NOTE(review): %q applies Go-style quoting, which matches JSON escaping for
// the characters a serialized URL normally contains — confirm no URLs with
// exotic control characters flow through here.
func (u URL) MarshalJSON() ([]byte, error) {
	b := fmt.Sprintf("%q", u.String())
	return []byte(b), nil
}
// UnmarshalJSON implements the json unmarshal method used when this type is
// unmarshaled using json.Unmarshal.
// json.Unmarshaler impl
func (u *URL) UnmarshalJSON(b []byte) error {
	var ref string
	if err := json.Unmarshal(b, &ref); err != nil {
		return err
	}
	r, err := ParseURL(ref)
	if err != nil {
		return err
	}
	// ParseURL returns nil for the empty string; normalize to the zero URL.
	if r == nil {
		*u = URL{}
		return nil
	}
	*u = *r
	return nil
}
// String returns the full string representation of the URL; a nil receiver
// yields the empty string.
func (u *URL) String() string {
	if u == nil {
		return ""
	}
	return (*url.URL)(u).String()
}
// URL returns the URL as a *url.URL; a nil receiver yields an empty url.URL.
func (u *URL) URL() *url.URL {
	if u == nil {
		return &url.URL{}
	}
	// Copy into a fresh variable; the original named this `url`, which
	// shadowed the net/url package for the rest of the function body.
	uu := url.URL(*u)
	return &uu
}
// ResolveReference resolves ref against u, mirroring url.URL.ResolveReference,
// and returns the result as an *URL. A nil ref returns u unchanged.
func (u *URL) ResolveReference(ref *URL) *URL {
	if ref == nil {
		return u
	}
	// Convert to *url.URL, resolve, and convert back.
	resolved := (*url.URL)(u).ResolveReference((*url.URL)(ref))
	return (*URL)(resolved)
}
// init registers a custom semantic-equality function for apis.URL with the
// k8s.io/apimachinery equality helper, comparing by string form.
func init() {
	equality.Semantic.AddFunc(
		// url.URL has an unexported type (UserInfo) which causes semantic
		// equality to panic unless we add a custom equality function
		func(a, b URL) bool {
			return a.String() == b.String()
		},
	)
}
|
// Copyright 2013 Andreas Koch. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package updatehandler
import (
"strings"
)
// hub fans broadcast messages out to registered websocket connections and
// serializes all (un)registration through channels; see run().
type hub struct {
	// Registered connections.
	connections map[*connection]bool
	// Inbound messages from the connections.
	broadcast chan Message
	// Register requests from the connections.
	register chan *connection
	// Unregister requests from connections.
	unregister chan *connection
}

// h is the package-wide hub instance. Channels are buffered by one so a
// single pending request does not block its sender.
var h = hub{
	broadcast:   make(chan Message, 1),
	register:    make(chan *connection, 1),
	unregister:  make(chan *connection, 1),
	connections: make(map[*connection]bool),
}
// ConnectionsByRoute returns all registered connections whose Route is a
// suffix of the given route.
func (hub *hub) ConnectionsByRoute(route string) []*connection {
	connectionsByRoute := make([]*connection, 0)
	// Iterate the receiver's connections; the original iterated the package
	// global `h`, silently ignoring the receiver it was called on.
	for c := range hub.connections {
		if strings.HasSuffix(route, c.Route) {
			connectionsByRoute = append(connectionsByRoute, c)
		}
	}
	return connectionsByRoute
}
// run is the hub's event loop. It serializes registration, unregistration
// and broadcasting, so the connections map is only touched from this one
// goroutine.
func (h *hub) run() {
	for {
		select {
		case c := <-h.register:
			h.connections[c] = true
		case c := <-h.unregister:
			// Only close the channel while the connection is still tracked;
			// the broadcast branch below may already have removed and closed
			// it, and closing twice would panic.
			if _, ok := h.connections[c]; ok {
				delete(h.connections, c)
				close(c.send)
			}
		case m := <-h.broadcast:
			for _, c := range h.ConnectionsByRoute(m.Route) {
				select {
				case c.send <- m:
				default:
					// Slow consumer: drop the connection and close it.
					delete(h.connections, c)
					close(c.send)
					go c.ws.Close()
				}
			}
		}
	}
}
Hub Channel Close issue
// Copyright 2013 Andreas Koch. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package updatehandler
import (
"strings"
)
// hub fans broadcast messages out to registered websocket connections and
// serializes all (un)registration through channels; see run().
type hub struct {
	// Registered connections.
	connections map[*connection]bool
	// Inbound messages from the connections.
	broadcast chan Message
	// Register requests from the connections.
	register chan *connection
	// Unregister requests from connections.
	unregister chan *connection
}

// h is the package-wide hub instance. Channels are buffered by one so a
// single pending request does not block its sender.
var h = hub{
	broadcast:   make(chan Message, 1),
	register:    make(chan *connection, 1),
	unregister:  make(chan *connection, 1),
	connections: make(map[*connection]bool),
}
// ConnectionsByRoute returns all registered connections whose Route is a
// suffix of the given route.
func (hub *hub) ConnectionsByRoute(route string) []*connection {
	connectionsByRoute := make([]*connection, 0)
	// Iterate the receiver's connections; the original iterated the package
	// global `h`, silently ignoring the receiver it was called on.
	for c := range hub.connections {
		if strings.HasSuffix(route, c.Route) {
			connectionsByRoute = append(connectionsByRoute, c)
		}
	}
	return connectionsByRoute
}
// run is the hub's event loop. It serializes registration, unregistration
// and broadcasting, so the connections map is only touched from this one
// goroutine.
func (h *hub) run() {
	for {
		select {
		case c := <-h.register:
			h.connections[c] = true
		case c := <-h.unregister:
			// Only close the channel while the connection is still tracked;
			// the broadcast branch below may already have removed and closed
			// it, and closing twice would panic. This replaces the TODO that
			// commented out the close entirely (which leaked send channels).
			if _, ok := h.connections[c]; ok {
				delete(h.connections, c)
				close(c.send)
			}
		case m := <-h.broadcast:
			for _, c := range h.ConnectionsByRoute(m.Route) {
				select {
				case c.send <- m:
				default:
					// Slow consumer: drop the connection and close it here,
					// where it is known to still be in the map.
					delete(h.connections, c)
					close(c.send)
					go c.ws.Close()
				}
			}
		}
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"syscall";
"os";
"unsafe";
)
const (
	blockSize = 4096 // TODO(r): use statfs
)

// clen returns the index of the first NUL byte in n, or len(n) when n
// contains no NUL (i.e. the length of a C-style string stored in n).
func clen(n []byte) int {
	for i, b := range n {
		if b == 0 {
			return i
		}
	}
	return len(n)
}
// Negative count means read until EOF.
// readdirnames reads up to count directory entry names from file (all of
// them when count is negative), skipping "." and "..".
func readdirnames(file *File, count int) (names []string, err *os.Error) {
	// If this file has no dirinfo, create one.
	if file.dirinfo == nil {
		file.dirinfo = new(dirInfo);
		// The buffer must be at least a block long.
		// TODO(r): use fstatfs to find fs block size.
		file.dirinfo.buf = make([]byte, blockSize);
	}
	d := file.dirinfo;
	size := count;
	if size < 0 {
		size = 100
	}
	names = make([]string, 0, size);	// Empty with room to grow.
	for count != 0 {
		// Refill the buffer if necessary
		if d.bufp == d.nbuf {
			var errno int64;
			dbuf := (*syscall.Dirent)(unsafe.Pointer(&d.buf[0]));
			d.nbuf, errno = syscall.Getdents(file.fd, dbuf, int64(len(d.buf)));
			if d.nbuf < 0 {
				return names, os.ErrnoToError(errno)
			}
			if d.nbuf == 0 {
				break	// EOF
			}
			d.bufp = 0;
		}
		// Drain the buffer
		for count != 0 && d.bufp < d.nbuf {
			dirent := (*syscall.Dirent)(unsafe.Pointer(&d.buf[d.bufp]));
			d.bufp += int64(dirent.Reclen);
			if dirent.Ino == 0 {	// File absent in directory.
				continue
			}
			// Fix: clen must scan the Name byte array, not the Namlen
			// count (which does not even have the right type) — this typo
			// broke the linux build.
			var name = string(dirent.Name[0:clen(dirent.Name)]);
			if name == "." || name == ".." {	// Useless names
				continue
			}
			count--;
			// Grow by doubling (this predates append).
			if len(names) == cap(names) {
				nnames := make([]string, len(names), 2*len(names));
				for i := 0; i < len(names); i++ {
					nnames[i] = names[i]
				}
				names = nnames;
			}
			names = names[0:len(names)+1];
			names[len(names)-1] = name;
		}
	}
	return names, nil;
}
fix typo breaking linux build
R=rsc
OCL=27304
CL=27304
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package os
import (
"syscall";
"os";
"unsafe";
)
const (
	blockSize = 4096 // TODO(r): use statfs
)

// clen returns the index of the first NUL byte in n, or len(n) when n
// contains no NUL (i.e. the length of a C-style string stored in n).
func clen(n []byte) int {
	for i, b := range n {
		if b == 0 {
			return i
		}
	}
	return len(n)
}
// Negative count means read until EOF.
// readdirnames reads up to count directory entry names from file (all of
// them when count is negative), skipping "." and "..".
func readdirnames(file *File, count int) (names []string, err *os.Error) {
	// If this file has no dirinfo, create one.
	if file.dirinfo == nil {
		file.dirinfo = new(dirInfo);
		// The buffer must be at least a block long.
		// TODO(r): use fstatfs to find fs block size.
		file.dirinfo.buf = make([]byte, blockSize);
	}
	d := file.dirinfo;
	size := count;
	if size < 0 {
		// Arbitrary initial capacity for the read-everything case; the
		// slice is grown below as needed.
		size = 100
	}
	names = make([]string, 0, size);	// Empty with room to grow.
	for count != 0 {
		// Refill the buffer if necessary
		if d.bufp == d.nbuf {
			var errno int64;
			dbuf := (*syscall.Dirent)(unsafe.Pointer(&d.buf[0]));
			d.nbuf, errno = syscall.Getdents(file.fd, dbuf, int64(len(d.buf)));
			if d.nbuf < 0 {
				return names, os.ErrnoToError(errno)
			}
			if d.nbuf == 0 {
				break	// EOF
			}
			d.bufp = 0;
		}
		// Drain the buffer
		for count != 0 && d.bufp < d.nbuf {
			dirent := (*syscall.Dirent)(unsafe.Pointer(&d.buf[d.bufp]));
			d.bufp += int64(dirent.Reclen);
			if dirent.Ino == 0 {	// File absent in directory.
				continue
			}
			// Name is a NUL-terminated byte array; clen finds its length.
			var name = string(dirent.Name[0:clen(dirent.Name)]);
			if name == "." || name == ".." {	// Useless names
				continue
			}
			count--;
			// Grow by doubling (this predates append).
			if len(names) == cap(names) {
				nnames := make([]string, len(names), 2*len(names));
				for i := 0; i < len(names); i++ {
					nnames[i] = names[i]
				}
				names = nnames;
			}
			names = names[0:len(names)+1];
			names[len(names)-1] = name;
		}
	}
	return names, nil;
}
|
package sous
import "github.com/samsalisbury/semv"
type (
	// Manifests is a collection of Manifest.
	Manifests []*Manifest
	// Manifest is a minimal representation of the global deployment state of
	// a particular named application. It is designed to be written and read by
	// humans as-is, and expanded into full Deployments internally. It is a DTO,
	// which can be stored in YAML files.
	//
	// Manifest has a direct two-way mapping to/from Deployments.
	Manifest struct {
		// Source is the location of the source code for this piece of software.
		Source SourceLocation `validate:"nonZero"`
		// Owners is a list of named owners of this repository. The type of this
		// field is subject to change.
		Owners []string
		// Kind is the kind of software that SourceRepo represents.
		Kind ManifestKind `validate:"nonZero"`
		// Deployments is a map of cluster names to DeploymentSpecs
		Deployments map[string]PartialDeploySpec `validate:"nonEmpty,valuesNonZero"`
	}
	// ManifestKind describes the broad category of a piece of software, such as
	// a long-running HTTP service, or a scheduled task, etc. It is used to
	// determine resource sets and contracts that can be run on this
	// application.
	ManifestKind string
	// DeploymentSpecs is a list of DeploymentSpecs.
	DeploymentSpecs []PartialDeploySpec
	// DeploymentSpec is the interface to describe a cluster-wide deployment of
	// an application described by a Manifest. Together with the manifest, one
	// can assemble full Deployments.
	//
	// Unexported fields in DeploymentSpec are not intended to be serialised
	// to/from yaml, but are useful when set internally.
	PartialDeploySpec struct {
		// DeployConfig contains config information for this deployment, see
		// DeployConfig.
		// NOTE(review): the canonical go-yaml inline tag is `yaml:",inline"`;
		// `yaml:"inline"` names a key instead — confirm the yaml library in
		// use treats this as intended.
		DeployConfig `yaml:"inline"`
		// Version is a semantic version with the following properties:
		//
		//   1. The major/minor/patch/pre-release fields exist as a tag in
		//      the source code repository containing this application.
		//   2. The metadata field is the full revision ID of the commit
		//      which the tag in 1. points to.
		Version semv.Version `validate:"nonZero"`
		// clusterName is the name of the cluster this deployment belongs to. Upon
		// parsing the Manifest, this will be set to the key in
		// Manifests.Deployments which points at this Deployment.
		clusterName string
	}
	// DeployConfig represents the configuration of a deployment's tasks,
	// in a specific cluster. i.e. their resources, environment, and the number
	// of instances.
	DeployConfig struct {
		// Resources represents the resources each instance of this software
		// will be given by the execution environment.
		Resources Resources `validate:"nonZero"`
		// Env is a list of environment variables to set for each instance of
		// of this deployment. It will be checked for conflict with the
		// definitions found in State.Defs.EnvVars, and if not in conflict
		// assumes the greatest priority.
		Env map[string]string
		// NumInstances is a guide to the number of instances that should be
		// deployed in this cluster, note that the actual number may differ due
		// to decisions made by Sous. If set to zero, Sous will decide how many
		// instances to launch.
		NumInstances int
	}
	// Resources is a mapping of resource name to value, used to provision
	// single instances of an application. It is validated against
	// State.Defs.Resources. The keys must match defined resource names, and the
	// values must parse to the defined types.
	Resources map[string]string
)
const (
	// HTTPService represents an HTTP service which is a long-running process,
	// and listens and responds to HTTP requests.
	HTTPService (ManifestKind) = "http-service"
	// ScheduledJob represents a process which starts on some schedule, and
	// exits when it completes its task.
	// NOTE(review): unlike HTTPService above, this constant has no explicit
	// ManifestKind type, so it is an untyped string — confirm this is intended.
	ScheduledJob = "scheduled-job"
)
Update validation tags on Manifest and related
package sous
import "github.com/samsalisbury/semv"
type (
	// Manifests is a collection of Manifest.
	Manifests []*Manifest

	// Manifest is a minimal representation of the global deployment state of
	// a particular named application. It is designed to be written and read by
	// humans as-is, and expanded into full Deployments internally. It is a DTO,
	// which can be stored in YAML files.
	//
	// Manifest has a direct two-way mapping to/from Deployments.
	Manifest struct {
		// Source is the location of the source code for this piece of software.
		Source SourceLocation `validate:"nonzero"`
		// Owners is a list of named owners of this repository. The type of this
		// field is subject to change.
		Owners []string
		// Kind is the kind of software represented by this manifest.
		Kind ManifestKind `validate:"nonzero"`
		// Deployments is a map of cluster names to DeploymentSpecs
		Deployments map[string]PartialDeploySpec `validate:"keys=nonempty,values=nonzero"`
	}

	// ManifestKind describes the broad category of a piece of software, such as
	// a long-running HTTP service, or a scheduled task, etc. It is used to
	// determine resource sets and contracts that can be run on this
	// application.
	ManifestKind string

	// DeploymentSpecs is a list of DeploymentSpecs.
	DeploymentSpecs []PartialDeploySpec

	// PartialDeploySpec describes a cluster-wide deployment of
	// an application described by a Manifest. Together with the manifest, one
	// can assemble full Deployments.
	//
	// Unexported fields in PartialDeploySpec are not intended to be serialised
	// to/from yaml, but are useful when set internally.
	PartialDeploySpec struct {
		// DeployConfig contains config information for this deployment, see
		// DeployConfig.
		DeployConfig `yaml:"inline"`
		// Version is a semantic version with the following properties:
		//
		//   1. The major/minor/patch/pre-release fields exist as a tag in
		//      the source code repository containing this application.
		//   2. The metadata field is the full revision ID of the commit
		//      which the tag in 1. points to.
		Version semv.Version `validate:"nonzero"`
		// clusterName is the name of the cluster this deployment belongs to. Upon
		// parsing the Manifest, this will be set to the key in
		// Manifests.Deployments which points at this Deployment.
		clusterName string
	}

	// DeployConfig represents the configuration of a deployment's tasks,
	// in a specific cluster. i.e. their resources, environment, and the number
	// of instances.
	DeployConfig struct {
		// Resources represents the resources each instance of this software
		// will be given by the execution environment.
		Resources Resources `validate:"keys=nonempty,values=nonempty"`
		// Env is a map of environment variable names to values, set for each
		// instance of this deployment. It will be checked for conflict with the
		// definitions found in State.Defs.EnvVars, and if not in conflict
		// assumes the greatest priority.
		Env map[string]string `validate:"keys=nonempty,values=nonempty"`
		// NumInstances is a guide to the number of instances that should be
		// deployed in this cluster, note that the actual number may differ due
		// to decisions made by Sous. If set to zero, Sous will decide how many
		// instances to launch.
		NumInstances int
	}

	// Resources is a mapping of resource name to value, used to provision
	// single instances of an application. It is validated against
	// State.Defs.Resources. The keys must match defined resource names, and the
	// values must parse to the defined types.
	Resources map[string]string
)
const (
	// HTTPService represents an HTTP service which is a long-running process,
	// and listens and responds to HTTP requests.
	HTTPService (ManifestKind) = "http-service"
	// ScheduledJob represents a process which starts on some schedule, and
	// exits when it completes its task.
	// NOTE(review): unlike HTTPService above, this constant has no explicit
	// ManifestKind type, so it is an untyped string — confirm this is intended.
	ScheduledJob = "scheduled-job"
)
|
package sparse
import (
	"bytes"
	"crypto/sha1"
	"encoding/binary"
	"encoding/gob"
	"errors"
	"fmt"
	"net"
	"os"
	"strconv"
	"time"

	fio "github.com/rancher/sparse-tools/directfio"
	"github.com/rancher/sparse-tools/log"
)
// HashCollsisionError signals that the local and remote file hashes diverged,
// meaning either a storage error or a block hash collision occurred. Callers
// type-assert on it to decide whether to retry the sync, so the (misspelled)
// identifier is part of the public API and kept as-is.
type HashCollsisionError struct{}

// Error implements the error interface.
func (*HashCollsisionError) Error() string {
	const msg = "file hash divergence: storage error or block hash collision"
	return msg
}
// TCPEndPoint tcp connection address.
// NOTE(review): Port is int16, which cannot represent port numbers above
// 32767 — uint16 would cover the full range; left unchanged because callers
// construct this type directly.
type TCPEndPoint struct {
	Host string
	Port int16
}

// connectionRetries is declared here, but SyncFile hardcodes its own retry
// count — confirm whether this constant is used elsewhere in the package.
const connectionRetries = 5

// verboseClient enables verbose client-side debug logging.
const verboseClient = true
// SyncFile synchronizes the local file at localPath to remotePath on the
// remote host addr. On a chunk hash collision it retries the sync once;
// on success it returns the hash of the local file.
func SyncFile(localPath string, addr TCPEndPoint, remotePath string, timeout int) (hashLocal []byte, err error) {
	// Two attempts total: the first with retry still available, the last without.
	for attempt := 0; attempt < 2; attempt++ {
		hashLocal, err = syncFile(localPath, addr, remotePath, timeout, attempt == 0)
		if err == nil {
			break
		}
		if _, collision := err.(*HashCollsisionError); collision {
			// Hash divergence: try the whole sync again.
			log.Warn("SSync: retrying on chunk hash collision...")
			continue
		}
		log.Error("SSync error:", err)
		break
	}
	return
}
// syncFile performs a single sync attempt of localPath to remotePath on addr.
// retry tells the remote side whether another attempt will follow on failure.
// It returns the hash of the local file on success, nil and an error otherwise.
func syncFile(localPath string, addr TCPEndPoint, remotePath string, timeout int, retry bool) ([]byte, error) {
	file, err := fio.OpenFile(localPath, os.O_RDONLY, 0)
	if err != nil {
		log.Error("Failed to open local source file:", localPath)
		return nil, err
	}
	defer file.Close()

	// Seek to the end to learn the file size.
	size, err := file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Error("Failed to get size of local source file:", localPath, err)
		return nil, err
	}

	SetupFileIO(size%Blocks == 0)

	conn := connect(addr.Host, strconv.Itoa(int(addr.Port)), timeout)
	if nil == conn {
		// Include the address in the error instead of a bare "Connection failed".
		err = fmt.Errorf("Failed to connect to %v", addr)
		log.Error(err)
		return nil, err
	}
	defer conn.Close()

	encoder := gob.NewEncoder(conn)
	decoder := gob.NewDecoder(conn)

	// Use unix time as hash salt
	salt := make([]byte, binary.MaxVarintLen64)
	binary.PutVarint(salt, time.Now().UnixNano())
	status := sendSyncRequest(encoder, decoder, remotePath, size, salt)
	if !status {
		// Include the remote path in the error for diagnosability.
		err = fmt.Errorf("Sync request to %v failed", remotePath)
		log.Error(err)
		return nil, err
	}

	abortStream := make(chan error)
	layoutStream := make(chan FileInterval, 128)
	errStream := make(chan error)

	// Initiate interval loading...
	err = loadFileLayout(abortStream, file, layoutStream, errStream)
	if err != nil {
		log.Error("Failed to retrieve local file layout:", err)
		return nil, err
	}

	fileStream := make(chan FileInterval, 128)
	unorderedStream := make(chan HashedDataInterval, 128)
	orderedStream := make(chan HashedDataInterval, 128)

	go IntervalSplitter(layoutStream, fileStream)
	FileReaderGroup(fileReaders, salt, fileStream, localPath, unorderedStream)
	go OrderIntervals("src:", unorderedStream, orderedStream)

	// Get remote file intervals and their hashes
	netInStream := make(chan HashedInterval, 128)
	netInStreamDone := make(chan bool)
	go netDstReceiver(decoder, netInStream, netInStreamDone)

	return processDiff(salt, abortStream, errStream, encoder, decoder, orderedStream, netInStream, netInStreamDone, retry)
}
// connect dials host:port over TCP, retrying for up to timeout seconds.
// It returns nil if no connection could be established within the window.
func connect(host, port string, timeout int) net.Conn {
	// connect to this socket
	endpoint := host + ":" + port
	raddr, err := net.ResolveTCPAddr("tcp", endpoint)
	if err != nil {
		log.Fatal("Connection address resolution error:", err)
	}
	timeStart := time.Now()
	timeStop := timeStart.Add(time.Duration(timeout) * time.Second)
	for timeNow := timeStart; timeNow.Before(timeStop); timeNow = time.Now() {
		conn, err := net.DialTCP("tcp", nil, raddr)
		if err == nil {
			return conn
		}
		log.Warn("Failed connection to", endpoint, "Retrying...")
		// NOTE(review): != on time.Time works here only because timeNow is a
		// copy of timeStart on the first iteration; time.Equal is the
		// conventional comparison.
		if timeNow != timeStart {
			// only sleep after the second attempt to speedup tests
			time.Sleep(1 * time.Second)
		}
	}
	return nil
}
// sendSyncRequest transmits the sync request header and its parameters
// (remote path, file size, hash salt) to the server and waits for the
// acknowledgement. It returns the server's ack value, or false on any
// protocol error.
func sendSyncRequest(encoder *gob.Encoder, decoder *gob.Decoder, path string, size int64, salt []byte) bool {
	// The request is a fixed sequence of gob-encoded values; Encode takes an
	// interface{} and encodes by dynamic type, so the loop is equivalent to
	// four separate calls.
	for _, item := range []interface{}{
		requestHeader{requestMagic, syncRequestCode},
		path,
		size,
		salt,
	} {
		if err := encoder.Encode(item); err != nil {
			log.Fatal("Client protocol encoder error:", err)
			return false
		}
	}

	var ack bool
	if err := decoder.Decode(&ack); err != nil {
		log.Fatal("Client protocol decoder error:", err)
		return false
	}
	return ack
}
// netDstReceiver decodes the stream of hashed intervals sent by the remote
// (destination) side and forwards each one to netInStream. An interval of
// kind SparseIgnore marks end-of-stream. When done, netInStream is closed
// and the success/failure status is reported on netInStreamDone.
func netDstReceiver(decoder *gob.Decoder, netInStream chan<- HashedInterval, netInStreamDone chan<- bool) {
	status := true
	for {
		if verboseClient {
			log.Debug("Client.netDstReceiver decoding...")
		}
		var r HashedInterval
		err := decoder.Decode(&r)
		if err != nil {
			// Fixed typo: "Cient" -> "Client". Status=false reports the
			// decode failure to the caller.
			log.Fatal("Client protocol error:", err)
			status = false
			break
		}
		// SparseIgnore is the <eof> marker for the remote hash stream.
		if r.Kind == SparseIgnore {
			if verboseClient {
				log.Debug("Client.netDstReceiver got <eof>")
			}
			break
		}
		if verboseClient {
			switch r.Kind {
			case SparseData:
				log.Debug("Client.netDstReceiver got data", r.FileInterval, "hash[", len(r.Hash), "]")
			case SparseHole:
				log.Debug("Client.netDstReceiver got hole", r.FileInterval)
			}
		}
		netInStream <- r
	}
	close(netInStream)
	netInStreamDone <- status
}
// fileChunk is a file reading chunk.
type fileChunk struct {
	eof    bool // end of stream: stop reader
	header FileInterval
}

// diffChunk is a network transfer chunk queued for networkSender.
type diffChunk struct {
	status bool // read file or network send error yield false
	header DataInterval
}
// processDiff drives the client side of the sync protocol: it walks the local
// and remote interval streams in lockstep, forwards differing intervals to the
// remote via networkSender, accumulates the salted local file hash, and
// finishes with the hash comparison and the final retry negotiation.
// On hash divergence err is a *HashCollsisionError so the caller can retry.
func processDiff(salt []byte, abortStream chan<- error, errStream <-chan error, encoder *gob.Encoder, decoder *gob.Decoder, local <-chan HashedDataInterval, remote <-chan HashedInterval, netInStreamDone <-chan bool, retry bool) (hashLocal []byte, err error) {
	// Local: __ _*
	// Remote: *_ **
	hashLocal = make([]byte, 0) // empty hash for errors
	const concurrentReaders = 4
	netStream := make(chan diffChunk, 128)
	netStatus := make(chan netXferStatus)
	go networkSender(netStream, encoder, netStatus)
	fileHasher := sha1.New()
	fileHasher.Write(salt)

	lrange := <-local
	rrange := <-remote
	for lrange.Len() != 0 {
		if rrange.Len() == 0 {
			// Copy local tail
			if verboseClient {
				logData("LHASH", lrange.Data)
			}
			hashFileData(fileHasher, lrange.Len(), lrange.Data)
			processFileInterval(lrange, HashedInterval{FileInterval{SparseHole, lrange.Interval}, make([]byte, 0)}, netStream)
			lrange = <-local
			continue
		}
		// Diff
		if verboseClient {
			log.Debug("Diff:", lrange.HashedInterval, rrange)
		}
		if lrange.Begin == rrange.Begin {
			if lrange.End > rrange.End {
				// Local interval extends past the remote one: process the
				// overlapping prefix and keep the local remainder.
				data := lrange.Data
				if len(data) > 0 {
					data = lrange.Data[:rrange.Len()]
				}
				subrange := HashedDataInterval{HashedInterval{FileInterval{lrange.Kind, rrange.Interval}, lrange.Hash}, data}
				if verboseClient {
					logData("LHASH", subrange.Data)
				}
				hashFileData(fileHasher, subrange.Len(), subrange.Data)
				processFileInterval(subrange, rrange, netStream)
				if len(data) > 0 {
					lrange.Data = lrange.Data[subrange.Len():]
				}
				lrange.Begin = rrange.End
				rrange = <-remote
				continue
			} else if lrange.End < rrange.End {
				// Remote interval extends past the local one: process the
				// local interval and keep the remote remainder.
				if verboseClient {
					logData("LHASH", lrange.Data)
				}
				hashFileData(fileHasher, lrange.Len(), lrange.Data)
				processFileInterval(lrange, HashedInterval{FileInterval{rrange.Kind, lrange.Interval}, make([]byte, 0)}, netStream)
				rrange.Begin = lrange.End
				lrange = <-local
				continue
			}
			// Intervals cover exactly the same range.
			if verboseClient {
				logData("LHASH", lrange.Data)
			}
			hashFileData(fileHasher, lrange.Len(), lrange.Data)
			processFileInterval(lrange, rrange, netStream)
			lrange = <-local
			rrange = <-remote
		} else {
			// Should never happen
			log.Fatal("processDiff internal error")
			return
		}
	}
	log.Info("Finished processing file diff")

	status := true
	err = <-errStream
	if err != nil {
		log.Error("Sync client file load aborted:", err)
		status = false
	}
	// make sure we finished consuming dst hashes
	status = <-netInStreamDone && status // netDstReceiver finished
	log.Info("Finished consuming remote file hashes, status=", status)

	// Send end of transmission
	netStream <- diffChunk{true, DataInterval{FileInterval{SparseIgnore, Interval{0, 0}}, make([]byte, 0)}}

	// get network sender status
	net := <-netStatus
	log.Info("Finished sending file diff of", net.byteCount, "(bytes), status=", net.status)
	if !net.status {
		// Fixed typo: "netwoek" -> "network".
		err = errors.New("network transfer failure")
		return
	}

	var statusRemote bool
	err = decoder.Decode(&statusRemote)
	if err != nil {
		log.Fatal("Client protocol remote status error:", err)
		return
	}
	if !statusRemote {
		err = errors.New("failure on remote sync site")
		return
	}
	var hashRemote []byte
	err = decoder.Decode(&hashRemote)
	if err != nil {
		log.Fatal("Client protocol remote hash error:", err)
		return
	}

	// Compare file hashes
	hashLocal = fileHasher.Sum(nil)
	if isHashDifferent(hashLocal, hashRemote) || FailPointFileHashMatch() {
		log.Warn("hashLocal =", hashLocal)
		log.Warn("hashRemote=", hashRemote)
		err = &HashCollsisionError{}
	} else {
		retry = false // success, don't retry anymore
	}

	// Final retry negotiation
	{
		err1 := encoder.Encode(retry)
		if err1 != nil {
			// Fixed: was logging the outer err instead of err1.
			log.Fatal("Client protocol remote retry error:", err1)
		}
		err1 = decoder.Decode(&statusRemote)
		if err1 != nil {
			log.Fatal("Client protocol remote retry status error:", err1)
		}
	}
	return
}
// isHashDifferent reports whether the two hash values differ.
func isHashDifferent(a, b []byte) bool {
	if bytes.Equal(a, b) {
		return false
	}
	return true
}
// processFileInterval compares one local interval against the matching remote
// interval and pushes the appropriate chunk to netStream: the local data when
// the sides differ, or a SparseIgnore marker when no transfer is needed.
// The two intervals must cover exactly the same file range.
func processFileInterval(local HashedDataInterval, remote HashedInterval, netStream chan<- diffChunk) {
	if local.Interval != remote.Interval {
		log.Fatal("Sync.processFileInterval range internal error:", local.FileInterval, remote.FileInterval)
	}
	if local.Kind != remote.Kind {
		// Different interval types, send the diff
		if local.Kind == SparseData && int64(len(local.Data)) != local.FileInterval.Len() {
			log.Fatal("Sync.processFileInterval data internal error:", local.FileInterval.Len(), len(local.Data))
		}
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	// The interval types are the same
	if SparseHole == local.Kind {
		// Process hole, no synchronization is required
		local.Kind = SparseIgnore
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	if local.Kind != SparseData {
		log.Fatal("Sync.processFileInterval kind internal error:", local.FileInterval)
	}
	// Data file interval
	if isHashDifferent(local.Hash, remote.Hash) {
		if int64(len(local.Data)) != local.FileInterval.Len() {
			log.Fatal("Sync.processFileInterval internal error:", local.FileInterval.Len(), len(local.Data))
		}
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	// No diff, just communicate we processed it
	//TODO: this apparently can be avoided but requires revision of the protocol
	local.Kind = SparseIgnore
	netStream <- diffChunk{true, DataInterval{local.FileInterval, make([]byte, 0)}}
}
// traceChannelLoad prints chan codes and lengths to trace
// - sequence and interleaving of chan processing
// - how much of the chan buffer is used
const traceChannelLoad = false

// netXferStatus is the final result reported by networkSender.
type netXferStatus struct {
	status    bool  // false if any send or read error occurred
	byteCount int64 // total data bytes transmitted
}
// networkSender drains netStream, gob-encoding each chunk's header (and, for
// data chunks, its payload) to the remote side. A zero-length header marks end
// of transmission and terminates the loop. After an error, remaining chunks
// are still consumed (and discarded) so producers do not block. The final
// status and byte count are reported on netStatus.
func networkSender(netStream <-chan diffChunk, encoder *gob.Encoder, netStatus chan<- netXferStatus) {
	status := true
	byteCount := int64(0)
	for {
		chunk := <-netStream
		if 0 == chunk.header.Len() {
			// eof: last 0 len header
			if verboseClient {
				log.Debug("Client.networkSender <eof>")
			}
			err := encoder.Encode(chunk.header.FileInterval)
			if err != nil {
				log.Fatal("Client protocol encoder error:", err)
				status = false
			}
			break
		}
		if !status {
			// network error
			continue // discard the chunk
		}
		if !chunk.status {
			// read error
			status = false
			continue // discard the chunk
		}
		if traceChannelLoad {
			fmt.Fprint(os.Stderr, len(netStream), "n")
		}
		// Encode and send data to the network
		if verboseClient {
			log.Debug("Client.networkSender sending:", chunk.header.FileInterval)
		}
		err := encoder.Encode(chunk.header.FileInterval)
		if err != nil {
			log.Fatal("Client protocol encoder error:", err)
			status = false
			continue
		}
		// Holes carry no payload; only data intervals are followed by bytes.
		if len(chunk.header.Data) == 0 {
			continue
		}
		if verboseClient {
			log.Debug("Client.networkSender sending data")
		}
		if int64(len(chunk.header.Data)) != chunk.header.FileInterval.Len() {
			log.Fatal("Client.networkSender sending data internal error:", chunk.header.FileInterval.Len(), len(chunk.header.Data))
		}
		err = encoder.Encode(chunk.header.Data)
		if err != nil {
			log.Fatal("Client protocol encoder error:", err)
			status = false
			continue
		}
		byteCount += int64(len(chunk.header.Data))
		if traceChannelLoad {
			fmt.Fprint(os.Stderr, "N\n")
		}
	}
	netStatus <- netXferStatus{status, byteCount}
}
ssync: tweaked custom error construction (review)
package sparse
import (
"crypto/sha1"
"net"
"os"
"strconv"
"bytes"
"encoding/binary"
"encoding/gob"
"errors"
"fmt"
"time"
fio "github.com/rancher/sparse-tools/directfio"
"github.com/rancher/sparse-tools/log"
)
// HashCollsisionError signals that the local and remote file hashes diverged,
// meaning either a storage error or a block hash collision occurred. Callers
// type-assert on it to decide whether to retry the sync, so the (misspelled)
// identifier is part of the public API and kept as-is.
type HashCollsisionError struct{}

// Error implements the error interface.
func (*HashCollsisionError) Error() string {
	const msg = "file hash divergence: storage error or block hash collision"
	return msg
}
// TCPEndPoint tcp connection address.
// NOTE(review): Port is int16, which cannot represent port numbers above
// 32767 — uint16 would cover the full range; left unchanged because callers
// construct this type directly.
type TCPEndPoint struct {
	Host string
	Port int16
}

// connectionRetries is declared here, but SyncFile hardcodes its own retry
// count — confirm whether this constant is used elsewhere in the package.
const connectionRetries = 5

// verboseClient enables verbose client-side debug logging.
const verboseClient = true
// SyncFile synchronizes the local file at localPath to remotePath on the
// remote host addr. On a chunk hash collision it retries the sync once;
// on success it returns the hash of the local file.
func SyncFile(localPath string, addr TCPEndPoint, remotePath string, timeout int) (hashLocal []byte, err error) {
	// Two attempts total: the first with retry still available, the last without.
	for attempt := 0; attempt < 2; attempt++ {
		hashLocal, err = syncFile(localPath, addr, remotePath, timeout, attempt == 0)
		if err == nil {
			break
		}
		if _, collision := err.(*HashCollsisionError); collision {
			// Hash divergence: try the whole sync again.
			log.Warn("SSync: retrying on chunk hash collision...")
			continue
		}
		log.Error("SSync error:", err)
		break
	}
	return
}
// syncFile performs a single sync attempt of localPath to remotePath on addr.
// retry tells the remote side whether another attempt will follow on failure.
// It returns the hash of the local file on success, nil and an error otherwise.
func syncFile(localPath string, addr TCPEndPoint, remotePath string, timeout int, retry bool) ([]byte, error) {
	file, err := fio.OpenFile(localPath, os.O_RDONLY, 0)
	if err != nil {
		log.Error("Failed to open local source file:", localPath)
		return nil, err
	}
	defer file.Close()
	// Seek to the end to learn the file size.
	size, err := file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Error("Failed to get size of local source file:", localPath, err)
		return nil, err
	}
	SetupFileIO(size%Blocks == 0)
	conn := connect(addr.Host, strconv.Itoa(int(addr.Port)), timeout)
	if nil == conn {
		err = fmt.Errorf("Failed to connect to %v", addr)
		log.Error(err)
		return nil, err
	}
	defer conn.Close()
	encoder := gob.NewEncoder(conn)
	decoder := gob.NewDecoder(conn)
	// Use unix time as hash salt
	salt := make([]byte, binary.MaxVarintLen64)
	binary.PutVarint(salt, time.Now().UnixNano())
	status := sendSyncRequest(encoder, decoder, remotePath, size, salt)
	if !status {
		err = fmt.Errorf("Sync request to %v failed", remotePath)
		log.Error(err)
		return nil, err
	}
	abortStream := make(chan error)
	layoutStream := make(chan FileInterval, 128)
	errStream := make(chan error)
	// Initiate interval loading...
	err = loadFileLayout(abortStream, file, layoutStream, errStream)
	if err != nil {
		log.Error("Failed to retrieve local file layout:", err)
		return nil, err
	}
	fileStream := make(chan FileInterval, 128)
	unorderedStream := make(chan HashedDataInterval, 128)
	orderedStream := make(chan HashedDataInterval, 128)
	go IntervalSplitter(layoutStream, fileStream)
	FileReaderGroup(fileReaders, salt, fileStream, localPath, unorderedStream)
	go OrderIntervals("src:", unorderedStream, orderedStream)
	// Get remote file intervals and their hashes
	netInStream := make(chan HashedInterval, 128)
	netInStreamDone := make(chan bool)
	go netDstReceiver(decoder, netInStream, netInStreamDone)
	return processDiff(salt, abortStream, errStream, encoder, decoder, orderedStream, netInStream, netInStreamDone, retry)
}
// connect dials host:port over TCP, retrying for up to timeout seconds.
// It returns nil if no connection could be established within the window.
func connect(host, port string, timeout int) net.Conn {
	// connect to this socket
	endpoint := host + ":" + port
	raddr, err := net.ResolveTCPAddr("tcp", endpoint)
	if err != nil {
		log.Fatal("Connection address resolution error:", err)
	}
	timeStart := time.Now()
	timeStop := timeStart.Add(time.Duration(timeout) * time.Second)
	for timeNow := timeStart; timeNow.Before(timeStop); timeNow = time.Now() {
		conn, err := net.DialTCP("tcp", nil, raddr)
		if err == nil {
			return conn
		}
		log.Warn("Failed connection to", endpoint, "Retrying...")
		// NOTE(review): != on time.Time works here only because timeNow is a
		// copy of timeStart on the first iteration; time.Equal is the
		// conventional comparison.
		if timeNow != timeStart {
			// only sleep after the second attempt to speedup tests
			time.Sleep(1 * time.Second)
		}
	}
	return nil
}
// sendSyncRequest transmits the sync request header and its parameters
// (remote path, file size, hash salt) to the server and waits for the
// acknowledgement. It returns the server's ack value, or false on any
// protocol error.
func sendSyncRequest(encoder *gob.Encoder, decoder *gob.Decoder, path string, size int64, salt []byte) bool {
	// The request is a fixed sequence of gob-encoded values; Encode takes an
	// interface{} and encodes by dynamic type, so the loop is equivalent to
	// four separate calls.
	for _, item := range []interface{}{
		requestHeader{requestMagic, syncRequestCode},
		path,
		size,
		salt,
	} {
		if err := encoder.Encode(item); err != nil {
			log.Fatal("Client protocol encoder error:", err)
			return false
		}
	}

	var ack bool
	if err := decoder.Decode(&ack); err != nil {
		log.Fatal("Client protocol decoder error:", err)
		return false
	}
	return ack
}
// netDstReceiver decodes the stream of hashed intervals sent by the remote
// (destination) side and forwards each one to netInStream. An interval of
// kind SparseIgnore marks end-of-stream. When done, netInStream is closed
// and the success/failure status is reported on netInStreamDone.
func netDstReceiver(decoder *gob.Decoder, netInStream chan<- HashedInterval, netInStreamDone chan<- bool) {
	status := true
	for {
		if verboseClient {
			log.Debug("Client.netDstReceiver decoding...")
		}
		var r HashedInterval
		err := decoder.Decode(&r)
		if err != nil {
			// Fixed typo: "Cient" -> "Client". Status=false reports the
			// decode failure to the caller.
			log.Fatal("Client protocol error:", err)
			status = false
			break
		}
		// SparseIgnore is the <eof> marker for the remote hash stream.
		if r.Kind == SparseIgnore {
			if verboseClient {
				log.Debug("Client.netDstReceiver got <eof>")
			}
			break
		}
		if verboseClient {
			switch r.Kind {
			case SparseData:
				log.Debug("Client.netDstReceiver got data", r.FileInterval, "hash[", len(r.Hash), "]")
			case SparseHole:
				log.Debug("Client.netDstReceiver got hole", r.FileInterval)
			}
		}
		netInStream <- r
	}
	close(netInStream)
	netInStreamDone <- status
}
// fileChunk is a file reading chunk.
type fileChunk struct {
	eof    bool // end of stream: stop reader
	header FileInterval
}

// diffChunk is a network transfer chunk queued for networkSender.
type diffChunk struct {
	status bool // read file or network send error yield false
	header DataInterval
}
// processDiff drives the client side of the sync protocol: it walks the local
// and remote interval streams in lockstep, forwards differing intervals to the
// remote via networkSender, accumulates the salted local file hash, and
// finishes with the hash comparison and the final retry negotiation.
// On hash divergence err is a *HashCollsisionError so the caller can retry.
func processDiff(salt []byte, abortStream chan<- error, errStream <-chan error, encoder *gob.Encoder, decoder *gob.Decoder, local <-chan HashedDataInterval, remote <-chan HashedInterval, netInStreamDone <-chan bool, retry bool) (hashLocal []byte, err error) {
	// Local: __ _*
	// Remote: *_ **
	hashLocal = make([]byte, 0) // empty hash for errors
	const concurrentReaders = 4
	netStream := make(chan diffChunk, 128)
	netStatus := make(chan netXferStatus)
	go networkSender(netStream, encoder, netStatus)
	fileHasher := sha1.New()
	fileHasher.Write(salt)

	lrange := <-local
	rrange := <-remote
	for lrange.Len() != 0 {
		if rrange.Len() == 0 {
			// Copy local tail
			if verboseClient {
				logData("LHASH", lrange.Data)
			}
			hashFileData(fileHasher, lrange.Len(), lrange.Data)
			processFileInterval(lrange, HashedInterval{FileInterval{SparseHole, lrange.Interval}, make([]byte, 0)}, netStream)
			lrange = <-local
			continue
		}
		// Diff
		if verboseClient {
			log.Debug("Diff:", lrange.HashedInterval, rrange)
		}
		if lrange.Begin == rrange.Begin {
			if lrange.End > rrange.End {
				// Local interval extends past the remote one: process the
				// overlapping prefix and keep the local remainder.
				data := lrange.Data
				if len(data) > 0 {
					data = lrange.Data[:rrange.Len()]
				}
				subrange := HashedDataInterval{HashedInterval{FileInterval{lrange.Kind, rrange.Interval}, lrange.Hash}, data}
				if verboseClient {
					logData("LHASH", subrange.Data)
				}
				hashFileData(fileHasher, subrange.Len(), subrange.Data)
				processFileInterval(subrange, rrange, netStream)
				if len(data) > 0 {
					lrange.Data = lrange.Data[subrange.Len():]
				}
				lrange.Begin = rrange.End
				rrange = <-remote
				continue
			} else if lrange.End < rrange.End {
				// Remote interval extends past the local one: process the
				// local interval and keep the remote remainder.
				if verboseClient {
					logData("LHASH", lrange.Data)
				}
				hashFileData(fileHasher, lrange.Len(), lrange.Data)
				processFileInterval(lrange, HashedInterval{FileInterval{rrange.Kind, lrange.Interval}, make([]byte, 0)}, netStream)
				rrange.Begin = lrange.End
				lrange = <-local
				continue
			}
			// Intervals cover exactly the same range.
			if verboseClient {
				logData("LHASH", lrange.Data)
			}
			hashFileData(fileHasher, lrange.Len(), lrange.Data)
			processFileInterval(lrange, rrange, netStream)
			lrange = <-local
			rrange = <-remote
		} else {
			// Should never happen
			log.Fatal("processDiff internal error")
			return
		}
	}
	log.Info("Finished processing file diff")

	status := true
	err = <-errStream
	if err != nil {
		log.Error("Sync client file load aborted:", err)
		status = false
	}
	// make sure we finished consuming dst hashes
	status = <-netInStreamDone && status // netDstReceiver finished
	log.Info("Finished consuming remote file hashes, status=", status)

	// Send end of transmission
	netStream <- diffChunk{true, DataInterval{FileInterval{SparseIgnore, Interval{0, 0}}, make([]byte, 0)}}

	// get network sender status
	net := <-netStatus
	log.Info("Finished sending file diff of", net.byteCount, "(bytes), status=", net.status)
	if !net.status {
		// Fixed typo: "netwoek" -> "network".
		err = errors.New("network transfer failure")
		return
	}

	var statusRemote bool
	err = decoder.Decode(&statusRemote)
	if err != nil {
		log.Fatal("Client protocol remote status error:", err)
		return
	}
	if !statusRemote {
		err = errors.New("failure on remote sync site")
		return
	}
	var hashRemote []byte
	err = decoder.Decode(&hashRemote)
	if err != nil {
		log.Fatal("Client protocol remote hash error:", err)
		return
	}

	// Compare file hashes
	hashLocal = fileHasher.Sum(nil)
	if isHashDifferent(hashLocal, hashRemote) || FailPointFileHashMatch() {
		log.Warn("hashLocal =", hashLocal)
		log.Warn("hashRemote=", hashRemote)
		err = &HashCollsisionError{}
	} else {
		retry = false // success, don't retry anymore
	}

	// Final retry negotiation
	{
		err1 := encoder.Encode(retry)
		if err1 != nil {
			// Fixed: was logging the outer err instead of err1.
			log.Fatal("Client protocol remote retry error:", err1)
		}
		err1 = decoder.Decode(&statusRemote)
		if err1 != nil {
			log.Fatal("Client protocol remote retry status error:", err1)
		}
	}
	return
}
// isHashDifferent reports whether the two hash values differ.
func isHashDifferent(a, b []byte) bool {
	if bytes.Equal(a, b) {
		return false
	}
	return true
}
// processFileInterval compares one local interval against the matching remote
// interval and pushes the appropriate chunk to netStream: the local data when
// the sides differ, or a SparseIgnore marker when no transfer is needed.
// The two intervals must cover exactly the same file range.
func processFileInterval(local HashedDataInterval, remote HashedInterval, netStream chan<- diffChunk) {
	if local.Interval != remote.Interval {
		log.Fatal("Sync.processFileInterval range internal error:", local.FileInterval, remote.FileInterval)
	}
	if local.Kind != remote.Kind {
		// Different interval types, send the diff
		if local.Kind == SparseData && int64(len(local.Data)) != local.FileInterval.Len() {
			log.Fatal("Sync.processFileInterval data internal error:", local.FileInterval.Len(), len(local.Data))
		}
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	// The interval types are the same
	if SparseHole == local.Kind {
		// Process hole, no synchronization is required
		local.Kind = SparseIgnore
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	if local.Kind != SparseData {
		log.Fatal("Sync.processFileInterval kind internal error:", local.FileInterval)
	}
	// Data file interval
	if isHashDifferent(local.Hash, remote.Hash) {
		if int64(len(local.Data)) != local.FileInterval.Len() {
			log.Fatal("Sync.processFileInterval internal error:", local.FileInterval.Len(), len(local.Data))
		}
		netStream <- diffChunk{true, DataInterval{local.FileInterval, local.Data}}
		return
	}
	// No diff, just communicate we processed it
	//TODO: this apparently can be avoided but requires revision of the protocol
	local.Kind = SparseIgnore
	netStream <- diffChunk{true, DataInterval{local.FileInterval, make([]byte, 0)}}
}
// traceChannelLoad prints chan codes and lengths to trace
// - sequence and interleaving of chan processing
// - how much of the chan buffer is used
const traceChannelLoad = false

// netXferStatus is the final result reported by networkSender.
type netXferStatus struct {
	status    bool  // false if any send or read error occurred
	byteCount int64 // total data bytes transmitted
}
// networkSender drains netStream, gob-encoding each chunk's header (and, for
// data chunks, its payload) to the remote side. A zero-length header marks end
// of transmission and terminates the loop. After an error, remaining chunks
// are still consumed (and discarded) so producers do not block. The final
// status and byte count are reported on netStatus.
func networkSender(netStream <-chan diffChunk, encoder *gob.Encoder, netStatus chan<- netXferStatus) {
	status := true
	byteCount := int64(0)
	for {
		chunk := <-netStream
		if 0 == chunk.header.Len() {
			// eof: last 0 len header
			if verboseClient {
				log.Debug("Client.networkSender <eof>")
			}
			err := encoder.Encode(chunk.header.FileInterval)
			if err != nil {
				log.Fatal("Client protocol encoder error:", err)
				status = false
			}
			break
		}
		if !status {
			// network error
			continue // discard the chunk
		}
		if !chunk.status {
			// read error
			status = false
			continue // discard the chunk
		}
		if traceChannelLoad {
			fmt.Fprint(os.Stderr, len(netStream), "n")
		}
		// Encode and send data to the network
		if verboseClient {
			log.Debug("Client.networkSender sending:", chunk.header.FileInterval)
		}
		err := encoder.Encode(chunk.header.FileInterval)
		if err != nil {
			log.Fatal("Client protocol encoder error:", err)
			status = false
			continue
		}
		// Holes carry no payload; only data intervals are followed by bytes.
		if len(chunk.header.Data) == 0 {
			continue
		}
		if verboseClient {
			log.Debug("Client.networkSender sending data")
		}
		if int64(len(chunk.header.Data)) != chunk.header.FileInterval.Len() {
			log.Fatal("Client.networkSender sending data internal error:", chunk.header.FileInterval.Len(), len(chunk.header.Data))
		}
		err = encoder.Encode(chunk.header.Data)
		if err != nil {
			log.Fatal("Client protocol encoder error:", err)
			status = false
			continue
		}
		byteCount += int64(len(chunk.header.Data))
		if traceChannelLoad {
			fmt.Fprint(os.Stderr, "N\n")
		}
	}
	netStatus <- netXferStatus{status, byteCount}
}
|
package webhook
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"html"
"io/ioutil"
"net/http"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/google/go-github/github"
"github.com/matrix-org/gomatrix"
"github.com/matrix-org/util"
)
// OnReceiveRequest processes incoming github webhook requests and returns a
// matrix message to send, along with parsed repo information.
// The secretToken, if supplied, will be used to verify the request is from
// Github. If it isn't, an error is returned.
func OnReceiveRequest(r *http.Request, secretToken string) (string, *github.Repository, *gomatrix.HTMLMessage, *util.JSONResponse) {
	// Verify the HMAC signature if NEB was configured with a secret token
	eventType := r.Header.Get("X-GitHub-Event")
	signatureSHA1 := r.Header.Get("X-Hub-Signature")
	content, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.WithError(err).Print("Failed to read Github webhook body")
		resErr := util.MessageResponse(400, "Failed to parse body")
		return "", nil, nil, &resErr
	}
	// Verify request if a secret token has been supplied.
	if secretToken != "" {
		// The header has the form "sha1=<hex digest>". Guard against a
		// missing or malformed header: indexing [1] unconditionally caused an
		// index-out-of-range panic on untrusted input.
		sigParts := strings.Split(signatureSHA1, "=")
		if len(sigParts) < 2 {
			log.WithField("X-Hub-Signature", signatureSHA1).Print(
				"Received Github event with malformed signature header.")
			resErr := util.MessageResponse(400, "Failed to decode signature")
			return "", nil, nil, &resErr
		}
		sigHex := sigParts[1]
		var sigBytes []byte
		sigBytes, err = hex.DecodeString(sigHex)
		if err != nil {
			log.WithError(err).WithField("X-Hub-Signature", sigHex).Print(
				"Failed to decode signature as hex.")
			resErr := util.MessageResponse(400, "Failed to decode signature")
			return "", nil, nil, &resErr
		}
		if !checkMAC([]byte(content), sigBytes, []byte(secretToken)) {
			log.WithFields(log.Fields{
				"X-Hub-Signature": signatureSHA1,
			}).Print("Received Github event which failed MAC check.")
			resErr := util.MessageResponse(403, "Bad signature")
			return "", nil, nil, &resErr
		}
	}
	log.WithFields(log.Fields{
		"event_type": eventType,
		"signature":  signatureSHA1,
	}).Print("Received Github event")

	if eventType == "ping" {
		// Github will send a "ping" event when the webhook is first created. We need
		// to return a 200 in order for the webhook to be marked as "up" (this doesn't
		// affect delivery, just the tick/cross status flag).
		res := util.MessageResponse(200, "pong")
		return "", nil, nil, &res
	}

	htmlStr, repo, refinedType, err := parseGithubEvent(eventType, content)
	if err != nil {
		log.WithError(err).Print("Failed to parse github event")
		resErr := util.MessageResponse(500, "Failed to parse github event")
		return "", nil, nil, &resErr
	}

	msg := gomatrix.GetHTMLMessage("m.notice", htmlStr)
	return refinedType, repo, &msg, nil
}
// checkMAC reports whether messageMAC is a valid HMAC tag for message.
func checkMAC(message, messageMAC, key []byte) bool {
mac := hmac.New(sha1.New, key)
mac.Write(message)
expectedMAC := mac.Sum(nil)
return hmac.Equal(messageMAC, expectedMAC)
}
// parseGithubEvent parses a github event type and JSON data and returns an explanatory
// HTML string, the github repository and the refined event type, or an error.
func parseGithubEvent(eventType string, data []byte) (string, *github.Repository, string, error) {
	switch eventType {
	case "pull_request":
		var ev github.PullRequestEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return pullRequestHTMLMessage(ev), ev.Repo, refineEventType(eventType, ev.Action), nil
	case "issues":
		var ev github.IssuesEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return issueHTMLMessage(ev), ev.Repo, refineEventType(eventType, ev.Action), nil
	case "push":
		var ev github.PushEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		// The 'push' event repository format is subtly different from normal,
		// so munge the bits we need into a regular Repository.
		fullName := *ev.Repo.Owner.Name + "/" + *ev.Repo.Name
		repo := github.Repository{
			Owner: &github.User{
				Login: ev.Repo.Owner.Name,
			},
			Name:     ev.Repo.Name,
			FullName: &fullName,
		}
		return pushHTMLMessage(ev), &repo, eventType, nil
	case "issue_comment":
		var ev github.IssueCommentEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return issueCommentHTMLMessage(ev), ev.Repo, eventType, nil
	case "pull_request_review_comment":
		var ev github.PullRequestReviewCommentEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return prReviewCommentHTMLMessage(ev), ev.Repo, eventType, nil
	}
	return "", nil, eventType, fmt.Errorf("Unrecognized event type")
}
// refineEventType maps assignment/milestone/label actions onto more specific
// event type names; all other actions fall through to the original eventType.
func refineEventType(eventType string, action *string) string {
	if action == nil {
		return eventType
	}
	switch *action {
	case "assigned", "unassigned":
		return "assignments"
	case "milestoned", "demilestoned":
		return "milestones"
	case "labeled", "unlabeled":
		return "labels"
	default:
		return eventType
	}
}
// pullRequestHTMLMessage renders a pull request event as a one-line HTML
// notice, appending " to <assignee>" when an assignee is present.
func pullRequestHTMLMessage(p github.PullRequestEvent) string {
	assigneeSuffix := ""
	if p.PullRequest.Assignee != nil && p.PullRequest.Assignee.Login != nil {
		assigneeSuffix = fmt.Sprintf(" to %s", *p.PullRequest.Assignee.Login)
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s %s <b>pull request #%d</b>: %s [%s]%s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		html.EscapeString(*p.Action),
		*p.Number,
		html.EscapeString(*p.PullRequest.Title),
		html.EscapeString(*p.PullRequest.State),
		html.EscapeString(assigneeSuffix),
		html.EscapeString(*p.PullRequest.HTMLURL),
	)
}
// issueHTMLMessage renders an issues event as a one-line HTML notice. Label
// changes include the label name in the action text; an assignee, if present,
// is appended as " to <login>".
func issueHTMLMessage(p github.IssuesEvent) string {
	assigneeSuffix := ""
	if p.Issue.Assignee != nil && p.Issue.Assignee.Login != nil {
		assigneeSuffix = fmt.Sprintf(" to %s", *p.Issue.Assignee.Login)
	}
	action := html.EscapeString(*p.Action)
	if p.Label != nil && (*p.Action == "labeled" || *p.Action == "unlabeled") {
		action = *p.Action + " [" + html.EscapeString(*p.Label.Name) + "] to"
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s %s <b>issue #%d</b>: %s [%s]%s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		action,
		*p.Issue.Number,
		html.EscapeString(*p.Issue.Title),
		html.EscapeString(*p.Issue.State),
		html.EscapeString(assigneeSuffix),
		html.EscapeString(*p.Issue.HTMLURL),
	)
}
// issueCommentHTMLMessage renders an issue_comment event as a one-line HTML
// notice. PullRequestLinks distinguishes PR comments from plain issue comments.
func issueCommentHTMLMessage(p github.IssueCommentEvent) string {
	kind := "issue"
	if p.Issue.PullRequestLinks != nil {
		kind = "pull request"
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s commented on %s's <b>%s #%d</b>: %s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Comment.User.Login),
		html.EscapeString(*p.Issue.User.Login),
		kind,
		*p.Issue.Number,
		html.EscapeString(*p.Issue.Title),
		html.EscapeString(*p.Issue.HTMLURL),
	)
}
// prReviewCommentHTMLMessage renders a pull_request_review_comment event
// (a line comment on a PR diff) as a one-line HTML notice.
func prReviewCommentHTMLMessage(p github.PullRequestReviewCommentEvent) string {
	assignee := "None"
	if a := p.PullRequest.Assignee; a != nil {
		assignee = html.EscapeString(*a.Login)
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s made a line comment on %s's <b>pull request #%d</b> (assignee: %s): %s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		html.EscapeString(*p.PullRequest.User.Login),
		*p.PullRequest.Number,
		assignee,
		html.EscapeString(*p.PullRequest.Title),
		html.EscapeString(*p.Comment.HTMLURL),
	)
}
// pushHTMLMessage renders a push event as HTML. Branch deletions, multi-commit
// pushes and single-commit pushes each get their own message shape.
func pushHTMLMessage(p github.PushEvent) string {
	// /refs/heads/alice/branch-name => alice/branch-name
	branch := strings.Replace(*p.Ref, "refs/heads/", "", -1)

	// this branch was deleted, no HeadCommit object and deleted=true
	if p.HeadCommit == nil && p.Deleted != nil && *p.Deleted {
		// FIX: <font> must nest inside <b>; the previous interleaving
		// (`<font ...><b>deleted</font> %s</b>`) produced invalid HTML.
		return fmt.Sprintf(
			`[<u>%s</u>] %s <b><font color="red">deleted</font> %s</b>`,
			html.EscapeString(*p.Repo.FullName),
			html.EscapeString(*p.Pusher.Name),
			html.EscapeString(branch),
		)
	}

	if p.Commits != nil && len(p.Commits) > 1 {
		// multi-commit message
		// [<repo>] <username> pushed <num> commits to <branch>: <git.io link>
		// <up to 3 commits>
		var cList []string
		for _, c := range p.Commits {
			cList = append(cList, fmt.Sprintf(
				`%s: %s`,
				html.EscapeString(nameForAuthor(c.Author)),
				html.EscapeString(*c.Message),
			))
		}

		return fmt.Sprintf(
			`[<u>%s</u>] %s pushed %d commits to <b>%s</b>: %s<br>%s`,
			html.EscapeString(*p.Repo.FullName),
			html.EscapeString(nameForAuthor(p.HeadCommit.Committer)),
			len(p.Commits),
			html.EscapeString(branch),
			html.EscapeString(*p.HeadCommit.URL),
			strings.Join(cList, "<br>"),
		)
	}

	// single commit message
	// [<repo>] <username> pushed to <branch>: <msg> - <git.io link>
	return fmt.Sprintf(
		`[<u>%s</u>] %s pushed to <b>%s</b>: %s - %s`,
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(nameForAuthor(p.HeadCommit.Committer)),
		html.EscapeString(branch),
		html.EscapeString(*p.HeadCommit.Message),
		html.EscapeString(*p.HeadCommit.URL),
	)
}
// nameForAuthor returns the best human-readable name for a commit author:
// their GitHub login if known, otherwise the name they committed as, or ""
// when neither field is populated.
func nameForAuthor(a *github.CommitAuthor) string {
	if a == nil {
		return ""
	}
	if a.Login != nil { // prefer to use their GH username than the name they commited as
		return *a.Login
	}
	// Defensive: Name can also be absent in the payload; previously this
	// dereferenced a nil pointer in that case.
	if a.Name == nil {
		return ""
	}
	return *a.Name
}
Fix invalid HTML on bot message when a branch is deleted
package webhook
import (
"crypto/hmac"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"fmt"
"html"
"io/ioutil"
"net/http"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/google/go-github/github"
"github.com/matrix-org/gomatrix"
"github.com/matrix-org/util"
)
// OnReceiveRequest processes incoming github webhook requests and returns a
// matrix message to send, along with parsed repo information.
// The secretToken, if supplied, will be used to verify the request is from
// Github. If it isn't, an error is returned.
func OnReceiveRequest(r *http.Request, secretToken string) (string, *github.Repository, *gomatrix.HTMLMessage, *util.JSONResponse) {
	// Verify the HMAC signature if NEB was configured with a secret token
	eventType := r.Header.Get("X-GitHub-Event")
	signatureSHA1 := r.Header.Get("X-Hub-Signature")

	content, err := ioutil.ReadAll(r.Body)
	if err != nil {
		log.WithError(err).Print("Failed to read Github webhook body")
		resErr := util.MessageResponse(400, "Failed to parse body")
		return "", nil, nil, &resErr
	}

	// Verify request if a secret token has been supplied.
	if secretToken != "" {
		// The header has the form "sha1=<hex digest>". Previously this indexed
		// Split(...)[1] unconditionally, which panics when the header is
		// missing or malformed; reject such requests with a 400 instead.
		sigParts := strings.SplitN(signatureSHA1, "=", 2)
		if len(sigParts) != 2 {
			log.WithField("X-Hub-Signature", signatureSHA1).Print(
				"Received Github event with missing or malformed signature header.")
			resErr := util.MessageResponse(400, "Missing or malformed signature")
			return "", nil, nil, &resErr
		}
		sigHex := sigParts[1]
		var sigBytes []byte
		sigBytes, err = hex.DecodeString(sigHex)
		if err != nil {
			log.WithError(err).WithField("X-Hub-Signature", sigHex).Print(
				"Failed to decode signature as hex.")
			resErr := util.MessageResponse(400, "Failed to decode signature")
			return "", nil, nil, &resErr
		}

		// content is already []byte; no conversion needed.
		if !checkMAC(content, sigBytes, []byte(secretToken)) {
			log.WithFields(log.Fields{
				"X-Hub-Signature": signatureSHA1,
			}).Print("Received Github event which failed MAC check.")
			resErr := util.MessageResponse(403, "Bad signature")
			return "", nil, nil, &resErr
		}
	}

	log.WithFields(log.Fields{
		"event_type": eventType,
		"signature":  signatureSHA1,
	}).Print("Received Github event")

	if eventType == "ping" {
		// Github will send a "ping" event when the webhook is first created. We need
		// to return a 200 in order for the webhook to be marked as "up" (this doesn't
		// affect delivery, just the tick/cross status flag).
		res := util.MessageResponse(200, "pong")
		return "", nil, nil, &res
	}

	htmlStr, repo, refinedType, err := parseGithubEvent(eventType, content)
	if err != nil {
		log.WithError(err).Print("Failed to parse github event")
		resErr := util.MessageResponse(500, "Failed to parse github event")
		return "", nil, nil, &resErr
	}

	msg := gomatrix.GetHTMLMessage("m.notice", htmlStr)
	return refinedType, repo, &msg, nil
}
// checkMAC reports whether messageMAC is a valid HMAC tag for message.
func checkMAC(message, messageMAC, key []byte) bool {
mac := hmac.New(sha1.New, key)
mac.Write(message)
expectedMAC := mac.Sum(nil)
return hmac.Equal(messageMAC, expectedMAC)
}
// parseGithubEvent parses a github event type and JSON data and returns an explanatory
// HTML string, the github repository and the refined event type, or an error.
func parseGithubEvent(eventType string, data []byte) (string, *github.Repository, string, error) {
	switch eventType {
	case "pull_request":
		var ev github.PullRequestEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return pullRequestHTMLMessage(ev), ev.Repo, refineEventType(eventType, ev.Action), nil
	case "issues":
		var ev github.IssuesEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return issueHTMLMessage(ev), ev.Repo, refineEventType(eventType, ev.Action), nil
	case "push":
		var ev github.PushEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		// The 'push' event repository format is subtly different from normal,
		// so munge the bits we need into a regular Repository.
		fullName := *ev.Repo.Owner.Name + "/" + *ev.Repo.Name
		repo := github.Repository{
			Owner: &github.User{
				Login: ev.Repo.Owner.Name,
			},
			Name:     ev.Repo.Name,
			FullName: &fullName,
		}
		return pushHTMLMessage(ev), &repo, eventType, nil
	case "issue_comment":
		var ev github.IssueCommentEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return issueCommentHTMLMessage(ev), ev.Repo, eventType, nil
	case "pull_request_review_comment":
		var ev github.PullRequestReviewCommentEvent
		if err := json.Unmarshal(data, &ev); err != nil {
			return "", nil, eventType, err
		}
		return prReviewCommentHTMLMessage(ev), ev.Repo, eventType, nil
	}
	return "", nil, eventType, fmt.Errorf("Unrecognized event type")
}
// refineEventType maps assignment/milestone/label actions onto more specific
// event type names; all other actions fall through to the original eventType.
func refineEventType(eventType string, action *string) string {
	if action == nil {
		return eventType
	}
	switch *action {
	case "assigned", "unassigned":
		return "assignments"
	case "milestoned", "demilestoned":
		return "milestones"
	case "labeled", "unlabeled":
		return "labels"
	default:
		return eventType
	}
}
// pullRequestHTMLMessage renders a pull request event as a one-line HTML
// notice, appending " to <assignee>" when an assignee is present.
func pullRequestHTMLMessage(p github.PullRequestEvent) string {
	assigneeSuffix := ""
	if p.PullRequest.Assignee != nil && p.PullRequest.Assignee.Login != nil {
		assigneeSuffix = fmt.Sprintf(" to %s", *p.PullRequest.Assignee.Login)
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s %s <b>pull request #%d</b>: %s [%s]%s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		html.EscapeString(*p.Action),
		*p.Number,
		html.EscapeString(*p.PullRequest.Title),
		html.EscapeString(*p.PullRequest.State),
		html.EscapeString(assigneeSuffix),
		html.EscapeString(*p.PullRequest.HTMLURL),
	)
}
// issueHTMLMessage renders an issues event as a one-line HTML notice. Label
// changes include the label name in the action text; an assignee, if present,
// is appended as " to <login>".
func issueHTMLMessage(p github.IssuesEvent) string {
	assigneeSuffix := ""
	if p.Issue.Assignee != nil && p.Issue.Assignee.Login != nil {
		assigneeSuffix = fmt.Sprintf(" to %s", *p.Issue.Assignee.Login)
	}
	action := html.EscapeString(*p.Action)
	if p.Label != nil && (*p.Action == "labeled" || *p.Action == "unlabeled") {
		action = *p.Action + " [" + html.EscapeString(*p.Label.Name) + "] to"
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s %s <b>issue #%d</b>: %s [%s]%s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		action,
		*p.Issue.Number,
		html.EscapeString(*p.Issue.Title),
		html.EscapeString(*p.Issue.State),
		html.EscapeString(assigneeSuffix),
		html.EscapeString(*p.Issue.HTMLURL),
	)
}
// issueCommentHTMLMessage renders an issue_comment event as a one-line HTML
// notice. PullRequestLinks distinguishes PR comments from plain issue comments.
func issueCommentHTMLMessage(p github.IssueCommentEvent) string {
	kind := "issue"
	if p.Issue.PullRequestLinks != nil {
		kind = "pull request"
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s commented on %s's <b>%s #%d</b>: %s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Comment.User.Login),
		html.EscapeString(*p.Issue.User.Login),
		kind,
		*p.Issue.Number,
		html.EscapeString(*p.Issue.Title),
		html.EscapeString(*p.Issue.HTMLURL),
	)
}
// prReviewCommentHTMLMessage renders a pull_request_review_comment event
// (a line comment on a PR diff) as a one-line HTML notice.
func prReviewCommentHTMLMessage(p github.PullRequestReviewCommentEvent) string {
	assignee := "None"
	if a := p.PullRequest.Assignee; a != nil {
		assignee = html.EscapeString(*a.Login)
	}
	return fmt.Sprintf(
		"[<u>%s</u>] %s made a line comment on %s's <b>pull request #%d</b> (assignee: %s): %s - %s",
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(*p.Sender.Login),
		html.EscapeString(*p.PullRequest.User.Login),
		*p.PullRequest.Number,
		assignee,
		html.EscapeString(*p.PullRequest.Title),
		html.EscapeString(*p.Comment.HTMLURL),
	)
}
// pushHTMLMessage renders a push event as an HTML notice. There are three
// shapes: a branch deletion, a multi-commit push, and a single-commit push.
func pushHTMLMessage(p github.PushEvent) string {
	// /refs/heads/alice/branch-name => alice/branch-name
	branch := strings.Replace(*p.Ref, "refs/heads/", "", -1)

	// Branch deletion: no HeadCommit object and deleted=true.
	if p.HeadCommit == nil && p.Deleted != nil && *p.Deleted {
		return fmt.Sprintf(
			`[<u>%s</u>] %s <b><font color="red">deleted</font> %s</b>`,
			html.EscapeString(*p.Repo.FullName),
			html.EscapeString(*p.Pusher.Name),
			html.EscapeString(branch),
		)
	}

	// Multi-commit push: summary line followed by one line per commit.
	// (len > 1 implies the slice is non-nil.)
	if len(p.Commits) > 1 {
		summaries := make([]string, 0, len(p.Commits))
		for _, commit := range p.Commits {
			summaries = append(summaries, fmt.Sprintf(
				`%s: %s`,
				html.EscapeString(nameForAuthor(commit.Author)),
				html.EscapeString(*commit.Message),
			))
		}
		return fmt.Sprintf(
			`[<u>%s</u>] %s pushed %d commits to <b>%s</b>: %s<br>%s`,
			html.EscapeString(*p.Repo.FullName),
			html.EscapeString(nameForAuthor(p.HeadCommit.Committer)),
			len(p.Commits),
			html.EscapeString(branch),
			html.EscapeString(*p.HeadCommit.URL),
			strings.Join(summaries, "<br>"),
		)
	}

	// Single-commit push:
	// [<repo>] <username> pushed to <branch>: <msg> - <git.io link>
	return fmt.Sprintf(
		`[<u>%s</u>] %s pushed to <b>%s</b>: %s - %s`,
		html.EscapeString(*p.Repo.FullName),
		html.EscapeString(nameForAuthor(p.HeadCommit.Committer)),
		html.EscapeString(branch),
		html.EscapeString(*p.HeadCommit.Message),
		html.EscapeString(*p.HeadCommit.URL),
	)
}
// nameForAuthor returns the best human-readable name for a commit author:
// their GitHub login if known, otherwise the name they committed as, or ""
// when neither field is populated.
func nameForAuthor(a *github.CommitAuthor) string {
	if a == nil {
		return ""
	}
	if a.Login != nil { // prefer to use their GH username than the name they commited as
		return *a.Login
	}
	// Defensive: Name can also be absent in the payload; previously this
	// dereferenced a nil pointer in that case.
	if a.Name == nil {
		return ""
	}
	return *a.Name
}
|
package cli
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
)
// Test doubles for the package-level exit hook and error writer, so tests can
// observe exit codes and error output without terminating the process.
var (
	lastExitCode = 0
	fakeOsExiter = func(rc int) {
		lastExitCode = rc
	}
	fakeErrWriter = &bytes.Buffer{}
)
// init installs the fake exit/error hooks above before any test runs.
func init() {
	OsExiter = fakeOsExiter
	ErrWriter = fakeErrWriter
}
// opCounts records the order in which app lifecycle hooks fire: each hook
// stores the incremented Total into its own field, so tests can assert
// relative ordering (e.g. Before before SubCommand).
type opCounts struct {
	Total, BashComplete, OnUsageError, Before, CommandNotFound, Action, After, SubCommand int
}
// ExampleApp_Run demonstrates the minimal flag-parsing flow: a single string
// flag read inside the app's Action.
func ExampleApp_Run() {
	// set args for examples sake
	os.Args = []string{"greet", "--name", "Jeremy"}

	app := NewApp()
	app.Name = "greet"
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Action = func(c *Context) error {
		fmt.Printf("Hello %v\n", c.String("name"))
		return nil
	}
	app.UsageText = "app [first_arg] [second_arg]"
	app.Author = "Harrison"
	app.Email = "harrison@lolwut.com"
	app.Authors = []Author{{Name: "Oliver Allen", Email: "oliver@toyshop.com"}}
	app.Run(os.Args)
	// Output:
	// Hello Jeremy
}
// ExampleApp_Run_subcommand demonstrates nested subcommands invoked by alias
// ("hi" for "hello") with a flag on the innermost command.
func ExampleApp_Run_subcommand() {
	// set args for examples sake
	os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}

	app := NewApp()
	app.Name = "say"
	app.Commands = []Command{
		{
			Name:        "hello",
			Aliases:     []string{"hi"},
			Usage:       "use it to see a description",
			Description: "This is how we describe hello the function",
			Subcommands: []Command{
				{
					Name:        "english",
					Aliases:     []string{"en"},
					Usage:       "sends a greeting in english",
					Description: "greets someone in english",
					Flags: []Flag{
						StringFlag{
							Name:  "name",
							Value: "Bob",
							Usage: "Name of the person to greet",
						},
					},
					Action: func(c *Context) error {
						fmt.Println("Hello,", c.String("name"))
						return nil
					},
				},
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// Hello, Jeremy
}
// ExampleApp_Run_appHelp pins the exact layout of the top-level help text:
// name, usage, version, description, authors, commands and global options.
func ExampleApp_Run_appHelp() {
	// set args for examples sake
	os.Args = []string{"greet", "help"}

	app := NewApp()
	app.Name = "greet"
	app.Version = "0.1.0"
	app.Description = "This is how we describe greet the app"
	app.Authors = []Author{
		{Name: "Harrison", Email: "harrison@lolwut.com"},
		{Name: "Oliver Allen", Email: "oliver@toyshop.com"},
	}
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		},
	}
	app.Run(os.Args)
	// Output:
	// NAME:
	//    greet - A new cli application
	//
	// USAGE:
	//    greet [global options] command [command options] [arguments...]
	//
	// VERSION:
	//    0.1.0
	//
	// DESCRIPTION:
	//    This is how we describe greet the app
	//
	// AUTHORS:
	//    Harrison <harrison@lolwut.com>
	//    Oliver Allen <oliver@toyshop.com>
	//
	// COMMANDS:
	//      describeit, d  use it to see a description
	//      help, h        Shows a list of commands or help for one command
	//
	// GLOBAL OPTIONS:
	//    --name value   a name to say (default: "bob")
	//    --help, -h     show help
	//    --version, -v  print the version
}
// ExampleApp_Run_commandHelp pins the per-command help text, invoked via the
// "h" help alias with a command name argument.
func ExampleApp_Run_commandHelp() {
	// set args for examples sake
	os.Args = []string{"greet", "h", "describeit"}

	app := NewApp()
	app.Name = "greet"
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		},
	}
	app.Run(os.Args)
	// Output:
	// NAME:
	//    greet describeit - use it to see a description
	//
	// USAGE:
	//    greet describeit [arguments...]
	//
	// DESCRIPTION:
	//    This is how we describe describeit the function
}
// ExampleApp_Run_noAction shows that an app without an Action prints its help
// text when run.
func ExampleApp_Run_noAction() {
	app := App{}
	app.Name = "greet"
	app.Run([]string{"greet"})
	// Output:
	// NAME:
	//    greet
	//
	// USAGE:
	//     [global options] command [command options] [arguments...]
	//
	// COMMANDS:
	//      help, h  Shows a list of commands or help for one command
	//
	// GLOBAL OPTIONS:
	//    --help, -h     show help
	//    --version, -v  print the version
}
// ExampleApp_Run_subcommandNoAction shows that invoking a command that has no
// Action prints that command's help text.
func ExampleApp_Run_subcommandNoAction() {
	app := App{}
	app.Name = "greet"
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
		},
	}
	app.Run([]string{"greet", "describeit"})
	// Output:
	// NAME:
	//     describeit - use it to see a description
	//
	// USAGE:
	//     describeit [arguments...]
	//
	// DESCRIPTION:
	//    This is how we describe describeit the function
}
// ExampleApp_Run_bashComplete pins the bash completion output: one line per
// command name and alias, including the built-in help command.
func ExampleApp_Run_bashComplete() {
	// set args for examples sake
	os.Args = []string{"greet", "--generate-bash-completion"}

	app := NewApp()
	app.Name = "greet"
	app.EnableBashCompletion = true
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		}, {
			Name:        "next",
			Usage:       "next example",
			Description: "more stuff to see when generating bash completion",
			Action: func(c *Context) error {
				fmt.Printf("the next example")
				return nil
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// describeit
	// d
	// next
	// help
	// h
}
// ExampleApp_Run_zshComplete pins the zsh completion output, which includes a
// ":description" suffix per entry; the zsh mode is toggled by an env var.
func ExampleApp_Run_zshComplete() {
	// set args for examples sake
	os.Args = []string{"greet", "--generate-bash-completion"}

	os.Setenv("_CLI_ZSH_AUTOCOMPLETE_HACK", "1")
	// Previously the env var was never removed and leaked into any test run
	// after this example, silently switching them to zsh completion mode.
	defer os.Unsetenv("_CLI_ZSH_AUTOCOMPLETE_HACK")

	app := NewApp()
	app.Name = "greet"
	app.EnableBashCompletion = true
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		}, {
			Name:        "next",
			Usage:       "next example",
			Description: "more stuff to see when generating bash completion",
			Action: func(c *Context) error {
				fmt.Printf("the next example")
				return nil
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// describeit:use it to see a description
	// d:use it to see a description
	// next:next example
	// help:Shows a list of commands or help for one command
	// h:Shows a list of commands or help for one command
}
// TestApp_Run checks that the app's Action runs once per Run call and sees the
// first positional argument each time.
func TestApp_Run(t *testing.T) {
	var collected string
	app := NewApp()
	app.Action = func(c *Context) error {
		collected += c.Args().First()
		return nil
	}

	for _, arg := range []string{"foo", "bar"} {
		err := app.Run([]string{"command", arg})
		expect(t, err, nil)
	}
	expect(t, collected, "foobar")
}
// commandAppTests drives TestApp_Command: each name is looked up and expected
// to resolve (command name or alias) or not.
var commandAppTests = []struct {
	name     string
	expected bool
}{
	{"foobar", true},
	{"batbaz", true},
	{"b", true},
	{"f", true},
	{"bat", false},
	{"nothing", false},
}
// TestApp_Command verifies Command lookup by full name and by alias, including
// names that should not resolve.
func TestApp_Command(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{Name: "foobar", Aliases: []string{"f"}},
		{Name: "batbaz", Aliases: []string{"b"}},
	}

	for _, tc := range commandAppTests {
		expect(t, app.Command(tc.name) != nil, tc.expected)
	}
}
// TestApp_Setup_defaultsWriter checks that Setup wires os.Stdout as the
// default output writer on a zero-value App.
func TestApp_Setup_defaultsWriter(t *testing.T) {
	app := new(App)
	app.Setup()
	expect(t, app.Writer, os.Stdout)
}
// TestApp_CommandWithArgBeforeFlags checks that a positional argument given
// before a flag is still parsed: both the flag value and the arg are seen.
func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
	var parsedOption, firstArg string
	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
		},
		Action: func(c *Context) error {
			parsedOption = c.String("option")
			firstArg = c.Args().First()
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})

	expect(t, parsedOption, "my-option")
	expect(t, firstArg, "my-arg")
}
// TestApp_CommandWithArgBeforeBoolFlags checks that string and bool flags
// interleaved after a positional argument are all parsed correctly.
func TestApp_CommandWithArgBeforeBoolFlags(t *testing.T) {
	var parsedOption, parsedSecondOption, firstArg string
	var parsedBool, parsedSecondBool bool
	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
			StringFlag{Name: "secondOption", Value: "", Usage: "another option"},
			BoolFlag{Name: "boolflag", Usage: "some bool"},
			BoolFlag{Name: "b", Usage: "another bool"},
		},
		Action: func(c *Context) error {
			parsedOption = c.String("option")
			parsedSecondOption = c.String("secondOption")
			parsedBool = c.Bool("boolflag")
			parsedSecondBool = c.Bool("b")
			firstArg = c.Args().First()
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--boolflag", "--option", "my-option", "-b", "--secondOption", "fancy-option"})

	expect(t, parsedOption, "my-option")
	expect(t, parsedSecondOption, "fancy-option")
	expect(t, parsedBool, true)
	expect(t, parsedSecondBool, true)
	expect(t, firstArg, "my-arg")
}
// TestApp_RunAsSubcommandParseFlags checks that a subcommand with a Before
// hook still parses its own flags and positional args into its Context.
func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
	var context *Context

	a := NewApp()
	a.Commands = []Command{
		{
			Name: "foo",
			Action: func(c *Context) error {
				// Capture the Context for assertions after Run returns.
				context = c
				return nil
			},
			Flags: []Flag{
				StringFlag{
					Name:  "lang",
					Value: "english",
					Usage: "language for the greeting",
				},
			},
			Before: func(_ *Context) error { return nil },
		},
	}
	a.Run([]string{"", "foo", "--lang", "spanish", "abcd"})

	expect(t, context.Args().Get(0), "abcd")
	expect(t, context.String("lang"), "spanish")
}
// TestApp_RunAsSubCommandIncorrectUsage checks that malformed flag syntax
// ("---foo") surfaces as a "bad flag syntax" error from RunAsSubcommand.
func TestApp_RunAsSubCommandIncorrectUsage(t *testing.T) {
	a := App{
		Flags: []Flag{
			StringFlag{Name: "--foo"},
		},
		Writer: bytes.NewBufferString(""),
	}

	set := flag.NewFlagSet("", flag.ContinueOnError)
	set.Parse([]string{"", "---foo"})
	c := &Context{flagSet: set}

	err := a.RunAsSubcommand(c)

	expect(t, err, errors.New("bad flag syntax: ---foo"))
}
// TestApp_CommandWithFlagBeforeTerminator checks that the "--" terminator and
// everything after it are kept as positional args, while flags before it parse.
func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
	var parsedOption string
	var args []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
		},
		Action: func(c *Context) error {
			parsedOption = c.String("option")
			args = c.Args()
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})

	expect(t, parsedOption, "my-option")
	expect(t, args[0], "my-arg")
	expect(t, args[1], "--")
	expect(t, args[2], "--notARealFlag")
}
// TestApp_CommandWithDash checks that a bare "-" is treated as a positional
// argument, not a flag.
func TestApp_CommandWithDash(t *testing.T) {
	var args []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Action: func(c *Context) error {
			args = c.Args()
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-"})

	expect(t, args[0], "my-arg")
	expect(t, args[1], "-")
}
// TestApp_CommandWithNoFlagBeforeTerminator checks that "--" is preserved in
// the args even when no flags precede it.
func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
	var args []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Action: func(c *Context) error {
			args = c.Args()
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})

	expect(t, args[0], "my-arg")
	expect(t, args[1], "--")
	expect(t, args[2], "notAFlagAtAll")
}
// TestApp_VisibleCommands checks that hidden commands are excluded from
// VisibleCommands while the auto-appended help command is included.
func TestApp_VisibleCommands(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{
			Name:     "frob",
			HelpName: "foo frob",
			Action:   func(_ *Context) error { return nil },
		},
		{
			Name:     "frib",
			HelpName: "foo frib",
			Hidden:   true,
			Action:   func(_ *Context) error { return nil },
		},
	}

	app.Setup()
	// After Setup, Commands[2] is the help command appended by the framework;
	// the hidden Commands[1] must not appear.
	expected := []Command{
		app.Commands[0],
		app.Commands[2], // help
	}
	actual := app.VisibleCommands()
	expect(t, len(expected), len(actual))
	for i, actualCommand := range actual {
		expectedCommand := expected[i]

		if expectedCommand.Action != nil {
			// comparing func addresses is OK!
			expect(t, fmt.Sprintf("%p", expectedCommand.Action), fmt.Sprintf("%p", actualCommand.Action))
		}

		// nil out funcs, as they cannot be compared
		// (https://github.com/golang/go/issues/8554)
		expectedCommand.Action = nil
		actualCommand.Action = nil

		if !reflect.DeepEqual(expectedCommand, actualCommand) {
			t.Errorf("expected\n%#v\n!=\n%#v", expectedCommand, actualCommand)
		}
	}
}
// TestApp_Float64Flag checks that a float64 flag value on the command line
// overrides its declared default.
func TestApp_Float64Flag(t *testing.T) {
	var height float64
	app := NewApp()
	app.Flags = []Flag{
		Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
	}
	app.Action = func(ctx *Context) error {
		height = ctx.Float64("height")
		return nil
	}

	app.Run([]string{"", "--height", "1.93"})
	expect(t, height, 1.93)
}
// TestApp_ParseSliceFlags checks that repeated int-slice and string-slice
// flags accumulate all of their values in order.
func TestApp_ParseSliceFlags(t *testing.T) {
	var parsedIntSlice []int
	var parsedStringSlice []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ip addr"},
			StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ports to open"},
		},
		Action: func(c *Context) error {
			parsedIntSlice = c.IntSlice("p")
			parsedStringSlice = c.StringSlice("ip")
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})

	// Local element-wise equality helpers (predates reflect.DeepEqual usage
	// elsewhere in this file).
	IntsEquals := func(a, b []int) bool {
		if len(a) != len(b) {
			return false
		}
		for i, v := range a {
			if v != b[i] {
				return false
			}
		}
		return true
	}

	StrsEquals := func(a, b []string) bool {
		if len(a) != len(b) {
			return false
		}
		for i, v := range a {
			if v != b[i] {
				return false
			}
		}
		return true
	}
	var expectedIntSlice = []int{22, 80}
	var expectedStringSlice = []string{"8.8.8.8", "8.8.4.4"}

	if !IntsEquals(parsedIntSlice, expectedIntSlice) {
		t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
	}

	if !StrsEquals(parsedStringSlice, expectedStringSlice) {
		t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
	}
}
// TestApp_ParseSliceFlagsWithMissingValue checks that slice flags declared
// without an explicit default still collect values given on the command line.
func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) {
	var parsedIntSlice []int
	var parsedStringSlice []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			IntSliceFlag{Name: "a", Usage: "set numbers"},
			StringSliceFlag{Name: "str", Usage: "set strings"},
		},
		Action: func(c *Context) error {
			parsedIntSlice = c.IntSlice("a")
			parsedStringSlice = c.StringSlice("str")
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"})

	var expectedIntSlice = []int{2}
	var expectedStringSlice = []string{"A"}

	if parsedIntSlice[0] != expectedIntSlice[0] {
		t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
	}

	if parsedStringSlice[0] != expectedStringSlice[0] {
		// Fixed copy-paste bug: this failure previously reported the int-slice
		// values instead of the string-slice values being compared.
		t.Errorf("%v does not match %v", parsedStringSlice[0], expectedStringSlice[0])
	}
}
// TestApp_DefaultStdout checks that NewApp defaults its writer to os.Stdout.
func TestApp_DefaultStdout(t *testing.T) {
	if NewApp().Writer != os.Stdout {
		t.Error("Default output writer not set.")
	}
}
// mockWriter is an io.Writer that captures everything written to it, for
// asserting on app output in tests.
type mockWriter struct {
	written []byte
}
// Write appends p to the captured output, satisfying io.Writer. The bytes are
// always copied: the previous first-write path stored p directly, aliasing a
// buffer the caller is allowed to reuse after Write returns. append handles a
// nil destination slice, so no special case is needed.
func (fw *mockWriter) Write(p []byte) (n int, err error) {
	fw.written = append(fw.written, p...)
	return len(p), nil
}
// GetWritten returns everything captured by Write so far.
func (fw *mockWriter) GetWritten() []byte {
	return fw.written
}
// TestApp_SetStdout checks that a custom Writer receives the app's output
// (here, the help text produced by running with no recognized command).
func TestApp_SetStdout(t *testing.T) {
	w := &mockWriter{}

	app := NewApp()
	app.Name = "test"
	app.Writer = w

	err := app.Run([]string{"help"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if len(w.written) == 0 {
		t.Error("App did not write output to desired writer.")
	}
}
// TestApp_BeforeFunc checks three scenarios around the Before hook:
//  1. Before succeeds -> subcommand runs after it (ordering via opCounts).
//  2. Before fails -> its error is returned and the subcommand is skipped.
//  3. Before fails AND After also fails -> both errors come back as a MultiError.
func TestApp_BeforeFunc(t *testing.T) {
	counts := &opCounts{}
	beforeError := fmt.Errorf("fail")
	var err error

	app := NewApp()

	app.Before = func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		s := c.String("opt")
		if s == "fail" {
			return beforeError
		}

		return nil
	}

	app.Commands = []Command{
		{
			Name: "sub",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Flags = []Flag{
		StringFlag{Name: "opt"},
	}

	// run with the Before() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 2 {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	counts = &opCounts{}

	// run with the Before() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if err != beforeError {
		t.Errorf("Run error expected, but not received")
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 0 {
		t.Errorf("Subcommand executed when NOT expected")
	}

	// reset
	counts = &opCounts{}

	afterError := errors.New("fail again")
	app.After = func(_ *Context) error {
		return afterError
	}

	// run with the Before() func failing, wrapped by After()
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if _, ok := err.(MultiError); !ok {
		t.Errorf("MultiError expected, but not received")
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 0 {
		t.Errorf("Subcommand executed when NOT expected")
	}
}
// TestApp_AfterFunc exercises the app-level After hook: it must run after
// the subcommand in both the success and failure cases, and its error
// becomes the error returned from Run.
func TestApp_AfterFunc(t *testing.T) {
	counts := &opCounts{}
	afterError := fmt.Errorf("fail")
	var err error

	app := NewApp()

	// After fails only when invoked with --opt fail.
	app.After = func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		s := c.String("opt")
		if s == "fail" {
			return afterError
		}
		return nil
	}

	app.Commands = []Command{
		{
			Name: "sub",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Flags = []Flag{
		StringFlag{Name: "opt"},
	}

	// run with the After() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	// subcommand fires first (1), then After (2)
	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}

	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	counts = &opCounts{}

	// run with the Before() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if err != afterError {
		t.Errorf("Run error expected, but not received")
	}

	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}

	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}
}
// TestAppNoHelpFlag verifies that with the global HelpFlag blanked out,
// -h is treated as an unknown flag and Run surfaces flag.ErrHelp.
func TestAppNoHelpFlag(t *testing.T) {
	// restore the package-level help flag when the test finishes
	oldFlag := HelpFlag
	defer func() {
		HelpFlag = oldFlag
	}()

	HelpFlag = BoolFlag{}

	app := NewApp()
	app.Writer = ioutil.Discard
	err := app.Run([]string{"test", "-h"})

	if err != flag.ErrHelp {
		t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
	}
}
// TestRequiredFlagAppRunBehavior table-tests required-flag validation at the
// app, command, and subcommand levels: missing required input must produce a
// requiredFlagsErr, while supplying the flag or asking for --help must not
// error at all.
func TestRequiredFlagAppRunBehavior(t *testing.T) {
	tdata := []struct {
		testCase        string
		appFlags        []Flag
		appRunInput     []string
		appCommands     []Command
		expectedAnError bool
	}{
		// assertion: empty input, when a required flag is present, errors
		{
			testCase:        "error_case_empty_input_with_required_flag_on_app",
			appRunInput:     []string{"myCLI"},
			appFlags:        []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_empty_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_empty_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
			expectedAnError: true,
		},
		// assertion: inputing --help, when a required flag is present, does not error
		{
			testCase:    "valid_case_help_input_with_required_flag_on_app",
			appRunInput: []string{"myCLI", "--help"},
			appFlags:    []Flag{StringFlag{Name: "requiredFlag", Required: true}},
		},
		{
			testCase:    "valid_case_help_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--help"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
		},
		{
			testCase:    "valid_case_help_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--help"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
		},
		// assertion: giving optional input, when a required flag is present, errors
		{
			testCase:        "error_case_optional_input_with_required_flag_on_app",
			appRunInput:     []string{"myCLI", "--optional", "cats"},
			appFlags:        []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_optional_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--optional", "cats"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
			}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_optional_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--optional", "cats"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
				}},
			}},
			expectedAnError: true,
		},
		// assertion: when a required flag is present, inputting that required flag does not error
		{
			testCase:    "valid_case_required_flag_input_on_app",
			appRunInput: []string{"myCLI", "--requiredFlag", "cats"},
			appFlags:    []Flag{StringFlag{Name: "requiredFlag", Required: true}},
		},
		{
			testCase:    "valid_case_required_flag_input_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--requiredFlag", "cats"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
		},
		{
			testCase:    "valid_case_required_flag_input_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--requiredFlag", "cats"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
		},
	}
	for _, test := range tdata {
		t.Run(test.testCase, func(t *testing.T) {
			// setup - undo HelpPrinter mock when finished
			oldPrinter := HelpPrinter
			defer func() {
				HelpPrinter = oldPrinter
			}()

			// setup - mock HelpPrinter so the default help action stays quiet.
			// NOTE: no assertion is made on whether help is printed — that
			// depends on the default no-Action behavior, not on the
			// required-flag validation this test is about.
			HelpPrinter = func(w io.Writer, template string, data interface{}) {}

			// setup - app
			app := NewApp()
			app.Flags = test.appFlags
			app.Commands = test.appCommands

			// logic under test
			err := app.Run(test.appRunInput)

			// assertions
			if test.expectedAnError && err == nil {
				t.Errorf("expected an error, but there was none")
			}
			if _, ok := err.(requiredFlagsErr); test.expectedAnError && !ok {
				t.Errorf("expected a requiredFlagsErr, but got: %s", err)
			}
			if !test.expectedAnError && err != nil {
				t.Errorf("did not expected an error, but there was one: %s", err)
			}
		})
	}
}
// TestAppHelpPrinter verifies that running with -h routes through the
// package-level HelpPrinter hook.
func TestAppHelpPrinter(t *testing.T) {
	// restore the real printer when done
	oldPrinter := HelpPrinter
	defer func() {
		HelpPrinter = oldPrinter
	}()

	var wasCalled = false
	HelpPrinter = func(w io.Writer, template string, data interface{}) {
		wasCalled = true
	}

	app := NewApp()
	app.Run([]string{"-h"})

	if wasCalled == false {
		t.Errorf("Help printer expected to be called, but was not")
	}
}
// TestApp_VersionPrinter verifies that ShowVersion routes through the
// package-level VersionPrinter hook.
func TestApp_VersionPrinter(t *testing.T) {
	// restore the real printer when done
	oldPrinter := VersionPrinter
	defer func() {
		VersionPrinter = oldPrinter
	}()

	var wasCalled = false
	VersionPrinter = func(c *Context) {
		wasCalled = true
	}

	app := NewApp()
	ctx := NewContext(app, nil, nil)
	ShowVersion(ctx)

	if wasCalled == false {
		t.Errorf("Version printer expected to be called, but was not")
	}
}
// TestApp_CommandNotFound checks that an unknown command name triggers the
// CommandNotFound callback and that no registered command action runs.
func TestApp_CommandNotFound(t *testing.T) {
	counts := &opCounts{}
	app := NewApp()

	app.CommandNotFound = func(c *Context, command string) {
		counts.Total++
		counts.CommandNotFound = counts.Total
	}

	app.Commands = []Command{
		{
			Name: "bar",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Run([]string{"command", "foo"})

	// only the not-found hook should have fired
	expect(t, counts.CommandNotFound, 1)
	expect(t, counts.SubCommand, 0)
	expect(t, counts.Total, 1)
}
// TestApp_OrderOfOperations pins down the relative execution order of every
// app lifecycle hook (OnUsageError, BashComplete, Before, CommandNotFound,
// Action, After) by recording sequence numbers in an opCounts.
func TestApp_OrderOfOperations(t *testing.T) {
	counts := &opCounts{}

	resetCounts := func() { counts = &opCounts{} }

	app := NewApp()
	app.EnableBashCompletion = true
	app.BashComplete = func(c *Context) {
		counts.Total++
		counts.BashComplete = counts.Total
	}

	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		counts.Total++
		counts.OnUsageError = counts.Total
		return errors.New("hay OnUsageError")
	}

	beforeNoError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return nil
	}

	beforeError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return errors.New("hay Before")
	}

	app.Before = beforeNoError
	app.CommandNotFound = func(c *Context, command string) {
		counts.Total++
		counts.CommandNotFound = counts.Total
	}

	afterNoError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return nil
	}

	afterError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return errors.New("hay After")
	}

	app.After = afterNoError
	app.Commands = []Command{
		{
			Name: "bar",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Action = func(c *Context) error {
		counts.Total++
		counts.Action = counts.Total
		return nil
	}

	// a bad flag fires OnUsageError and nothing else
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.OnUsageError, 1)
	expect(t, counts.Total, 1)

	resetCounts()

	// a completion request short-circuits everything else
	_ = app.Run([]string{"command", "--generate-bash-completion"})
	expect(t, counts.BashComplete, 1)
	expect(t, counts.Total, 1)

	resetCounts()

	// with no OnUsageError handler the bad flag aborts before any hook fires
	oldOnUsageError := app.OnUsageError
	app.OnUsageError = nil
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.Total, 0)
	app.OnUsageError = oldOnUsageError

	resetCounts()

	// "foo" matches no command: Before (1), app Action (2), After (3);
	// CommandNotFound does not fire here since the app has its own Action
	_ = app.Run([]string{"command", "foo"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.CommandNotFound, 0)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)

	resetCounts()

	// a failing Before suppresses the command action, but After still runs
	app.Before = beforeError
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.After, 2)
	expect(t, counts.Total, 2)
	app.Before = beforeNoError

	resetCounts()

	// After may be nil
	app.After = nil
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.Total, 2)
	app.After = afterNoError

	resetCounts()

	// a failing After surfaces as Run's error; ordering is unchanged
	app.After = afterError
	err := app.Run([]string{"command", "bar"})
	if err == nil {
		t.Fatalf("expected a non-nil error")
	}
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.After = afterNoError

	resetCounts()

	// with no commands registered the app Action handles the run
	oldCommands := app.Commands
	app.Commands = nil
	_ = app.Run([]string{"command"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.Commands = oldCommands
}
// TestApp_Run_CommandWithSubcommandHasHelpTopic runs each help invocation
// form (--help, -h, help) against a command that has subcommands and checks
// the generated help mentions the command and each of its subcommands.
func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) {
	var subcommandHelpTopics = [][]string{
		{"command", "foo", "--help"},
		{"command", "foo", "-h"},
		{"command", "foo", "help"},
	}

	for _, flagSet := range subcommandHelpTopics {
		t.Logf("==> checking with flags %v", flagSet)

		app := NewApp()
		buf := new(bytes.Buffer)
		app.Writer = buf

		subCmdBar := Command{
			Name:  "bar",
			Usage: "does bar things",
		}
		subCmdBaz := Command{
			Name:  "baz",
			Usage: "does baz things",
		}
		cmd := Command{
			Name:        "foo",
			Description: "descriptive wall of text about how it does foo things",
			Subcommands: []Command{subCmdBar, subCmdBaz},
			Action:      func(c *Context) error { return nil },
		}

		app.Commands = []Command{cmd}
		err := app.Run(flagSet)

		if err != nil {
			t.Error(err)
		}

		output := buf.String()
		t.Logf("output: %q\n", buf.Bytes())

		if strings.Contains(output, "No help topic for") {
			t.Errorf("expect a help topic, got none: \n%q", output)
		}

		// help must mention the command and every subcommand by name/usage
		for _, shouldContain := range []string{
			cmd.Name, cmd.Description,
			subCmdBar.Name, subCmdBar.Usage,
			subCmdBaz.Name, subCmdBaz.Usage,
		} {
			if !strings.Contains(output, shouldContain) {
				t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output)
			}
		}
	}
}
// TestApp_Run_SubcommandFullPath checks that subcommand help shows the full
// invocation path ("command foo bar"), not just the subcommand name.
func TestApp_Run_SubcommandFullPath(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Writer = out
	app.Name = "command"
	app.Commands = []Command{
		{
			Name:        "foo",
			Description: "foo commands",
			Subcommands: []Command{
				{
					Name:  "bar",
					Usage: "does bar things",
				},
			},
		},
	}

	if err := app.Run([]string{"command", "foo", "bar", "--help"}); err != nil {
		t.Error(err)
	}

	help := out.String()
	for _, want := range []string{
		"command foo bar - does bar things",
		"command foo bar [arguments...]",
	} {
		if !strings.Contains(help, want) {
			t.Errorf("expected full path to subcommand: %s", help)
		}
	}
}
// TestApp_Run_SubcommandHelpName checks that a subcommand's explicit
// HelpName overrides the computed full path in its help output.
func TestApp_Run_SubcommandHelpName(t *testing.T) {
	app := NewApp()
	buf := new(bytes.Buffer)
	app.Writer = buf
	app.Name = "command"
	subCmd := Command{
		Name:     "bar",
		HelpName: "custom",
		Usage:    "does bar things",
	}
	cmd := Command{
		Name:        "foo",
		Description: "foo commands",
		Subcommands: []Command{subCmd},
	}
	app.Commands = []Command{cmd}

	err := app.Run([]string{"command", "foo", "bar", "--help"})
	if err != nil {
		t.Error(err)
	}

	output := buf.String()

	if !strings.Contains(output, "custom - does bar things") {
		t.Errorf("expected HelpName for subcommand: %s", output)
	}

	if !strings.Contains(output, "custom [arguments...]") {
		t.Errorf("expected HelpName to subcommand: %s", output)
	}
}
// TestApp_Run_CommandHelpName checks that a HelpName on the PARENT command
// does not affect the subcommand's help, which still shows the full path.
func TestApp_Run_CommandHelpName(t *testing.T) {
	app := NewApp()
	buf := new(bytes.Buffer)
	app.Writer = buf
	app.Name = "command"
	subCmd := Command{
		Name:  "bar",
		Usage: "does bar things",
	}
	cmd := Command{
		Name:        "foo",
		HelpName:    "custom",
		Description: "foo commands",
		Subcommands: []Command{subCmd},
	}
	app.Commands = []Command{cmd}

	err := app.Run([]string{"command", "foo", "bar", "--help"})
	if err != nil {
		t.Error(err)
	}

	output := buf.String()

	if !strings.Contains(output, "command foo bar - does bar things") {
		t.Errorf("expected full path to subcommand: %s", output)
	}

	if !strings.Contains(output, "command foo bar [arguments...]") {
		t.Errorf("expected full path to subcommand: %s", output)
	}
}
// TestApp_Run_CommandSubcommandHelpName checks the help of a command that
// HAS subcommands: the header uses the app name plus command name, and the
// usage line includes the "command [command options]" form.
func TestApp_Run_CommandSubcommandHelpName(t *testing.T) {
	app := NewApp()
	buf := new(bytes.Buffer)
	app.Writer = buf
	app.Name = "base"
	subCmd := Command{
		Name:     "bar",
		HelpName: "custom",
		Usage:    "does bar things",
	}
	cmd := Command{
		Name:        "foo",
		Description: "foo commands",
		Subcommands: []Command{subCmd},
	}
	app.Commands = []Command{cmd}

	err := app.Run([]string{"command", "foo", "--help"})
	if err != nil {
		t.Error(err)
	}

	output := buf.String()

	if !strings.Contains(output, "base foo - foo commands") {
		t.Errorf("expected full path to subcommand: %s", output)
	}

	if !strings.Contains(output, "base foo command [command options] [arguments...]") {
		t.Errorf("expected full path to subcommand: %s", output)
	}
}
// TestApp_Run_Help checks that each help invocation form (--help, -h, help)
// prints the app's name and usage instead of running the Action.
func TestApp_Run_Help(t *testing.T) {
	var helpArguments = [][]string{{"boom", "--help"}, {"boom", "-h"}, {"boom", "help"}}

	for _, args := range helpArguments {
		buf := new(bytes.Buffer)

		t.Logf("==> checking with arguments %v", args)

		app := NewApp()
		app.Name = "boom"
		app.Usage = "make an explosive entrance"
		app.Writer = buf
		app.Action = func(c *Context) error {
			buf.WriteString("boom I say!")
			return nil
		}

		err := app.Run(args)
		if err != nil {
			t.Error(err)
		}

		output := buf.String()
		t.Logf("output: %q\n", buf.Bytes())

		if !strings.Contains(output, "boom - make an explosive entrance") {
			t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output)
		}
	}
}
// TestApp_Run_Version checks that --version / -v print the app version
// instead of running the Action.
func TestApp_Run_Version(t *testing.T) {
	var versionArguments = [][]string{{"boom", "--version"}, {"boom", "-v"}}

	for _, args := range versionArguments {
		buf := new(bytes.Buffer)

		t.Logf("==> checking with arguments %v", args)

		app := NewApp()
		app.Name = "boom"
		app.Usage = "make an explosive entrance"
		app.Version = "0.1.0"
		app.Writer = buf
		app.Action = func(c *Context) error {
			buf.WriteString("boom I say!")
			return nil
		}

		err := app.Run(args)
		if err != nil {
			t.Error(err)
		}

		output := buf.String()
		t.Logf("output: %q\n", buf.Bytes())

		if !strings.Contains(output, "0.1.0") {
			t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output)
		}
	}
}
// TestApp_Run_Categories verifies that commands are grouped into their
// declared categories, both in the Categories() structure and in the help
// output produced by running the app without arguments.
func TestApp_Run_Categories(t *testing.T) {
	app := NewApp()
	app.Name = "categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
		},
		{
			Name:     "command2",
			Category: "1",
		},
		{
			Name:     "command3",
			Category: "2",
		},
	}
	buf := new(bytes.Buffer)
	app.Writer = buf

	app.Run([]string{"categories"})

	// command1 and command2 share category "1"; command3 is alone in "2"
	expect := CommandCategories{
		&CommandCategory{
			Name: "1",
			Commands: []Command{
				app.Commands[0],
				app.Commands[1],
			},
		},
		&CommandCategory{
			Name: "2",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}
	if !reflect.DeepEqual(app.Categories(), expect) {
		t.Fatalf("expected categories %#v, to equal %#v", app.Categories(), expect)
	}

	output := buf.String()
	t.Logf("output: %q\n", buf.Bytes())

	if !strings.Contains(output, "1:\n     command1") {
		t.Errorf("want buffer to include category %q, did not: \n%q", "1:\n     command1", output)
	}
}
// TestApp_VisibleCategories checks that categories whose commands are all
// hidden are dropped from VisibleCategories, across three scenarios with
// progressively more hidden commands.
func TestApp_VisibleCategories(t *testing.T) {
	// scenario 1: one hidden command — its category "1" disappears
	app := NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
		},
	}

	expected := []*CommandCategory{
		{
			Name: "2",
			Commands: []Command{
				app.Commands[1],
			},
		},
		{
			Name: "3",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}

	app.Setup()
	expect(t, expected, app.VisibleCategories())

	// scenario 2: two hidden commands — only category "3" remains
	app = NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
			Hidden:   true,
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
		},
	}

	expected = []*CommandCategory{
		{
			Name: "3",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}

	app.Setup()
	expect(t, expected, app.VisibleCategories())

	// scenario 3: everything hidden — no visible categories at all
	app = NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
			Hidden:   true,
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
			Hidden:   true,
		},
	}

	expected = []*CommandCategory{}

	app.Setup()
	expect(t, expected, app.VisibleCategories())
}
// TestApp_Run_DoesNotOverwriteErrorFromBefore verifies that when both Before
// and After fail, Run's error carries the text of BOTH failures.
func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
	app := NewApp()
	app.Action = func(c *Context) error { return nil }
	app.Before = func(c *Context) error { return fmt.Errorf("before error") }
	app.After = func(c *Context) error { return fmt.Errorf("after error") }

	err := app.Run([]string{"foo"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	msg := err.Error()
	if !strings.Contains(msg, "before error") {
		t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
	}
	if !strings.Contains(msg, "after error") {
		t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
	}
}
// TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore verifies that a
// command-level Before error is preserved alongside the After error when
// running a command that has subcommands.
func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{
			Subcommands: []Command{
				{
					Name: "sub",
				},
			},
			Name:   "bar",
			Before: func(c *Context) error { return fmt.Errorf("before error") },
			After:  func(c *Context) error { return fmt.Errorf("after error") },
		},
	}

	err := app.Run([]string{"foo", "bar"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	// both hook errors must survive in the combined error text
	if !strings.Contains(err.Error(), "before error") {
		t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
	}
	if !strings.Contains(err.Error(), "after error") {
		t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
	}
}
// TestApp_OnUsageError_WithWrongFlagValue ensures a bad app-level flag value
// invokes OnUsageError with isSubcommand == false and that the handler's
// returned (wrapped) error becomes Run's error.
func TestApp_OnUsageError_WithWrongFlagValue(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{Name: "flag"},
	}
	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		if isSubcommand {
			t.Errorf("Expect no subcommand")
		}
		if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
			t.Errorf("Expect an invalid value error, but got \"%v\"", err)
		}
		// replace the parse error with a wrapped one
		return errors.New("intercepted: " + err.Error())
	}
	app.Commands = []Command{
		{
			Name: "bar",
		},
	}

	err := app.Run([]string{"foo", "--flag=wrong"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
	}
}
// TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand checks that a flag
// parse error occurring before a subcommand is dispatched still reports
// isSubcommand == false, and that the handler's error becomes Run's error.
func TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{Name: "flag"},
	}
	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		// The bad value is consumed while parsing the app-level flags, before
		// "bar" is dispatched, so this must not be flagged as a subcommand.
		// (Fixed: the message used to read "Expect subcommand", contradicting
		// the condition and the sibling test above.)
		if isSubcommand {
			t.Errorf("Expect no subcommand")
		}
		if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
			t.Errorf("Expect an invalid value error, but got \"%v\"", err)
		}
		return errors.New("intercepted: " + err.Error())
	}
	app.Commands = []Command{
		{
			Name: "bar",
		},
	}

	err := app.Run([]string{"foo", "--flag=wrong", "bar"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
	}
}
// A custom flag that conforms to the relevant interfaces, but has none of the
// fields that the other flag types do.
type customBoolFlag struct {
	Nombre string // the flag's name (deliberately not called Name)
}
// String renders the flag without going through the normal FlagStringer.
func (f *customBoolFlag) String() string {
	return "***" + f.Nombre + "***"
}
// GetName reports the flag's name.
func (f *customBoolFlag) GetName() string {
	return f.Nombre
}
// Apply registers the flag on the given flag set as a string flag whose
// default value is the flag's own name.
func (f *customBoolFlag) Apply(set *flag.FlagSet) {
	set.String(f.Nombre, f.Nombre, "")
}
// TestCustomFlagsUnused ensures an app carrying a custom flag type runs
// cleanly when that flag is never supplied on the command line.
func TestCustomFlagsUnused(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{&customBoolFlag{"custom"}}

	if err := app.Run([]string{"foo"}); err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestCustomFlagsUsed ensures an app carrying a custom flag type accepts
// that flag when it is supplied on the command line.
func TestCustomFlagsUsed(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{&customBoolFlag{"custom"}}

	if err := app.Run([]string{"foo", "--custom=bar"}); err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestCustomHelpVersionFlags swaps the global HelpFlag and VersionFlag for
// custom flag implementations and makes sure Run still accepts them.
func TestCustomHelpVersionFlags(t *testing.T) {
	app := NewApp()

	// Be sure to reset the global flags
	defer func(helpFlag Flag, versionFlag Flag) {
		HelpFlag = helpFlag
		VersionFlag = versionFlag
	}(HelpFlag, VersionFlag)

	HelpFlag = &customBoolFlag{"help-custom"}
	VersionFlag = &customBoolFlag{"version-custom"}

	err := app.Run([]string{"foo", "--help-custom=bar"})
	if err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestHandleAction_WithNonFuncAction verifies that a non-function Action
// (here the int 42) yields an ExitError with exit code 2.
func TestHandleAction_WithNonFuncAction(t *testing.T) {
	app := NewApp()
	app.Action = 42
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	err = HandleAction(app.Action, NewContext(app, fs, nil))

	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := err.(*ExitError)

	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}

	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type.") {
		t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
	}

	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleAction_WithInvalidFuncSignature verifies that an Action whose
// function takes no *Context yields an ExitError with exit code 2.
func TestHandleAction_WithInvalidFuncSignature(t *testing.T) {
	app := NewApp()
	app.Action = func() string { return "" }
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	err = HandleAction(app.Action, NewContext(app, fs, nil))

	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := err.(*ExitError)

	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}

	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") {
		t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
	}

	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleAction_WithInvalidFuncReturnSignature verifies that an Action
// returning more than a single error yields an ExitError with exit code 2.
func TestHandleAction_WithInvalidFuncReturnSignature(t *testing.T) {
	app := NewApp()
	app.Action = func(_ *Context) (int, error) { return 0, nil }
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	err = HandleAction(app.Action, NewContext(app, fs, nil))

	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := err.(*ExitError)

	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}

	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") {
		t.Fatalf("expected an invalid Action signature error, but got: %v", exitErr.Error())
	}

	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleExitCoder_Default routes an ExitError through the default exit
// handler and checks the message lands on the (faked) error writer.
func TestHandleExitCoder_Default(t *testing.T) {
	app := NewApp()
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	ctx := NewContext(app, fs, nil)
	app.handleExitCoder(ctx, NewExitError("Default Behavior Error", 42))

	// fakeErrWriter is the package-level buffer installed in init()
	output := fakeErrWriter.String()
	if !strings.Contains(output, "Default") {
		t.Fatalf("Expected Default Behavior from Error Handler but got: %s", output)
	}
}
// TestHandleExitCoder_Custom installs a custom ExitErrHandler and checks it
// is used in place of the default exit handling.
func TestHandleExitCoder_Custom(t *testing.T) {
	app := NewApp()
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	app.ExitErrHandler = func(_ *Context, _ error) {
		fmt.Fprintln(ErrWriter, "I'm a Custom error handler, I print what I want!")
	}

	ctx := NewContext(app, fs, nil)
	app.handleExitCoder(ctx, NewExitError("Default Behavior Error", 42))

	output := fakeErrWriter.String()
	if !strings.Contains(output, "Custom") {
		t.Fatalf("Expected Custom Behavior from Error Handler but got: %s", output)
	}
}
// TestHandleAction_WithUnknownPanic ensures that a panic raised inside an
// action (here: calling a nil ActionFunc) propagates out of HandleAction
// instead of being swallowed; the deferred refute asserts recover() != nil.
func TestHandleAction_WithUnknownPanic(t *testing.T) {
	defer func() { refute(t, recover(), nil) }()

	var fn ActionFunc

	app := NewApp()
	app.Action = func(ctx *Context) error {
		fn(ctx) // fn is nil — this panics deliberately
		return nil
	}
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	HandleAction(app.Action, NewContext(app, fs, nil))
}
// TestShellCompletionForIncompleteFlags ensures that requesting bash
// completion while a flag is still missing its value performs completion
// (listing commands and flag names) instead of running the Action.
func TestShellCompletionForIncompleteFlags(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{
			Name: "test-completion",
		},
	}
	app.EnableBashCompletion = true
	app.BashComplete = func(ctx *Context) {
		// emit each visible command name
		for _, command := range ctx.App.Commands {
			if command.Hidden {
				continue
			}

			for _, name := range command.Names() {
				fmt.Fprintln(ctx.App.Writer, name)
			}
		}

		// emit each flag as -x or --name, skipping the completion flag itself
		for _, flag := range ctx.App.Flags {
			for _, name := range strings.Split(flag.GetName(), ",") {
				if name == BashCompletionFlag.GetName() {
					continue
				}

				switch name = strings.TrimSpace(name); len(name) {
				case 0:
				case 1:
					fmt.Fprintln(ctx.App.Writer, "-"+name)
				default:
					fmt.Fprintln(ctx.App.Writer, "--"+name)
				}
			}
		}
	}
	app.Action = func(ctx *Context) error {
		return fmt.Errorf("should not get here")
	}
	err := app.Run([]string{"", "--test-completion", "--" + BashCompletionFlag.GetName()})
	if err != nil {
		t.Errorf("app should not return an error: %s", err)
	}
}
// TestHandleActionActuallyWorksWithActions verifies that HandleAction
// invokes a plain ActionFunc and propagates its nil error.
func TestHandleActionActuallyWorksWithActions(t *testing.T) {
	invoked := false
	var fn ActionFunc = func(c *Context) error {
		invoked = true
		return nil
	}

	if err := HandleAction(fn, nil); err != nil {
		t.Errorf("Should not have errored: %v", err)
	}

	if !invoked {
		t.Errorf("Function was not called")
	}
}
remove help assertion stuff
package cli
import (
"bytes"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
)
var (
	// lastExitCode records the code passed to the faked OsExiter.
	lastExitCode = 0
	// fakeOsExiter replaces os.Exit so tests can observe exit codes
	// without terminating the test binary.
	fakeOsExiter = func(rc int) {
		lastExitCode = rc
	}
	// fakeErrWriter captures error output for assertions.
	fakeErrWriter = &bytes.Buffer{}
)
// init wires the package-level exit and error-writer hooks to the fakes
// above for the whole test run.
func init() {
	OsExiter = fakeOsExiter
	ErrWriter = fakeErrWriter
}
// opCounts tracks the order in which app lifecycle hooks fire: each hook
// increments Total and stores the resulting sequence number in its own field.
type opCounts struct {
	Total, BashComplete, OnUsageError, Before, CommandNotFound, Action, After, SubCommand int
}
// ExampleApp_Run demonstrates the simplest app: one string flag read from
// within the Action.
func ExampleApp_Run() {
	// set args for examples sake
	os.Args = []string{"greet", "--name", "Jeremy"}

	app := NewApp()
	app.Name = "greet"
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Action = func(c *Context) error {
		fmt.Printf("Hello %v\n", c.String("name"))
		return nil
	}
	app.UsageText = "app [first_arg] [second_arg]"
	app.Author = "Harrison"
	app.Email = "harrison@lolwut.com"
	app.Authors = []Author{{Name: "Oliver Allen", Email: "oliver@toyshop.com"}}
	app.Run(os.Args)
	// Output:
	// Hello Jeremy
}
// ExampleApp_Run_subcommand demonstrates dispatching into a nested
// subcommand ("hi english") via an alias, with a flag on the subcommand.
func ExampleApp_Run_subcommand() {
	// set args for examples sake
	os.Args = []string{"say", "hi", "english", "--name", "Jeremy"}

	app := NewApp()
	app.Name = "say"
	app.Commands = []Command{
		{
			Name:        "hello",
			Aliases:     []string{"hi"},
			Usage:       "use it to see a description",
			Description: "This is how we describe hello the function",
			Subcommands: []Command{
				{
					Name:        "english",
					Aliases:     []string{"en"},
					Usage:       "sends a greeting in english",
					Description: "greets someone in english",
					Flags: []Flag{
						StringFlag{
							Name:  "name",
							Value: "Bob",
							Usage: "Name of the person to greet",
						},
					},
					Action: func(c *Context) error {
						fmt.Println("Hello,", c.String("name"))
						return nil
					},
				},
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// Hello, Jeremy
}
// ExampleApp_Run_appHelp shows the full app-level help text, including the
// description, authors, commands, and global options sections.
func ExampleApp_Run_appHelp() {
	// set args for examples sake
	os.Args = []string{"greet", "help"}

	app := NewApp()
	app.Name = "greet"
	app.Version = "0.1.0"
	app.Description = "This is how we describe greet the app"
	app.Authors = []Author{
		{Name: "Harrison", Email: "harrison@lolwut.com"},
		{Name: "Oliver Allen", Email: "oliver@toyshop.com"},
	}
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		},
	}
	app.Run(os.Args)
	// Output:
	// NAME:
	//    greet - A new cli application
	//
	// USAGE:
	//    greet [global options] command [command options] [arguments...]
	//
	// VERSION:
	//    0.1.0
	//
	// DESCRIPTION:
	//    This is how we describe greet the app
	//
	// AUTHORS:
	//    Harrison <harrison@lolwut.com>
	//    Oliver Allen <oliver@toyshop.com>
	//
	// COMMANDS:
	//    describeit, d  use it to see a description
	//    help, h        Shows a list of commands or help for one command
	//
	// GLOBAL OPTIONS:
	//    --name value   a name to say (default: "bob")
	//    --help, -h     show help
	//    --version, -v  print the version
}
// ExampleApp_Run_commandHelp shows per-command help obtained through the
// "h" alias of the built-in help command.
func ExampleApp_Run_commandHelp() {
	// set args for examples sake
	os.Args = []string{"greet", "h", "describeit"}

	app := NewApp()
	app.Name = "greet"
	app.Flags = []Flag{
		StringFlag{Name: "name", Value: "bob", Usage: "a name to say"},
	}
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		},
	}
	app.Run(os.Args)
	// Output:
	// NAME:
	//    greet describeit - use it to see a description
	//
	// USAGE:
	//    greet describeit [arguments...]
	//
	// DESCRIPTION:
	//    This is how we describe describeit the function
}
// ExampleApp_Run_noAction shows that an app without an Action falls back to
// printing its help text.
func ExampleApp_Run_noAction() {
	app := App{}
	app.Name = "greet"
	app.Run([]string{"greet"})
	// Output:
	// NAME:
	//    greet
	//
	// USAGE:
	//     [global options] command [command options] [arguments...]
	//
	// COMMANDS:
	//    help, h  Shows a list of commands or help for one command
	//
	// GLOBAL OPTIONS:
	//    --help, -h     show help
	//    --version, -v  print the version
}
// ExampleApp_Run_subcommandNoAction shows that invoking a command without an
// Action falls back to printing that command's help text.
func ExampleApp_Run_subcommandNoAction() {
	app := App{}
	app.Name = "greet"
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
		},
	}
	app.Run([]string{"greet", "describeit"})
	// Output:
	// NAME:
	//     describeit - use it to see a description
	//
	// USAGE:
	//     describeit [arguments...]
	//
	// DESCRIPTION:
	//    This is how we describe describeit the function
}
// ExampleApp_Run_bashComplete shows the default bash completion output:
// one command name or alias per line.
func ExampleApp_Run_bashComplete() {
	// set args for examples sake
	os.Args = []string{"greet", "--generate-bash-completion"}

	app := NewApp()
	app.Name = "greet"
	app.EnableBashCompletion = true
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		}, {
			Name:        "next",
			Usage:       "next example",
			Description: "more stuff to see when generating bash completion",
			Action: func(c *Context) error {
				fmt.Printf("the next example")
				return nil
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// describeit
	// d
	// next
	// help
	// h
}
// ExampleApp_Run_zshComplete shows zsh-style completion output
// ("name:description" per line), enabled via the _CLI_ZSH_AUTOCOMPLETE_HACK
// environment variable.
func ExampleApp_Run_zshComplete() {
	// set args for examples sake
	os.Args = []string{"greet", "--generate-bash-completion"}
	os.Setenv("_CLI_ZSH_AUTOCOMPLETE_HACK", "1")
	// unset the hack so it cannot leak into other tests/examples in the package
	defer os.Unsetenv("_CLI_ZSH_AUTOCOMPLETE_HACK")

	app := NewApp()
	app.Name = "greet"
	app.EnableBashCompletion = true
	app.Commands = []Command{
		{
			Name:        "describeit",
			Aliases:     []string{"d"},
			Usage:       "use it to see a description",
			Description: "This is how we describe describeit the function",
			Action: func(c *Context) error {
				fmt.Printf("i like to describe things")
				return nil
			},
		}, {
			Name:        "next",
			Usage:       "next example",
			Description: "more stuff to see when generating bash completion",
			Action: func(c *Context) error {
				fmt.Printf("the next example")
				return nil
			},
		},
	}

	app.Run(os.Args)
	// Output:
	// describeit:use it to see a description
	// d:use it to see a description
	// next:next example
	// help:Shows a list of commands or help for one command
	// h:Shows a list of commands or help for one command
}
// TestApp_Run verifies that the App's Action runs on every invocation and
// receives the correct first argument each time.
func TestApp_Run(t *testing.T) {
	var collected string

	app := NewApp()
	app.Action = func(c *Context) error {
		collected += c.Args().First()
		return nil
	}

	for _, args := range [][]string{
		{"command", "foo"},
		{"command", "bar"},
	} {
		expect(t, app.Run(args), nil)
	}
	expect(t, collected, "foobar")
}
// commandAppTests is the lookup table for TestApp_Command: names and aliases
// that should resolve to a command ("foobar", "batbaz", "b", "f") and names
// that should not ("bat", "nothing").
var commandAppTests = []struct {
	name     string
	expected bool
}{
	{"foobar", true},
	{"batbaz", true},
	{"b", true},
	{"f", true},
	{"bat", false},
	{"nothing", false},
}
// TestApp_Command checks that App.Command resolves both primary names and
// aliases, and yields nil for unknown names (table in commandAppTests).
func TestApp_Command(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{Name: "foobar", Aliases: []string{"f"}},
		{Name: "batbaz", Aliases: []string{"b"}},
	}

	for _, tc := range commandAppTests {
		expect(t, app.Command(tc.name) != nil, tc.expected)
	}
}
// TestApp_Setup_defaultsWriter checks that Setup defaults Writer to stdout.
func TestApp_Setup_defaultsWriter(t *testing.T) {
	app := new(App)
	app.Setup()
	expect(t, app.Writer, os.Stdout)
}
// TestApp_CommandWithArgBeforeFlags verifies that a positional argument given
// before a string flag is still parsed correctly.
func TestApp_CommandWithArgBeforeFlags(t *testing.T) {
	var gotOption, gotFirstArg string

	app := NewApp()
	app.Commands = []Command{{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
		},
		Action: func(c *Context) error {
			gotOption = c.String("option")
			gotFirstArg = c.Args().First()
			return nil
		},
	}}

	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option"})

	expect(t, gotOption, "my-option")
	expect(t, gotFirstArg, "my-arg")
}
// TestApp_CommandWithArgBeforeBoolFlags verifies parsing when a positional
// argument precedes a mix of interleaved string and bool flags.
func TestApp_CommandWithArgBeforeBoolFlags(t *testing.T) {
	var (
		gotOption, gotSecondOption, gotFirstArg string
		gotBool, gotSecondBool                  bool
	)

	app := NewApp()
	app.Commands = []Command{{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
			StringFlag{Name: "secondOption", Value: "", Usage: "another option"},
			BoolFlag{Name: "boolflag", Usage: "some bool"},
			BoolFlag{Name: "b", Usage: "another bool"},
		},
		Action: func(c *Context) error {
			gotOption = c.String("option")
			gotSecondOption = c.String("secondOption")
			gotBool = c.Bool("boolflag")
			gotSecondBool = c.Bool("b")
			gotFirstArg = c.Args().First()
			return nil
		},
	}}

	app.Run([]string{"", "cmd", "my-arg", "--boolflag", "--option", "my-option", "-b", "--secondOption", "fancy-option"})

	expect(t, gotOption, "my-option")
	expect(t, gotSecondOption, "fancy-option")
	expect(t, gotBool, true)
	expect(t, gotSecondBool, true)
	expect(t, gotFirstArg, "my-arg")
}
// TestApp_RunAsSubcommandParseFlags checks that a command's flags are parsed
// and that remaining tokens are exposed as positional arguments.
func TestApp_RunAsSubcommandParseFlags(t *testing.T) {
	var captured *Context

	app := NewApp()
	app.Commands = []Command{
		{
			Name: "foo",
			Action: func(c *Context) error {
				captured = c
				return nil
			},
			Flags: []Flag{
				StringFlag{
					Name:  "lang",
					Value: "english",
					Usage: "language for the greeting",
				},
			},
			Before: func(_ *Context) error { return nil },
		},
	}
	app.Run([]string{"", "foo", "--lang", "spanish", "abcd"})

	expect(t, captured.Args().Get(0), "abcd")
	expect(t, captured.String("lang"), "spanish")
}
// TestApp_RunAsSubCommandIncorrectUsage checks that RunAsSubcommand returns a
// "bad flag syntax" error (rather than panicking) when the argument list
// contains a malformed flag token.
// NOTE(review): the flag name "--foo" is intentionally odd-looking and the
// input uses the triple-dash "---foo" to provoke the parser error.
func TestApp_RunAsSubCommandIncorrectUsage(t *testing.T) {
	a := App{
		Flags: []Flag{
			StringFlag{Name: "--foo"},
		},
		Writer: bytes.NewBufferString(""),
	}

	// The Parse error here is deliberately ignored; RunAsSubcommand below
	// re-parses from the Context's flag set and is what is under test.
	set := flag.NewFlagSet("", flag.ContinueOnError)
	set.Parse([]string{"", "---foo"})
	c := &Context{flagSet: set}

	err := a.RunAsSubcommand(c)

	expect(t, err, errors.New("bad flag syntax: ---foo"))
}
// TestApp_CommandWithFlagBeforeTerminator checks that flags before the "--"
// terminator are parsed while everything after it is left as raw arguments,
// and that the terminator itself is kept in the argument list.
func TestApp_CommandWithFlagBeforeTerminator(t *testing.T) {
	var (
		gotOption string
		gotArgs   []string
	)

	app := NewApp()
	app.Commands = []Command{{
		Name: "cmd",
		Flags: []Flag{
			StringFlag{Name: "option", Value: "", Usage: "some option"},
		},
		Action: func(c *Context) error {
			gotOption = c.String("option")
			gotArgs = c.Args()
			return nil
		},
	}}

	app.Run([]string{"", "cmd", "my-arg", "--option", "my-option", "--", "--notARealFlag"})

	expect(t, gotOption, "my-option")
	expect(t, gotArgs[0], "my-arg")
	expect(t, gotArgs[1], "--")
	expect(t, gotArgs[2], "--notARealFlag")
}
// TestApp_CommandWithDash checks that a bare "-" is passed through as a
// positional argument, not treated as a flag.
func TestApp_CommandWithDash(t *testing.T) {
	var gotArgs []string

	app := NewApp()
	app.Commands = []Command{{
		Name: "cmd",
		Action: func(c *Context) error {
			gotArgs = c.Args()
			return nil
		},
	}}

	app.Run([]string{"", "cmd", "my-arg", "-"})

	expect(t, gotArgs[0], "my-arg")
	expect(t, gotArgs[1], "-")
}
// TestApp_CommandWithNoFlagBeforeTerminator checks that the "--" terminator
// and everything after it survive as positional arguments even when no flags
// are declared.
func TestApp_CommandWithNoFlagBeforeTerminator(t *testing.T) {
	var gotArgs []string

	app := NewApp()
	app.Commands = []Command{{
		Name: "cmd",
		Action: func(c *Context) error {
			gotArgs = c.Args()
			return nil
		},
	}}

	app.Run([]string{"", "cmd", "my-arg", "--", "notAFlagAtAll"})

	expect(t, gotArgs[0], "my-arg")
	expect(t, gotArgs[1], "--")
	expect(t, gotArgs[2], "notAFlagAtAll")
}
// TestApp_VisibleCommands verifies that VisibleCommands filters out commands
// marked Hidden and includes the auto-added help command.
func TestApp_VisibleCommands(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{
			Name:     "frob",
			HelpName: "foo frob",
			Action:   func(_ *Context) error { return nil },
		},
		{
			Name:     "frib",
			HelpName: "foo frib",
			Hidden:   true,
			Action:   func(_ *Context) error { return nil },
		},
	}

	// Setup appends the built-in help command, so after this call
	// app.Commands is [frob, frib, help].
	app.Setup()
	expected := []Command{
		app.Commands[0],
		app.Commands[2], // help
	}
	actual := app.VisibleCommands()
	expect(t, len(expected), len(actual))
	for i, actualCommand := range actual {
		expectedCommand := expected[i]

		if expectedCommand.Action != nil {
			// comparing func addresses is OK!
			expect(t, fmt.Sprintf("%p", expectedCommand.Action), fmt.Sprintf("%p", actualCommand.Action))
		}

		// nil out funcs, as they cannot be compared
		// (https://github.com/golang/go/issues/8554)
		expectedCommand.Action = nil
		actualCommand.Action = nil

		if !reflect.DeepEqual(expectedCommand, actualCommand) {
			t.Errorf("expected\n%#v\n!=\n%#v", expectedCommand, actualCommand)
		}
	}
}
// TestApp_Float64Flag checks that a float64 flag value reaches the Action.
func TestApp_Float64Flag(t *testing.T) {
	var height float64

	app := NewApp()
	app.Flags = []Flag{
		Float64Flag{Name: "height", Value: 1.5, Usage: "Set the height, in meters"},
	}
	app.Action = func(c *Context) error {
		height = c.Float64("height")
		return nil
	}

	app.Run([]string{"", "--height", "1.93"})
	expect(t, height, 1.93)
}
// TestApp_ParseSliceFlags verifies that repeated slice-valued flags
// accumulate every occurrence, in order, for both int and string slices.
//
// Fixes: the Usage strings for "p" and "ip" were swapped ("p" claimed to set
// IP addresses while receiving ports, and vice versa); the hand-rolled slice
// comparators are replaced with reflect.DeepEqual (reflect is already used
// elsewhere in this file).
func TestApp_ParseSliceFlags(t *testing.T) {
	var parsedIntSlice []int
	var parsedStringSlice []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			IntSliceFlag{Name: "p", Value: &IntSlice{}, Usage: "set one or more ports to open"},
			StringSliceFlag{Name: "ip", Value: &StringSlice{}, Usage: "set one or more ip addr"},
		},
		Action: func(c *Context) error {
			parsedIntSlice = c.IntSlice("p")
			parsedStringSlice = c.StringSlice("ip")
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-p", "22", "-p", "80", "-ip", "8.8.8.8", "-ip", "8.8.4.4"})

	expectedIntSlice := []int{22, 80}
	expectedStringSlice := []string{"8.8.8.8", "8.8.4.4"}

	if !reflect.DeepEqual(parsedIntSlice, expectedIntSlice) {
		t.Errorf("%v does not match %v", parsedIntSlice, expectedIntSlice)
	}

	if !reflect.DeepEqual(parsedStringSlice, expectedStringSlice) {
		t.Errorf("%v does not match %v", parsedStringSlice, expectedStringSlice)
	}
}
// TestApp_ParseSliceFlagsWithMissingValue verifies that slice flags declared
// without an explicit Value still collect the supplied occurrence.
//
// Fix: the second t.Errorf previously printed the int-slice values instead of
// the string-slice values it was reporting on (copy-paste bug).
func TestApp_ParseSliceFlagsWithMissingValue(t *testing.T) {
	var parsedIntSlice []int
	var parsedStringSlice []string

	app := NewApp()
	command := Command{
		Name: "cmd",
		Flags: []Flag{
			IntSliceFlag{Name: "a", Usage: "set numbers"},
			StringSliceFlag{Name: "str", Usage: "set strings"},
		},
		Action: func(c *Context) error {
			parsedIntSlice = c.IntSlice("a")
			parsedStringSlice = c.StringSlice("str")
			return nil
		},
	}
	app.Commands = []Command{command}

	app.Run([]string{"", "cmd", "my-arg", "-a", "2", "-str", "A"})

	var expectedIntSlice = []int{2}
	var expectedStringSlice = []string{"A"}

	if parsedIntSlice[0] != expectedIntSlice[0] {
		t.Errorf("%v does not match %v", parsedIntSlice[0], expectedIntSlice[0])
	}

	if parsedStringSlice[0] != expectedStringSlice[0] {
		t.Errorf("%v does not match %v", parsedStringSlice[0], expectedStringSlice[0])
	}
}
// TestApp_DefaultStdout checks that NewApp defaults Writer to os.Stdout.
func TestApp_DefaultStdout(t *testing.T) {
	if NewApp().Writer != os.Stdout {
		t.Error("Default output writer not set.")
	}
}
// mockWriter is an io.Writer that captures everything written to it so tests
// can inspect an App's output.
type mockWriter struct {
	written []byte
}

// Write appends a copy of p to the captured buffer and reports p fully
// written.
//
// Fix: the old implementation stored the first p directly
// (fw.written = p), aliasing the caller's slice — a caller reusing its
// buffer would silently corrupt the captured output. Appending
// unconditionally copies the bytes and works fine when written is nil.
func (fw *mockWriter) Write(p []byte) (n int, err error) {
	fw.written = append(fw.written, p...)
	return len(p), nil
}

// GetWritten returns all bytes captured so far.
func (fw *mockWriter) GetWritten() (b []byte) {
	return fw.written
}
// TestApp_SetStdout checks that output is routed to a caller-supplied Writer.
func TestApp_SetStdout(t *testing.T) {
	writer := &mockWriter{}

	app := NewApp()
	app.Name = "test"
	app.Writer = writer

	if err := app.Run([]string{"help"}); err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if len(writer.written) == 0 {
		t.Error("App did not write output to desired writer.")
	}
}
// TestApp_BeforeFunc verifies the Before hook contract:
//   - on success, Before runs first and the subcommand second;
//   - on Before failure, its error is returned and the subcommand is skipped;
//   - when After also fails, the two errors are combined into a MultiError.
// opCounts records the order hooks fired via the shared Total counter.
func TestApp_BeforeFunc(t *testing.T) {
	counts := &opCounts{}
	beforeError := fmt.Errorf("fail")
	var err error

	app := NewApp()

	app.Before = func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		s := c.String("opt")
		if s == "fail" {
			return beforeError
		}

		return nil
	}

	app.Commands = []Command{
		{
			Name: "sub",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Flags = []Flag{
		StringFlag{Name: "opt"},
	}

	// run with the Before() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 2 {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	counts = &opCounts{}

	// run with the Before() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if err != beforeError {
		t.Errorf("Run error expected, but not received")
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 0 {
		t.Errorf("Subcommand executed when NOT expected")
	}

	// reset
	counts = &opCounts{}

	afterError := errors.New("fail again")
	app.After = func(_ *Context) error {
		return afterError
	}

	// run with the Before() func failing, wrapped by After()
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the Before func
	if _, ok := err.(MultiError); !ok {
		t.Errorf("MultiError expected, but not received")
	}

	if counts.Before != 1 {
		t.Errorf("Before() not executed when expected")
	}

	if counts.SubCommand != 0 {
		t.Errorf("Subcommand executed when NOT expected")
	}
}
// TestApp_AfterFunc verifies the After hook contract:
//   - After always runs last (Total order: subcommand=1, After=2);
//   - an error returned by After is propagated out of Run, but the
//     subcommand still executes first.
func TestApp_AfterFunc(t *testing.T) {
	counts := &opCounts{}
	afterError := fmt.Errorf("fail")
	var err error

	app := NewApp()

	app.After = func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		s := c.String("opt")
		if s == "fail" {
			return afterError
		}

		return nil
	}

	app.Commands = []Command{
		{
			Name: "sub",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Flags = []Flag{
		StringFlag{Name: "opt"},
	}

	// run with the After() func succeeding
	err = app.Run([]string{"command", "--opt", "succeed", "sub"})

	if err != nil {
		t.Fatalf("Run error: %s", err)
	}

	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}

	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}

	// reset
	counts = &opCounts{}

	// run with the After() func failing
	err = app.Run([]string{"command", "--opt", "fail", "sub"})

	// should be the same error produced by the After func
	if err != afterError {
		t.Errorf("Run error expected, but not received")
	}

	if counts.After != 2 {
		t.Errorf("After() not executed when expected")
	}

	if counts.SubCommand != 1 {
		t.Errorf("Subcommand not executed when expected")
	}
}
// TestAppNoHelpFlag checks that with the global HelpFlag zeroed out, "-h" is
// an unknown flag and Run surfaces flag.ErrHelp.
func TestAppNoHelpFlag(t *testing.T) {
	savedHelpFlag := HelpFlag
	defer func() { HelpFlag = savedHelpFlag }()
	HelpFlag = BoolFlag{}

	app := NewApp()
	app.Writer = ioutil.Discard

	if err := app.Run([]string{"test", "-h"}); err != flag.ErrHelp {
		t.Errorf("expected error about missing help flag, but got: %s (%T)", err, err)
	}
}
// TestRequiredFlagAppRunBehavior is a table test covering Required flags at
// app, command, and subcommand level: missing required flags produce a
// requiredFlagsErr; --help suppresses the requirement; supplying the flag
// satisfies it.
func TestRequiredFlagAppRunBehavior(t *testing.T) {
	tdata := []struct {
		testCase        string
		appFlags        []Flag
		appRunInput     []string
		appCommands     []Command
		expectedAnError bool
	}{
		// assertion: empty input, when a required flag is present, errors
		{
			testCase:        "error_case_empty_input_with_required_flag_on_app",
			appRunInput:     []string{"myCLI"},
			appFlags:        []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_empty_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_empty_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
			expectedAnError: true,
		},
		// assertion: inputting --help, when a required flag is present, does not error
		{
			testCase:    "valid_case_help_input_with_required_flag_on_app",
			appRunInput: []string{"myCLI", "--help"},
			appFlags:    []Flag{StringFlag{Name: "requiredFlag", Required: true}},
		},
		{
			testCase:    "valid_case_help_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--help"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
		},
		{
			testCase:    "valid_case_help_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--help"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
		},
		// assertion: giving optional input, when a required flag is present, errors
		{
			testCase:        "error_case_optional_input_with_required_flag_on_app",
			appRunInput:     []string{"myCLI", "--optional", "cats"},
			appFlags:        []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_optional_input_with_required_flag_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--optional", "cats"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
			}},
			expectedAnError: true,
		},
		{
			testCase:    "error_case_optional_input_with_required_flag_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--optional", "cats"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}, StringFlag{Name: "optional"}},
				}},
			}},
			expectedAnError: true,
		},
		// assertion: when a required flag is present, inputting that required flag does not error
		{
			testCase:    "valid_case_required_flag_input_on_app",
			appRunInput: []string{"myCLI", "--requiredFlag", "cats"},
			appFlags:    []Flag{StringFlag{Name: "requiredFlag", Required: true}},
		},
		{
			testCase:    "valid_case_required_flag_input_on_command",
			appRunInput: []string{"myCLI", "myCommand", "--requiredFlag", "cats"},
			appCommands: []Command{Command{
				Name:  "myCommand",
				Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
			}},
		},
		{
			testCase:    "valid_case_required_flag_input_on_subcommand",
			appRunInput: []string{"myCLI", "myCommand", "mySubCommand", "--requiredFlag", "cats"},
			appCommands: []Command{Command{
				Name: "myCommand",
				Subcommands: []Command{Command{
					Name:  "mySubCommand",
					Flags: []Flag{StringFlag{Name: "requiredFlag", Required: true}},
				}},
			}},
		},
	}
	for _, test := range tdata {
		t.Run(test.testCase, func(t *testing.T) {
			// setup
			app := NewApp()
			app.Flags = test.appFlags
			app.Commands = test.appCommands

			// logic under test
			err := app.Run(test.appRunInput)

			// assertions
			if test.expectedAnError && err == nil {
				t.Errorf("expected an error, but there was none")
			}
			// type-check the error only for cases that expect one
			if _, ok := err.(requiredFlagsErr); test.expectedAnError && !ok {
				t.Errorf("expected a requiredFlagsErr, but got: %s", err)
			}
			if !test.expectedAnError && err != nil {
				t.Errorf("did not expected an error, but there was one: %s", err)
			}
		})
	}
}
// TestAppHelpPrinter checks that a custom HelpPrinter is invoked for "-h".
func TestAppHelpPrinter(t *testing.T) {
	savedPrinter := HelpPrinter
	defer func() { HelpPrinter = savedPrinter }()

	called := false
	HelpPrinter = func(w io.Writer, template string, data interface{}) {
		called = true
	}

	NewApp().Run([]string{"-h"})

	if !called {
		t.Errorf("Help printer expected to be called, but was not")
	}
}
// TestApp_VersionPrinter checks that a custom VersionPrinter is invoked by
// ShowVersion.
func TestApp_VersionPrinter(t *testing.T) {
	savedPrinter := VersionPrinter
	defer func() { VersionPrinter = savedPrinter }()

	called := false
	VersionPrinter = func(c *Context) {
		called = true
	}

	app := NewApp()
	ShowVersion(NewContext(app, nil, nil))

	if !called {
		t.Errorf("Version printer expected to be called, but was not")
	}
}
// TestApp_CommandNotFound checks that an unknown command triggers the
// CommandNotFound callback and that no command Action runs.
func TestApp_CommandNotFound(t *testing.T) {
	counts := &opCounts{}

	app := NewApp()
	app.CommandNotFound = func(c *Context, command string) {
		counts.Total++
		counts.CommandNotFound = counts.Total
	}
	app.Commands = []Command{{
		Name: "bar",
		Action: func(c *Context) error {
			counts.Total++
			counts.SubCommand = counts.Total
			return nil
		},
	}}

	app.Run([]string{"command", "foo"})

	expect(t, counts.CommandNotFound, 1)
	expect(t, counts.SubCommand, 0)
	expect(t, counts.Total, 1)
}
// TestApp_OrderOfOperations pins down the exact firing order of every App
// hook (OnUsageError, BashComplete, Before, CommandNotFound, Action, After)
// across a matrix of scenarios: usage errors, completion, hook failures, and
// absent hooks. The shared opCounts.Total counter records relative order.
func TestApp_OrderOfOperations(t *testing.T) {
	counts := &opCounts{}

	resetCounts := func() { counts = &opCounts{} }

	app := NewApp()
	app.EnableBashCompletion = true
	app.BashComplete = func(c *Context) {
		counts.Total++
		counts.BashComplete = counts.Total
	}

	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		counts.Total++
		counts.OnUsageError = counts.Total
		return errors.New("hay OnUsageError")
	}

	beforeNoError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return nil
	}

	beforeError := func(c *Context) error {
		counts.Total++
		counts.Before = counts.Total
		return errors.New("hay Before")
	}

	app.Before = beforeNoError
	app.CommandNotFound = func(c *Context, command string) {
		counts.Total++
		counts.CommandNotFound = counts.Total
	}

	afterNoError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return nil
	}

	afterError := func(c *Context) error {
		counts.Total++
		counts.After = counts.Total
		return errors.New("hay After")
	}

	app.After = afterNoError
	app.Commands = []Command{
		{
			Name: "bar",
			Action: func(c *Context) error {
				counts.Total++
				counts.SubCommand = counts.Total
				return nil
			},
		},
	}

	app.Action = func(c *Context) error {
		counts.Total++
		counts.Action = counts.Total
		return nil
	}

	// a usage error fires only OnUsageError
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.OnUsageError, 1)
	expect(t, counts.Total, 1)

	resetCounts()

	// completion fires only BashComplete
	_ = app.Run([]string{"command", "--generate-bash-completion"})
	expect(t, counts.BashComplete, 1)
	expect(t, counts.Total, 1)

	resetCounts()

	// with no OnUsageError handler, a usage error fires no hooks at all
	oldOnUsageError := app.OnUsageError
	app.OnUsageError = nil
	_ = app.Run([]string{"command", "--nope"})
	expect(t, counts.Total, 0)
	app.OnUsageError = oldOnUsageError

	resetCounts()

	// unknown command "foo": Before -> Action -> After (CommandNotFound is
	// NOT triggered here because the app-level Action still runs)
	_ = app.Run([]string{"command", "foo"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.CommandNotFound, 0)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)

	resetCounts()

	// a failing Before skips the subcommand but After still runs
	app.Before = beforeError
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.After, 2)
	expect(t, counts.Total, 2)
	app.Before = beforeNoError

	resetCounts()

	// with no After, the run ends at the subcommand
	app.After = nil
	_ = app.Run([]string{"command", "bar"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.Total, 2)
	app.After = afterNoError

	resetCounts()

	// a failing After still runs everything and surfaces its error
	app.After = afterError
	err := app.Run([]string{"command", "bar"})
	if err == nil {
		t.Fatalf("expected a non-nil error")
	}
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.SubCommand, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.After = afterNoError

	resetCounts()

	// with no commands registered, the app-level Action runs instead
	oldCommands := app.Commands
	app.Commands = nil
	_ = app.Run([]string{"command"})
	expect(t, counts.OnUsageError, 0)
	expect(t, counts.Before, 1)
	expect(t, counts.Action, 2)
	expect(t, counts.After, 3)
	expect(t, counts.Total, 3)
	app.Commands = oldCommands
}
// TestApp_Run_CommandWithSubcommandHasHelpTopic checks that every help
// spelling (--help, -h, help) for a command with subcommands produces help
// text mentioning the command, its description, and each subcommand.
func TestApp_Run_CommandWithSubcommandHasHelpTopic(t *testing.T) {
	helpSpellings := [][]string{
		{"command", "foo", "--help"},
		{"command", "foo", "-h"},
		{"command", "foo", "help"},
	}

	for _, args := range helpSpellings {
		t.Logf("==> checking with flags %v", args)

		out := new(bytes.Buffer)
		app := NewApp()
		app.Writer = out

		subCmdBar := Command{
			Name:  "bar",
			Usage: "does bar things",
		}
		subCmdBaz := Command{
			Name:  "baz",
			Usage: "does baz things",
		}
		cmd := Command{
			Name:        "foo",
			Description: "descriptive wall of text about how it does foo things",
			Subcommands: []Command{subCmdBar, subCmdBaz},
			Action:      func(c *Context) error { return nil },
		}
		app.Commands = []Command{cmd}

		if err := app.Run(args); err != nil {
			t.Error(err)
		}

		output := out.String()
		t.Logf("output: %q\n", out.Bytes())

		if strings.Contains(output, "No help topic for") {
			t.Errorf("expect a help topic, got none: \n%q", output)
		}

		for _, shouldContain := range []string{
			cmd.Name, cmd.Description,
			subCmdBar.Name, subCmdBar.Usage,
			subCmdBaz.Name, subCmdBaz.Usage,
		} {
			if !strings.Contains(output, shouldContain) {
				t.Errorf("want help to contain %q, did not: \n%q", shouldContain, output)
			}
		}
	}
}
// TestApp_Run_SubcommandFullPath checks that subcommand help shows the full
// "app command subcommand" path in NAME and USAGE.
func TestApp_Run_SubcommandFullPath(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Name = "command"
	app.Writer = out
	app.Commands = []Command{
		{
			Name:        "foo",
			Description: "foo commands",
			Subcommands: []Command{
				{Name: "bar", Usage: "does bar things"},
			},
		},
	}

	if err := app.Run([]string{"command", "foo", "bar", "--help"}); err != nil {
		t.Error(err)
	}

	help := out.String()
	for _, want := range []string{
		"command foo bar - does bar things",
		"command foo bar [arguments...]",
	} {
		if !strings.Contains(help, want) {
			t.Errorf("expected full path to subcommand: %s", help)
		}
	}
}
// TestApp_Run_SubcommandHelpName checks that a subcommand's HelpName
// overrides the default full-path name in its help output.
func TestApp_Run_SubcommandHelpName(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Name = "command"
	app.Writer = out
	app.Commands = []Command{
		{
			Name:        "foo",
			Description: "foo commands",
			Subcommands: []Command{
				{
					Name:     "bar",
					HelpName: "custom",
					Usage:    "does bar things",
				},
			},
		},
	}

	if err := app.Run([]string{"command", "foo", "bar", "--help"}); err != nil {
		t.Error(err)
	}

	help := out.String()
	if !strings.Contains(help, "custom - does bar things") {
		t.Errorf("expected HelpName for subcommand: %s", help)
	}
	if !strings.Contains(help, "custom [arguments...]") {
		t.Errorf("expected HelpName to subcommand: %s", help)
	}
}
// TestApp_Run_CommandHelpName checks that a parent command's HelpName does
// NOT leak into a subcommand's help — the subcommand still shows its own
// full path.
func TestApp_Run_CommandHelpName(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Name = "command"
	app.Writer = out
	app.Commands = []Command{
		{
			Name:        "foo",
			HelpName:    "custom",
			Description: "foo commands",
			Subcommands: []Command{
				{Name: "bar", Usage: "does bar things"},
			},
		},
	}

	if err := app.Run([]string{"command", "foo", "bar", "--help"}); err != nil {
		t.Error(err)
	}

	help := out.String()
	for _, want := range []string{
		"command foo bar - does bar things",
		"command foo bar [arguments...]",
	} {
		if !strings.Contains(help, want) {
			t.Errorf("expected full path to subcommand: %s", help)
		}
	}
}
// TestApp_Run_CommandSubcommandHelpName checks the help of a command that
// has subcommands: NAME and USAGE use the app name plus command name, even
// when a nested subcommand declares its own HelpName.
func TestApp_Run_CommandSubcommandHelpName(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Name = "base"
	app.Writer = out
	app.Commands = []Command{
		{
			Name:        "foo",
			Description: "foo commands",
			Subcommands: []Command{
				{
					Name:     "bar",
					HelpName: "custom",
					Usage:    "does bar things",
				},
			},
		},
	}

	if err := app.Run([]string{"command", "foo", "--help"}); err != nil {
		t.Error(err)
	}

	help := out.String()
	if !strings.Contains(help, "base foo - foo commands") {
		t.Errorf("expected full path to subcommand: %s", help)
	}
	if !strings.Contains(help, "base foo command [command options] [arguments...]") {
		t.Errorf("expected full path to subcommand: %s", help)
	}
}
// TestApp_Run_Help checks that every help spelling (--help, -h, help)
// renders help text instead of running the app's Action.
func TestApp_Run_Help(t *testing.T) {
	for _, args := range [][]string{
		{"boom", "--help"},
		{"boom", "-h"},
		{"boom", "help"},
	} {
		t.Logf("==> checking with arguments %v", args)

		out := new(bytes.Buffer)
		app := NewApp()
		app.Name = "boom"
		app.Usage = "make an explosive entrance"
		app.Writer = out
		app.Action = func(c *Context) error {
			out.WriteString("boom I say!")
			return nil
		}

		if err := app.Run(args); err != nil {
			t.Error(err)
		}

		output := out.String()
		t.Logf("output: %q\n", out.Bytes())

		if !strings.Contains(output, "boom - make an explosive entrance") {
			t.Errorf("want help to contain %q, did not: \n%q", "boom - make an explosive entrance", output)
		}
	}
}
// TestApp_Run_Version checks that both version spellings (--version, -v)
// print the app version instead of running the app's Action.
func TestApp_Run_Version(t *testing.T) {
	for _, args := range [][]string{
		{"boom", "--version"},
		{"boom", "-v"},
	} {
		t.Logf("==> checking with arguments %v", args)

		out := new(bytes.Buffer)
		app := NewApp()
		app.Name = "boom"
		app.Usage = "make an explosive entrance"
		app.Version = "0.1.0"
		app.Writer = out
		app.Action = func(c *Context) error {
			out.WriteString("boom I say!")
			return nil
		}

		if err := app.Run(args); err != nil {
			t.Error(err)
		}

		output := out.String()
		t.Logf("output: %q\n", out.Bytes())

		if !strings.Contains(output, "0.1.0") {
			t.Errorf("want version to contain %q, did not: \n%q", "0.1.0", output)
		}
	}
}
// TestApp_Run_Categories checks that commands are grouped into their declared
// categories and that the help output lists them per category.
// (The local result variable is named wantCategories to avoid shadowing the
// package-level expect() helper.)
func TestApp_Run_Categories(t *testing.T) {
	out := new(bytes.Buffer)

	app := NewApp()
	app.Name = "categories"
	app.HideHelp = true
	app.Writer = out
	app.Commands = []Command{
		{Name: "command1", Category: "1"},
		{Name: "command2", Category: "1"},
		{Name: "command3", Category: "2"},
	}

	app.Run([]string{"categories"})

	wantCategories := CommandCategories{
		&CommandCategory{
			Name: "1",
			Commands: []Command{
				app.Commands[0],
				app.Commands[1],
			},
		},
		&CommandCategory{
			Name: "2",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}
	if !reflect.DeepEqual(app.Categories(), wantCategories) {
		t.Fatalf("expected categories %#v, to equal %#v", app.Categories(), wantCategories)
	}

	output := out.String()
	t.Logf("output: %q\n", out.Bytes())

	if !strings.Contains(output, "1:\n     command1") {
		t.Errorf("want buffer to include category %q, did not: \n%q", "1:\n     command1", output)
	}
}
// TestApp_VisibleCategories verifies that a category disappears from
// VisibleCategories once every command in it is Hidden. Three scenarios:
// one hidden command, two hidden, and all hidden (empty result).
func TestApp_VisibleCategories(t *testing.T) {
	app := NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
		},
	}

	// category "1" is fully hidden; "2" and "3" remain
	expected := []*CommandCategory{
		{
			Name: "2",
			Commands: []Command{
				app.Commands[1],
			},
		},
		{
			Name: "3",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}

	app.Setup()
	expect(t, expected, app.VisibleCategories())

	app = NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
			Hidden:   true,
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
		},
	}

	// categories "1" and "2" are fully hidden; only "3" remains
	expected = []*CommandCategory{
		{
			Name: "3",
			Commands: []Command{
				app.Commands[2],
			},
		},
	}

	app.Setup()
	expect(t, expected, app.VisibleCategories())

	app = NewApp()
	app.Name = "visible-categories"
	app.HideHelp = true
	app.Commands = []Command{
		{
			Name:     "command1",
			Category: "1",
			HelpName: "foo command1",
			Hidden:   true,
		},
		{
			Name:     "command2",
			Category: "2",
			HelpName: "foo command2",
			Hidden:   true,
		},
		{
			Name:     "command3",
			Category: "3",
			HelpName: "foo command3",
			Hidden:   true,
		},
	}

	// everything hidden: no visible categories at all
	expected = []*CommandCategory{}

	app.Setup()
	expect(t, expected, app.VisibleCategories())
}
// TestApp_Run_DoesNotOverwriteErrorFromBefore checks that when both Before
// and After fail, the returned error carries both messages.
func TestApp_Run_DoesNotOverwriteErrorFromBefore(t *testing.T) {
	app := NewApp()
	app.Action = func(c *Context) error { return nil }
	app.Before = func(c *Context) error { return fmt.Errorf("before error") }
	app.After = func(c *Context) error { return fmt.Errorf("after error") }

	err := app.Run([]string{"foo"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	for _, fragment := range []string{"before error", "after error"} {
		if !strings.Contains(err.Error(), fragment) {
			switch fragment {
			case "before error":
				t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
			default:
				t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
			}
		}
	}
}
// TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore checks that a
// command-level Before error is not swallowed by a failing command-level
// After — both messages appear in the returned error.
func TestApp_Run_SubcommandDoesNotOverwriteErrorFromBefore(t *testing.T) {
	app := NewApp()
	app.Commands = []Command{
		{
			Name:   "bar",
			Before: func(c *Context) error { return fmt.Errorf("before error") },
			After:  func(c *Context) error { return fmt.Errorf("after error") },
			Subcommands: []Command{
				{Name: "sub"},
			},
		},
	}

	err := app.Run([]string{"foo", "bar"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	if !strings.Contains(err.Error(), "before error") {
		t.Errorf("expected text of error from Before method, but got none in \"%v\"", err)
	}
	if !strings.Contains(err.Error(), "after error") {
		t.Errorf("expected text of error from After method, but got none in \"%v\"", err)
	}
}
// TestApp_OnUsageError_WithWrongFlagValue checks that a bad value for a
// global int flag routes through OnUsageError (with isSubcommand false) and
// that the handler's wrapped error is what Run returns.
func TestApp_OnUsageError_WithWrongFlagValue(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{Name: "flag"},
	}
	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		if isSubcommand {
			t.Errorf("Expect no subcommand")
		}
		if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
			t.Errorf("Expect an invalid value error, but got \"%v\"", err)
		}
		return errors.New("intercepted: " + err.Error())
	}
	app.Commands = []Command{
		{Name: "bar"},
	}

	err := app.Run([]string{"foo", "--flag=wrong"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}
	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
	}
}
// TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand checks that a bad
// global flag value supplied before a subcommand still routes through
// OnUsageError and that the handler's wrapped error is returned.
// NOTE(review): the condition `if isSubcommand` contradicts the message
// "Expect subcommand" — it fires when isSubcommand IS true, i.e. exactly when
// the message says it should not. Whether isSubcommand is true here depends
// on where parsing fails (the flag is app-level); confirm intent against the
// sibling test above before changing either the condition or the message.
func TestApp_OnUsageError_WithWrongFlagValue_ForSubcommand(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{Name: "flag"},
	}
	app.OnUsageError = func(c *Context, err error, isSubcommand bool) error {
		if isSubcommand {
			t.Errorf("Expect subcommand")
		}
		if !strings.HasPrefix(err.Error(), "invalid value \"wrong\"") {
			t.Errorf("Expect an invalid value error, but got \"%v\"", err)
		}
		return errors.New("intercepted: " + err.Error())
	}
	app.Commands = []Command{
		{
			Name: "bar",
		},
	}

	err := app.Run([]string{"foo", "--flag=wrong", "bar"})
	if err == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	if !strings.HasPrefix(err.Error(), "intercepted: invalid value") {
		t.Errorf("Expect an intercepted error, but got \"%v\"", err)
	}
}
// customBoolFlag is a minimal Flag implementation used to verify that the
// framework works with flag types that have none of the usual fields.
type customBoolFlag struct {
	Nombre string
}

// String renders the flag its own way instead of using the normal
// FlagStringer.
func (f *customBoolFlag) String() string {
	return "***" + f.Nombre + "***"
}

// GetName returns the flag's name.
func (f *customBoolFlag) GetName() string {
	return f.Nombre
}

// Apply registers the flag on the given flag set.
func (f *customBoolFlag) Apply(set *flag.FlagSet) {
	set.String(f.Nombre, f.Nombre, "")
}
// TestCustomFlagsUnused checks that registering a custom flag type that is
// never supplied on the command line causes no error.
func TestCustomFlagsUnused(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{&customBoolFlag{"custom"}}

	if err := app.Run([]string{"foo"}); err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestCustomFlagsUsed checks that supplying a value for a non-standard Flag
// implementation does not cause Run to fail.
func TestCustomFlagsUsed(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{&customBoolFlag{"custom"}}

	if err := app.Run([]string{"foo", "--custom=bar"}); err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestCustomHelpVersionFlags checks that the package-level HelpFlag and
// VersionFlag globals can be replaced with custom Flag implementations.
func TestCustomHelpVersionFlags(t *testing.T) {
	app := NewApp()

	// Swap in custom globals and restore the originals when the test ends.
	defer func(helpFlag Flag, versionFlag Flag) {
		HelpFlag = helpFlag
		VersionFlag = versionFlag
	}(HelpFlag, VersionFlag)

	HelpFlag = &customBoolFlag{"help-custom"}
	VersionFlag = &customBoolFlag{"version-custom"}

	if err := app.Run([]string{"foo", "--help-custom=bar"}); err != nil {
		t.Errorf("Run returned unexpected error: %v", err)
	}
}
// TestHandleAction_WithNonFuncAction verifies that a non-function Action
// value (here an int) produces an *ExitError with code 2 and the
// invalid-Action message.
func TestHandleAction_WithNonFuncAction(t *testing.T) {
	app := NewApp()
	app.Action = 42

	set, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	actionErr := HandleAction(app.Action, NewContext(app, set, nil))
	if actionErr == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := actionErr.(*ExitError)
	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}
	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type.") {
		t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
	}
	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleAction_WithInvalidFuncSignature verifies that an Action whose
// function type does not match any accepted signature is rejected with an
// *ExitError carrying exit code 2.
func TestHandleAction_WithInvalidFuncSignature(t *testing.T) {
	app := NewApp()
	app.Action = func() string { return "" }

	set, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	actionErr := HandleAction(app.Action, NewContext(app, set, nil))
	if actionErr == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := actionErr.(*ExitError)
	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}
	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") {
		t.Fatalf("expected an unknown Action error, but got: %v", exitErr.Error())
	}
	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleAction_WithInvalidFuncReturnSignature verifies that an Action
// returning anything other than a single error is rejected with an
// *ExitError carrying exit code 2.
func TestHandleAction_WithInvalidFuncReturnSignature(t *testing.T) {
	app := NewApp()
	app.Action = func(_ *Context) (int, error) { return 0, nil }

	set, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	actionErr := HandleAction(app.Action, NewContext(app, set, nil))
	if actionErr == nil {
		t.Fatalf("expected to receive error from Run, got none")
	}

	exitErr, ok := actionErr.(*ExitError)
	if !ok {
		t.Fatalf("expected to receive a *ExitError")
	}
	if !strings.HasPrefix(exitErr.Error(), "ERROR invalid Action type") {
		t.Fatalf("expected an invalid Action signature error, but got: %v", exitErr.Error())
	}
	if exitErr.ExitCode() != 2 {
		t.Fatalf("expected error exit code to be 2, but got: %v", exitErr.ExitCode())
	}
}
// TestHandleExitCoder_Default checks that, with no custom ExitErrHandler
// configured, an ExitCoder handed to handleExitCoder is reported through the
// default handler, which writes to the package error writer.
func TestHandleExitCoder_Default(t *testing.T) {
	app := NewApp()
	set, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}

	app.handleExitCoder(NewContext(app, set, nil), NewExitError("Default Behavior Error", 42))

	if output := fakeErrWriter.String(); !strings.Contains(output, "Default") {
		t.Fatalf("Expected Default Behavior from Error Handler but got: %s", output)
	}
}
// TestHandleExitCoder_Custom checks that a custom ExitErrHandler replaces
// the default handler when an ExitCoder is handed to handleExitCoder.
func TestHandleExitCoder_Custom(t *testing.T) {
	app := NewApp()
	set, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	app.ExitErrHandler = func(_ *Context, _ error) {
		fmt.Fprintln(ErrWriter, "I'm a Custom error handler, I print what I want!")
	}

	app.handleExitCoder(NewContext(app, set, nil), NewExitError("Default Behavior Error", 42))

	if output := fakeErrWriter.String(); !strings.Contains(output, "Custom") {
		t.Fatalf("Expected Custom Behavior from Error Handler but got: %s", output)
	}
}
// TestHandleAction_WithUnknownPanic checks that a panic raised inside an
// Action (here caused by invoking a nil ActionFunc) propagates out of
// HandleAction instead of being converted into an error.
func TestHandleAction_WithUnknownPanic(t *testing.T) {
	// refute fails the test if recover() returned nil, i.e. if no panic
	// reached this deferred function.
	defer func() { refute(t, recover(), nil) }()

	// fn stays nil on purpose; calling it below panics at runtime.
	var fn ActionFunc

	app := NewApp()
	app.Action = func(ctx *Context) error {
		fn(ctx)
		return nil
	}
	fs, err := flagSet(app.Name, app.Flags)
	if err != nil {
		t.Errorf("error creating FlagSet: %s", err)
	}
	HandleAction(app.Action, NewContext(app, fs, nil))
}
// TestShellCompletionForIncompleteFlags checks that requesting bash
// completion while an incomplete flag ("--test-completion" with no value) is
// on the command line runs the completion handler instead of the Action.
func TestShellCompletionForIncompleteFlags(t *testing.T) {
	app := NewApp()
	app.Flags = []Flag{
		IntFlag{
			Name: "test-completion",
		},
	}
	app.EnableBashCompletion = true
	// Custom completer: print every visible command name, then every flag
	// name (except the completion flag itself) with the appropriate number
	// of dashes.
	app.BashComplete = func(ctx *Context) {
		for _, command := range ctx.App.Commands {
			if command.Hidden {
				continue
			}
			for _, name := range command.Names() {
				fmt.Fprintln(ctx.App.Writer, name)
			}
		}
		for _, flag := range ctx.App.Flags {
			for _, name := range strings.Split(flag.GetName(), ",") {
				if name == BashCompletionFlag.GetName() {
					continue
				}
				// Single dash for one-letter names, double otherwise;
				// zero-length pieces (from splitting "a, b") are dropped.
				switch name = strings.TrimSpace(name); len(name) {
				case 0:
				case 1:
					fmt.Fprintln(ctx.App.Writer, "-"+name)
				default:
					fmt.Fprintln(ctx.App.Writer, "--"+name)
				}
			}
		}
	}
	// The Action must never run when completion is requested.
	app.Action = func(ctx *Context) error {
		return fmt.Errorf("should not get here")
	}
	err := app.Run([]string{"", "--test-completion", "--" + BashCompletionFlag.GetName()})
	if err != nil {
		t.Errorf("app should not return an error: %s", err)
	}
}
// TestHandleActionActuallyWorksWithActions exercises the plain happy path:
// HandleAction must invoke an ActionFunc and propagate its nil error.
func TestHandleActionActuallyWorksWithActions(t *testing.T) {
	invoked := false
	var f ActionFunc = func(c *Context) error {
		invoked = true
		return nil
	}

	if err := HandleAction(f, nil); err != nil {
		t.Errorf("Should not have errored: %v", err)
	}
	if !invoked {
		t.Errorf("Function was not called")
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rpc
import (
"fmt"
"net/http"
"reflect"
"strings"
)
// ----------------------------------------------------------------------------
// Codec
// ----------------------------------------------------------------------------

// Codec creates a CodecRequest to process each request. One codec handles
// one serialization scheme (e.g. JSON) and is selected by Content-Type.
type Codec interface {
	NewRequest(*http.Request) CodecRequest
}

// CodecRequest decodes a request and encodes a response using a specific
// serialization scheme.
type CodecRequest interface {
	// Reads the request and returns the RPC method name.
	Method() (string, error)
	// Reads the request filling the RPC method args.
	ReadRequest(interface{}) error
	// Writes the response using the RPC method reply.
	WriteResponse(http.ResponseWriter, interface{})
	// Writes an error produced by the server, with the given HTTP status.
	WriteError(w http.ResponseWriter, status int, err error)
}
// ----------------------------------------------------------------------------
// Server
// ----------------------------------------------------------------------------

// NewServer returns a new RPC server with no codecs or services registered.
func NewServer() *Server {
	s := &Server{
		codecs:   make(map[string]Codec),
		services: new(serviceMap),
	}
	return s
}
// Server serves registered RPC services using registered codecs.
type Server struct {
	// codecs maps a lowercased Content-Type media type to its codec.
	codecs map[string]Codec
	// services holds the registered RPC services and their methods.
	services *serviceMap
}
// RegisterCodec adds a new codec to the server.
//
// Codecs are defined to process a given serialization scheme, e.g., JSON or
// XML. A codec is chosen based on the "Content-Type" header from the request,
// excluding the charset definition.
func (s *Server) RegisterCodec(codec Codec, contentType string) {
	key := strings.ToLower(contentType)
	s.codecs[key] = codec
}
// RegisterService adds a new service to the server.
//
// The name parameter is optional: if empty it will be inferred from
// the receiver type name.
//
// Methods from the receiver will be extracted if these rules are satisfied:
//
// - The receiver is exported (begins with an upper case letter) or local
// (defined in the package registering the service).
// - The method name is exported.
// - The method has three arguments: *http.Request, *args, *reply.
// - All three arguments are pointers.
// - The second and third arguments are exported or local.
// - The method has return type error.
//
// All other methods are ignored. Registration is delegated to the internal
// serviceMap, whose error (if any) is returned unchanged.
func (s *Server) RegisterService(receiver interface{}, name string) error {
	return s.services.register(receiver, name)
}
// HasMethod returns true if the given method is registered.
//
// The method uses a dotted notation as in "Service.Method".
func (s *Server) HasMethod(method string) bool {
	_, _, err := s.services.get(method)
	return err == nil
}
// ServeHTTP dispatches an HTTP request to the registered RPC service method:
// it selects a codec from the Content-Type header, decodes the method name
// and args, invokes the method via reflection, and writes reply or error.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		WriteError(w, 405, "rpc: POST method required, received "+r.Method)
		return
	}
	// Strip any media-type parameters (e.g. "; charset=utf-8"); codecs are
	// keyed by the bare media type.
	contentType := r.Header.Get("Content-Type")
	idx := strings.Index(contentType, ";")
	if idx != -1 {
		contentType = contentType[:idx]
	}
	var codec Codec
	if contentType == "" && len(s.codecs) == 1 {
		// If Content-Type is not set and only one codec has been registered,
		// then default to that codec.
		for _, c := range s.codecs {
			codec = c
		}
	} else if codec = s.codecs[strings.ToLower(contentType)]; codec == nil {
		WriteError(w, 415, "rpc: unrecognized Content-Type: "+contentType)
		return
	}
	// Create a new codec request.
	codecReq := codec.NewRequest(r)
	// Get service method to be called.
	method, errMethod := codecReq.Method()
	if errMethod != nil {
		codecReq.WriteError(w, 400, errMethod)
		return
	}
	serviceSpec, methodSpec, errGet := s.services.get(method)
	if errGet != nil {
		codecReq.WriteError(w, 400, errGet)
		return
	}
	// Decode the args.
	args := reflect.New(methodSpec.argsType)
	if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil {
		codecReq.WriteError(w, 400, errRead)
		return
	}
	// Call the service method: func (recv) Method(*http.Request, *args, *reply) error.
	reply := reflect.New(methodSpec.replyType)
	errValue := methodSpec.method.Func.Call([]reflect.Value{
		serviceSpec.rcvr,
		reflect.ValueOf(r),
		args,
		reply,
	})
	// Cast the result to error if needed (the single return value of the
	// service method).
	var errResult error
	errInter := errValue[0].Interface()
	if errInter != nil {
		errResult = errInter.(error)
	}
	// Prevents Internet Explorer from MIME-sniffing a response away
	// from the declared content-type. Must be set before the codec starts
	// writing the response body.
	w.Header().Set("x-content-type-options", "nosniff")
	// Encode the response.
	if errResult == nil {
		codecReq.WriteResponse(w, reply.Interface())
	} else {
		codecReq.WriteError(w, 400, errResult)
	}
}
func WriteError(w http.ResponseWriter, status int, msg string) {
w.WriteHeader(status)
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprint(w, msg)
}
Add security HTTP Headers (#5805)
Adds some HTTP security headers to Minio
to avoid problems with XSS and Clickjacking attacks.
X-Frame-Options
X-Frame-Options response header improves the protection
of web applications against Clickjacking. It declares a
policy communicated from a host to the client browser
on whether the browser must not display the transmitted
content in frames of other web pages.
X-XSS-Protection
This header enables the Cross-site scripting (XSS) filter in your browser.
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package rpc
import (
"fmt"
"net/http"
"reflect"
"strings"
)
// ----------------------------------------------------------------------------
// Codec
// ----------------------------------------------------------------------------

// Codec creates a CodecRequest to process each request. One codec handles
// one serialization scheme (e.g. JSON) and is selected by Content-Type.
type Codec interface {
	NewRequest(*http.Request) CodecRequest
}

// CodecRequest decodes a request and encodes a response using a specific
// serialization scheme.
type CodecRequest interface {
	// Reads the request and returns the RPC method name.
	Method() (string, error)
	// Reads the request filling the RPC method args.
	ReadRequest(interface{}) error
	// Writes the response using the RPC method reply.
	WriteResponse(http.ResponseWriter, interface{})
	// Writes an error produced by the server, with the given HTTP status.
	WriteError(w http.ResponseWriter, status int, err error)
}
// ----------------------------------------------------------------------------
// Server
// ----------------------------------------------------------------------------

// NewServer returns a new RPC server with no codecs or services registered.
func NewServer() *Server {
	s := &Server{
		codecs:   make(map[string]Codec),
		services: new(serviceMap),
	}
	return s
}
// Server serves registered RPC services using registered codecs.
type Server struct {
	// codecs maps a lowercased Content-Type media type to its codec.
	codecs map[string]Codec
	// services holds the registered RPC services and their methods.
	services *serviceMap
}
// RegisterCodec adds a new codec to the server.
//
// Codecs are defined to process a given serialization scheme, e.g., JSON or
// XML. A codec is chosen based on the "Content-Type" header from the request,
// excluding the charset definition.
func (s *Server) RegisterCodec(codec Codec, contentType string) {
	key := strings.ToLower(contentType)
	s.codecs[key] = codec
}
// RegisterService adds a new service to the server.
//
// The name parameter is optional: if empty it will be inferred from
// the receiver type name.
//
// Methods from the receiver will be extracted if these rules are satisfied:
//
// - The receiver is exported (begins with an upper case letter) or local
// (defined in the package registering the service).
// - The method name is exported.
// - The method has three arguments: *http.Request, *args, *reply.
// - All three arguments are pointers.
// - The second and third arguments are exported or local.
// - The method has return type error.
//
// All other methods are ignored. Registration is delegated to the internal
// serviceMap, whose error (if any) is returned unchanged.
func (s *Server) RegisterService(receiver interface{}, name string) error {
	return s.services.register(receiver, name)
}
// HasMethod returns true if the given method is registered.
//
// The method uses a dotted notation as in "Service.Method".
func (s *Server) HasMethod(method string) bool {
	_, _, err := s.services.get(method)
	return err == nil
}
// ServeHTTP dispatches an HTTP request to the registered RPC service method:
// it selects a codec from the Content-Type header, decodes the method name
// and args, invokes the method via reflection, and writes reply or error.
// Security headers are set just before the body is written.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		WriteError(w, 405, "rpc: POST method required, received "+r.Method)
		return
	}
	// Strip any media-type parameters (e.g. "; charset=utf-8"); codecs are
	// keyed by the bare media type.
	contentType := r.Header.Get("Content-Type")
	idx := strings.Index(contentType, ";")
	if idx != -1 {
		contentType = contentType[:idx]
	}
	var codec Codec
	if contentType == "" && len(s.codecs) == 1 {
		// If Content-Type is not set and only one codec has been registered,
		// then default to that codec.
		for _, c := range s.codecs {
			codec = c
		}
	} else if codec = s.codecs[strings.ToLower(contentType)]; codec == nil {
		WriteError(w, 415, "rpc: unrecognized Content-Type: "+contentType)
		return
	}
	// Create a new codec request.
	codecReq := codec.NewRequest(r)
	// Get service method to be called.
	method, errMethod := codecReq.Method()
	if errMethod != nil {
		codecReq.WriteError(w, 400, errMethod)
		return
	}
	serviceSpec, methodSpec, errGet := s.services.get(method)
	if errGet != nil {
		codecReq.WriteError(w, 400, errGet)
		return
	}
	// Decode the args.
	args := reflect.New(methodSpec.argsType)
	if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil {
		codecReq.WriteError(w, 400, errRead)
		return
	}
	// Call the service method: func (recv) Method(*http.Request, *args, *reply) error.
	reply := reflect.New(methodSpec.replyType)
	errValue := methodSpec.method.Func.Call([]reflect.Value{
		serviceSpec.rcvr,
		reflect.ValueOf(r),
		args,
		reply,
	})
	// Cast the result to error if needed.
	var errResult error
	errInter := errValue[0].Interface()
	if errInter != nil {
		errResult = errInter.(error)
	}
	// Prevents Internet Explorer from MIME-sniffing a response away
	// from the declared content-type.
	w.Header().Set("x-content-type-options", "nosniff")
	// Protects against reflected XSS attacks. Bug fix: the header value must
	// be the bare token `1; mode=block`; the previous literal embedded a
	// pair of double quotes ("\"1; mode=block\""), which browsers do not
	// parse as a valid directive.
	w.Header().Set("X-XSS-Protection", "1; mode=block")
	// Protects against clickjacking by forbidding framing from other origins.
	w.Header().Set("X-Frame-Options", "SAMEORIGIN")
	// Encode the response.
	if errResult == nil {
		codecReq.WriteResponse(w, reply.Interface())
	} else {
		codecReq.WriteError(w, 400, errResult)
	}
}
func WriteError(w http.ResponseWriter, status int, msg string) {
w.WriteHeader(status)
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprint(w, msg)
}
|
package model
import "fmt"
import "net/url"
import "net/http"
import "net/http/cookiejar"
import "github.com/ishiikurisu/moneylog"
import "os"
import "mime/multipart"
import "bufio"
/*****************************
* LOCAL STORAGE DEFINITIONS *
*****************************/

// This is the structure that will deal with the session's cookies. It will
// need to know which URL these cookies relate to, and its logic is already
// coded to understand the app's behaviour.
type LocalStorage struct {
	// The actual data. The logic of this application. The reason we are here.
	MoneyLog moneylog.Log
	// The structure that will deal with our cookies.
	CookieJar *cookiejar.Jar
	// This is the URL these cookies refer to.
	Url *url.URL
	// This is where the current log is stored on local memory
	// ("log.txt" as set by NewLocalStorage); written by StoreLog.
	LogFile string
}
// NewLocalStorage creates the session storage: an empty money log, a cookie
// jar, and the base URL (scheme/host overridden to http://heroku.com) the
// cookies are scoped to.
//
// Bug fix: the original returned only the cookiejar error. When the jar was
// created successfully but url.Parse failed, the caller received a nil
// storage together with a nil error. The first non-nil error is now
// returned, so storage == nil always coincides with err != nil.
func NewLocalStorage() (*LocalStorage, error) {
	jar, jarErr := cookiejar.New(nil)
	baseUrl, parseErr := url.Parse(GetAddress())

	err := jarErr
	if err == nil {
		err = parseErr
	}
	if err != nil {
		fmt.Println(err)
		return nil, err
	}

	baseUrl.Scheme = "http"
	baseUrl.Host = "heroku.com"
	storage := LocalStorage{
		// TODO Implement actual cookie jar
		MoneyLog:  moneylog.EmptyLog(),
		CookieJar: jar,
		Url:       baseUrl,
		LogFile:   "log.txt",
	}
	return &storage, nil
}
// AddCookie stores the submitted user name as a "UserName" cookie, both in
// the session's cookie jar and on the HTTP response. Cookies added here will
// last forever because of the serverless nature of this app.
func (storage *LocalStorage) AddCookie(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	username, _ := getUserAndPassword(r)
	cookie := &http.Cookie{
		Name:  "UserName",
		Value: username,
	}

	jarCookies := append(storage.CookieJar.Cookies(storage.Url), cookie)
	storage.CookieJar.SetCookies(storage.Url, jarCookies)
	http.SetCookie(w, cookie)
	return w, r
}
// GetLog extracts the current money log from the request's "MoneyLog"
// cookie. If there is no log, a string of length 0 is returned (and the
// lookup error is printed).
func (storage *LocalStorage) GetLog(w http.ResponseWriter, r *http.Request) string {
	cookie, err := r.Cookie("MoneyLog")
	if err != nil {
		fmt.Println(err)
		return ""
	}
	return cookie.Value
}
// GetUser extracts the current user from the request's "UserName" cookie.
// If there is no user, a string of length 0 is returned (and the lookup
// error is printed).
func (storage *LocalStorage) GetUser(w http.ResponseWriter, r *http.Request) string {
	cookie, err := r.Cookie("UserName")
	if err != nil {
		fmt.Println(err)
		return ""
	}
	return cookie.Value
}
// stuffFromRaw converts raw form fields into a description string and a
// numeric value. A value that fails to scan leaves the float at zero; the
// scan error is deliberately ignored.
func stuffFromRaw(rawDescription, rawValue string) (string, float64) {
	value := 0.0
	fmt.Sscanf(rawValue, "%F", &value)
	return rawDescription, value
}
// AddEntryFromRaw parses the raw description/value pair (via stuffFromRaw)
// and appends it to the in-memory money log.
func (storage *LocalStorage) AddEntryFromRaw(rawDescription, rawValue string) {
	description, value := stuffFromRaw(rawDescription, rawValue)
	storage.MoneyLog.Add(description, value)
}
// SaveLog serializes the in-memory money log into a "MoneyLog" cookie on
// the response.
func (storage *LocalStorage) SaveLog(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	cookie := &http.Cookie{
		Name:   "MoneyLog",
		Value:  storage.MoneyLog.ToString(),
		MaxAge: 0,
	}
	http.SetCookie(w, cookie)
	return w, r
}
// StoreLog writes the serialized money log to the configured local file,
// panicking on any I/O failure (consistent with the surrounding code).
func (storage *LocalStorage) StoreLog(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	rawLog := storage.MoneyLog.ToString()
	fp, err := os.Create(storage.LogFile)
	if err != nil {
		panic(err)
	}
	defer fp.Close()
	if _, err = fp.WriteString(rawLog); err != nil {
		panic(err)
	}
	// Bug fix: the Sync error was previously discarded; surface it like the
	// other failures so a failed flush-to-disk is not silently ignored.
	if err = fp.Sync(); err != nil {
		panic(err)
	}
	return w, r
}
// AddLogFromFile reads comma-delimited fields from the uploaded file and
// concatenates them until the "...," terminator field is reached; the
// terminator itself is appended last.
//
// NOTE(review): ReadField returns each field INCLUDING its trailing comma,
// and an extra "," is appended after it, doubling the delimiter — confirm
// the downstream parser expects that. Also, if the terminator is missing,
// ReadField panics on EOF, aborting the request — TODO confirm intended.
func (storage *LocalStorage) AddLogFromFile(mmf multipart.File) string {
	buffer := bufio.NewReader(mmf)
	current := ReadField(buffer)
	outlet := ""
	for current != "...," {
		outlet += current
		outlet += ","
		current = ReadField(buffer)
	}
	outlet += current
	return outlet
}
func ReadField(reader *bufio.Reader) string {
raw := make([]byte, 0)
raw, err := reader.ReadBytes(',')
if err != nil {
panic(err)
}
return string(raw)
}
// AddEntryFromRawAndSaveLog reads the log out of the request cookie,
// appends the parsed (description, value) entry, and writes the updated log
// back as a "MoneyLog" cookie on the response.
func (storage *LocalStorage) AddEntryFromRawAndSaveLog(d, v string, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	description, value := stuffFromRaw(d, v)
	log := moneylog.LogFromString(storage.GetLog(w, r))
	log.Add(description, value)

	http.SetCookie(w, &http.Cookie{
		Name:  "MoneyLog",
		Value: log.ToString(),
	})
	return w, r
}
// AddLogFromRawAndSaveLog parses a whole serialized log and stores its
// canonical form as a "MoneyLog" cookie on the response.
func (storage *LocalStorage) AddLogFromRawAndSaveLog(raw string, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	log := moneylog.LogFromString(raw)
	http.SetCookie(w, &http.Cookie{
		Name:  "MoneyLog",
		Value: log.ToString(),
	})
	return w, r
}
Simplified storage struct
package model
import "fmt"
import "net/url"
import "net/http"
import "net/http/cookiejar"
import "github.com/ishiikurisu/moneylog"
import "mime/multipart"
import "bufio"
/*****************************
* LOCAL STORAGE DEFINITIONS *
*****************************/

// This is the structure that will deal with the session's cookies. It will
// need to know which URL these cookies relate to, and its logic is already
// coded to understand the app's behaviour.
type LocalStorage struct {
	// The actual data. The logic of this application. The reason we are here.
	MoneyLog moneylog.Log
	// The structure that will deal with our cookies.
	CookieJar *cookiejar.Jar
	// This is the URL these cookies refer to.
	Url *url.URL
	// This is where the current log is stored on local memory
	// ("log.txt" as set by NewLocalStorage).
	LogFile string
}
// NewLocalStorage creates the session storage: an empty money log, a cookie
// jar, and the base URL (scheme/host overridden to http://heroku.com) the
// cookies are scoped to.
//
// Bug fix: the original returned only the cookiejar error. When the jar was
// created successfully but url.Parse failed, the caller received a nil
// storage together with a nil error. The first non-nil error is now
// returned, so storage == nil always coincides with err != nil.
func NewLocalStorage() (*LocalStorage, error) {
	jar, jarErr := cookiejar.New(nil)
	baseUrl, parseErr := url.Parse(GetAddress())

	err := jarErr
	if err == nil {
		err = parseErr
	}
	if err != nil {
		fmt.Println(err)
		return nil, err
	}

	baseUrl.Scheme = "http"
	baseUrl.Host = "heroku.com"
	storage := LocalStorage{
		// TODO Implement actual cookie jar
		MoneyLog:  moneylog.EmptyLog(),
		CookieJar: jar,
		Url:       baseUrl,
		LogFile:   "log.txt",
	}
	return &storage, nil
}
// GetLog extracts the current money log from the request's "MoneyLog"
// cookie. If there is no log, a string of length 0 is returned (and the
// lookup error is printed).
func (storage *LocalStorage) GetLog(w http.ResponseWriter, r *http.Request) string {
	cookie, err := r.Cookie("MoneyLog")
	if err != nil {
		fmt.Println(err)
		return ""
	}
	return cookie.Value
}
// stuffFromRaw converts raw form fields into a description string and a
// numeric value. A value that fails to scan leaves the float at zero; the
// scan error is deliberately ignored.
func stuffFromRaw(rawDescription, rawValue string) (string, float64) {
	value := 0.0
	fmt.Sscanf(rawValue, "%F", &value)
	return rawDescription, value
}
// AddLogFromFile reads comma-delimited fields from the uploaded file and
// concatenates them until the "...," terminator field is reached; the
// terminator itself is appended last.
//
// NOTE(review): ReadField returns each field INCLUDING its trailing comma,
// and an extra "," is appended after it, doubling the delimiter — confirm
// the downstream parser expects that. Also, if the terminator is missing,
// ReadField panics on EOF, aborting the request — TODO confirm intended.
func (storage *LocalStorage) AddLogFromFile(mmf multipart.File) string {
	buffer := bufio.NewReader(mmf)
	current := ReadField(buffer)
	outlet := ""
	for current != "...," {
		outlet += current
		outlet += ","
		current = ReadField(buffer)
	}
	outlet += current
	return outlet
}
func ReadField(reader *bufio.Reader) string {
raw := make([]byte, 0)
raw, err := reader.ReadBytes(',')
if err != nil {
panic(err)
}
return string(raw)
}
// AddEntryFromRawAndSaveLog reads the log out of the request cookie,
// appends the parsed (description, value) entry, and writes the updated log
// back as a "MoneyLog" cookie on the response.
func (storage *LocalStorage) AddEntryFromRawAndSaveLog(d, v string, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	description, value := stuffFromRaw(d, v)
	log := moneylog.LogFromString(storage.GetLog(w, r))
	log.Add(description, value)

	http.SetCookie(w, &http.Cookie{
		Name:  "MoneyLog",
		Value: log.ToString(),
	})
	return w, r
}
// AddLogFromRawAndSaveLog parses a whole serialized log and stores its
// canonical form as a "MoneyLog" cookie on the response.
func (storage *LocalStorage) AddLogFromRawAndSaveLog(raw string, w http.ResponseWriter, r *http.Request) (http.ResponseWriter, *http.Request) {
	log := moneylog.LogFromString(raw)
	http.SetCookie(w, &http.Cookie{
		Name:  "MoneyLog",
		Value: log.ToString(),
	})
	return w, r
}
|
package models
// Direction encodes a heading on the grid. The values are distinct bits
// (1 << iota), so they could also be combined as a mask.
type Direction int

const (
	North Direction = 1 << iota
	South
	East
	West
)

// Coordinate is one (X, Y) cell on the grid.
type Coordinate struct {
	X, Y int
}

// Gopher holds the mutable state of a single gopher.
type Gopher struct {
	// Current direction
	Direction Direction
	// Current position.
	X, Y int
	// Cells visited so far.
	Path []Coordinate
	// Score accumulated so far.
	Score int
	// Paths publishes path collections keyed by int — presumably the score;
	// confirm with the consumer of this channel.
	Paths chan map[int][]Coordinate
	// Close is an unbuffered signal channel; its consumer lives elsewhere
	// in the package.
	Close chan struct{}
}
// NewGopher returns a Gopher with its communication channels initialized;
// every other field starts at its zero value.
func NewGopher() *Gopher {
	g := &Gopher{}
	g.Paths = make(chan map[int][]Coordinate)
	g.Close = make(chan struct{})
	return g
}
Use map[string][]Coordinate
package models
// Direction encodes a heading on the grid. The values are distinct bits
// (1 << iota), so they could also be combined as a mask.
type Direction int

const (
	North Direction = 1 << iota
	South
	East
	West
)

// Coordinate is one (X, Y) cell on the grid, JSON-serializable with
// uppercase keys.
type Coordinate struct {
	X int `json:"X"`
	Y int `json:"Y"`
}

// Gopher holds the mutable state of a single gopher.
type Gopher struct {
	// Current direction
	Direction Direction
	// Current position.
	X, Y int
	// Cells visited so far.
	Path []Coordinate
	// Score accumulated so far.
	Score int
	// Paths publishes path collections keyed by string — presumably a
	// stringified score or player id; confirm with the consumer.
	Paths chan map[string][]Coordinate
	// Close is an unbuffered signal channel; its consumer lives elsewhere
	// in the package.
	Close chan struct{}
}
// NewGopher returns a Gopher with its communication channels initialized;
// every other field starts at its zero value.
func NewGopher() *Gopher {
	g := &Gopher{}
	g.Paths = make(chan map[string][]Coordinate)
	g.Close = make(chan struct{})
	return g
}
|
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package models
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
"github.com/robfig/cron"
"github.com/gpmgo/switch/modules/log"
"github.com/gpmgo/switch/modules/setting"
)
var (
x *xorm.Engine
)
// init connects to MySQL using the configured credentials, synchronizes the
// schema for the core tables, computes the initial statistics, and schedules
// the statistics job to refresh every five minutes.
func init() {
	var err error
	x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
		setting.Cfg.MustValue("database", "USER"),
		setting.Cfg.MustValue("database", "PASSWD"),
		setting.Cfg.MustValue("database", "HOST"),
		setting.Cfg.MustValue("database", "NAME")))
	if err != nil {
		log.Fatal(4, "Fail to init new engine: %v", err)
	} else if err = x.Sync(new(Package), new(Revision), new(Downloader)); err != nil {
		log.Fatal(4, "Fail to sync database: %v", err)
	}

	statistic()
	c := cron.New()
	c.AddFunc("@every 5m", statistic)
	// Bug fix: the cron scheduler was created and given the job but never
	// started, so the periodic statistics refresh never ran.
	c.Start()
}
// Ping verifies that the database connection is still alive.
func Ping() error {
	return x.Ping()
}
// DownloadStats aggregates download counters; embedded into Stats.
type DownloadStats struct {
	NumTotalDownload int64
}

// Stats is the site-wide snapshot refreshed by statistic().
type Stats struct {
	NumPackages, NumDownloaders int64
	DownloadStats
	TrendingPackages, NewPackages, PopularPackages []*Package
}

// Statistic is the current snapshot. NOTE(review): it is written by
// statistic() on a cron goroutine and presumably read by handlers without
// locking — confirm whether synchronization is needed.
var Statistic Stats
// statistic recomputes the package-level Statistic snapshot. It runs once at
// startup and then periodically via cron.
func statistic() {
	// Bug fix: accumulate into a local and assign. The old code did
	// `Statistic.NumTotalDownload += ...` without ever resetting the field,
	// so every periodic run re-added all previous downloads on top of the
	// running total (unbounded double counting).
	var totalDownloads int64
	x.Iterate(new(Package), func(idx int, bean interface{}) error {
		pkg := bean.(*Package)
		totalDownloads += pkg.DownloadCount
		return nil
	})
	Statistic.NumTotalDownload = totalDownloads

	Statistic.NumPackages, _ = x.Count(new(Package))
	Statistic.NumDownloaders, _ = x.Count(new(Downloader))

	Statistic.TrendingPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("recent_download").Find(&Statistic.TrendingPackages)
	Statistic.NewPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("created").Find(&Statistic.NewPackages)
	Statistic.PopularPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("download_count").Find(&Statistic.PopularPackages)
}
Mirror fix
// Copyright 2014 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package models
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/go-xorm/xorm"
"github.com/robfig/cron"
"github.com/gpmgo/switch/modules/log"
"github.com/gpmgo/switch/modules/setting"
)
var (
x *xorm.Engine
)
// init connects to MySQL using the configured credentials, synchronizes the
// schema for the core tables, computes the initial statistics, and starts a
// cron scheduler that refreshes them every five minutes.
func init() {
	var err error
	x, err = xorm.NewEngine("mysql", fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8",
		setting.Cfg.MustValue("database", "USER"),
		setting.Cfg.MustValue("database", "PASSWD"),
		setting.Cfg.MustValue("database", "HOST"),
		setting.Cfg.MustValue("database", "NAME")))
	if err != nil {
		log.Fatal(4, "Fail to init new engine: %v", err)
	} else if err = x.Sync(new(Package), new(Revision), new(Downloader)); err != nil {
		log.Fatal(4, "Fail to sync database: %v", err)
	}
	// Compute once synchronously so Statistic is populated before serving.
	statistic()
	c := cron.New()
	// NOTE(review): AddFunc's result is discarded — if the cron spec were
	// ever invalid the job would be silently dropped; confirm acceptable.
	c.AddFunc("@every 5m", statistic)
	c.Start()
}
// Ping verifies that the database connection is still alive.
func Ping() error {
	return x.Ping()
}
// DownloadStats aggregates download counters; embedded into Stats.
type DownloadStats struct {
	NumTotalDownload int64
}

// Stats is the site-wide snapshot refreshed by statistic().
type Stats struct {
	NumPackages, NumDownloaders int64
	DownloadStats
	TrendingPackages, NewPackages, PopularPackages []*Package
}

// Statistic is the current snapshot. NOTE(review): it is written by
// statistic() on a cron goroutine and presumably read by handlers without
// locking — confirm whether synchronization is needed.
var Statistic Stats
// statistic recomputes the package-level Statistic snapshot. It runs once at
// startup and then every five minutes via cron.
func statistic() {
	// Bug fix: accumulate into a local and assign. The old code did
	// `Statistic.NumTotalDownload += ...` without ever resetting the field,
	// so every periodic run re-added all previous downloads on top of the
	// running total (unbounded double counting).
	var totalDownloads int64
	x.Iterate(new(Package), func(idx int, bean interface{}) error {
		pkg := bean.(*Package)
		totalDownloads += pkg.DownloadCount
		return nil
	})
	Statistic.NumTotalDownload = totalDownloads

	Statistic.NumPackages, _ = x.Count(new(Package))
	Statistic.NumDownloaders, _ = x.Count(new(Downloader))

	Statistic.TrendingPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("recent_download").Find(&Statistic.TrendingPackages)
	Statistic.NewPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("created").Find(&Statistic.NewPackages)
	Statistic.PopularPackages = make([]*Package, 0, 15)
	x.Limit(15).Desc("download_count").Find(&Statistic.PopularPackages)
}
|
package models
import (
"encoding/json"
"fmt"
"io/ioutil"
"os/exec"
"github.com/astaxie/beego"
)
// listener caches the command table; nil until first loaded by Listener.
var listener map[string][]string = nil

// Listener lazily loads the listener table (command argv lists keyed by
// name) from the JSON file named by the "listen" config entry. It panics if
// the file cannot be read or parsed.
//
// NOTE(review): the lazy initialization is not goroutine-safe; confirm
// callers are single-threaded or guard with sync.Once.
func Listener() map[string][]string {
	if listener == nil {
		fn := beego.AppConfig.String("listen")
		data, err := ioutil.ReadFile(fn)
		// Bug fix: the old code logged the raw file bytes at error level
		// unconditionally, before err was even checked; that stray debug
		// line is removed and the error path is checked first.
		if err != nil {
			panic(err)
		}
		if err = json.Unmarshal(data, &listener); err != nil {
			panic(err)
		}
	}
	return listener
}
// RunCommand executes the given argv-style command and returns its stdout.
// On failure the error is logged and returned alongside an empty string.
//
// Fixes: leftover debug fmt.Println calls removed; "except" typo in the
// error message corrected.
func RunCommand(command []string) (string, error) {
	cmd := exec.Command(command[0], command[1:]...)
	message, err := cmd.Output()
	if err != nil {
		beego.Error(err)
		return "", err
	}
	if message == nil {
		// cmd.Output can yield a nil slice when the command wrote nothing;
		// as before, treat that as an error.
		return "", fmt.Errorf("expected a message or error but nil")
	}
	beego.Info(message)
	return string(message), nil
}
func CheckErr(err error) {
if err != nil {
panic(err)
}
}
delete fmt.Println
package models
import (
"encoding/json"
"fmt"
"io/ioutil"
"os/exec"
"github.com/astaxie/beego"
)
// listener caches the command table; nil until first loaded by Listener.
var listener map[string][]string = nil

// Listener lazily loads the listener table (command argv lists keyed by
// name) from the JSON file named by the "listen" config entry. It panics if
// the file cannot be read or parsed.
//
// NOTE(review): the lazy initialization is not goroutine-safe; confirm
// callers are single-threaded or guard with sync.Once.
func Listener() map[string][]string {
	if listener == nil {
		fn := beego.AppConfig.String("listen")
		data, err := ioutil.ReadFile(fn)
		// Bug fix: the old code logged the raw file bytes at error level
		// unconditionally, before err was even checked; that stray debug
		// line is removed and the error path is checked first.
		if err != nil {
			panic(err)
		}
		if err = json.Unmarshal(data, &listener); err != nil {
			panic(err)
		}
	}
	return listener
}
// RunCommand executes the given argv-style command and returns its stdout.
// On failure the error is logged and returned alongside an empty string.
func RunCommand(command []string) (string, error) {
	cmd := exec.Command(command[0], command[1:]...)
	message, err := cmd.Output()
	if err != nil {
		beego.Error(err)
		return "", err
	}
	if message == nil {
		// cmd.Output can yield a nil slice when the command wrote nothing;
		// as before, treat that as an error ("except" typo fixed).
		return "", fmt.Errorf("expected a message or error but nil")
	}
	beego.Info(message)
	return string(message), nil
}
func CheckErr(err error) {
if err != nil {
panic(err)
}
}
|
package models
import (
"database/sql"
"fmt"
"log"
"sort"
"strings"
"time"
"github.com/brnstz/bus/common"
"github.com/jmoiron/sqlx"
)
// maxStops bounds how many departures are fetched per stop (used in the
// scheduling queries below).
const maxStops = 3

// Trip is one GTFS trip: an id, its headsign text, and travel direction.
type Trip struct {
	Id string
	Headsign string
	DirectionId int
}

// Service identifies a service calendar entry for a route.
type Service struct {
	Id string
	RouteId string
}

// ScheduledStopTime is one scheduled departure at a stop, with the
// departure expressed as seconds past midnight.
type ScheduledStopTime struct {
	RouteId string `db:"route_id"`
	StopId string `db:"stop_id"`
	ServiceId string `db:"service_id"`
	DepartureSec int `db:"departure_sec"`
}
func df(t time.Time) string {
return t.Format("2006-01-02")
}
type timeSlice []time.Time
func (p timeSlice) Len() int {
return len(p)
}
func (p timeSlice) Less(i, j int) bool {
return p[i].Before(p[j])
}
func (p timeSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// NewScheduledStopTime builds a ScheduledStopTime from raw strings,
// converting the departure time string (presumably HH:MM:SS — see
// common.TimeStrToSecs) into seconds past midnight. The error result is
// always nil in the current implementation.
func NewScheduledStopTime(routeId, stopId, serviceId, timeStr string) (sst ScheduledStopTime, err error) {
	sst = ScheduledStopTime{
		RouteId:      routeId,
		StopId:       stopId,
		ServiceId:    serviceId,
		DepartureSec: common.TimeStrToSecs(timeStr),
	}
	return sst, err
}
// String renders the stop time for debugging, showing both the formatted
// departure time and the raw seconds value.
func (s ScheduledStopTime) String() string {
	departure := common.SecsToTimeStr(s.DepartureSec)
	return fmt.Sprintf("{%v %v %v @ %v (%v)}",
		s.RouteId, s.ServiceId, s.StopId, departure, s.DepartureSec)
}
// Stop is one transit stop on a route, including its distance from a
// queried location and (when populated) its upcoming departures.
type Stop struct {
	Id string `json:"stop_id" db:"stop_id"`
	Name string `json:"stop_name" db:"stop_name"`
	RouteId string `json:"route_id" db:"route_id"`
	StationType string `json:"station_type" db:"stype"`
	DirectionId int `json:"direction_id" db:"direction_id"`
	Headsign string `json:"headsign" db:"headsign"`
	Lat float64 `json:"lat" db:"lat"`
	Lon float64 `json:"lon" db:"lon"`
	// Dist is computed by GetStopsByLoc via earth_distance — presumably
	// meters; confirm against the earthdistance extension setup.
	Dist float64 `json:"dist" db:"dist"`
	Scheduled []*Departure `json:"scheduled"`
	Live []*Departure `json:"live"`
}
// String renders a short debugging description of the stop.
func (s Stop) String() string {
	return fmt.Sprintf(
		"{%v %v %v %v @ (%v,%v)}",
		s.Id, s.Name, s.RouteId, s.Headsign, s.Lat, s.Lon)
}
// Key returns a map key unique per (stop id, route id) pair. Both fields
// are strings, so plain concatenation matches the old Sprintf("%v%v", ...).
func (s Stop) Key() string {
	return s.Id + s.RouteId
}
// getServiceIdByDay returns the service calendar id in effect for
// routeId on the given lowercase weekday name, choosing the row with
// the latest start_date whose end_date is still after now. When no
// service matches, row.Scan surfaces sql.ErrNoRows to the caller.
func getServiceIdByDay(db sqlx.Ext, routeId, day string, now *time.Time) (serviceId string, err error) {
	row := db.QueryRowx(`
SELECT service_id, route_id, max(start_date)
FROM service_route_day
WHERE day = $1 AND
end_date > $2 AND
route_id = $3
GROUP BY service_id, route_id
LIMIT 1
`, day, now, routeId,
	)
	// route_id and max(start_date) are selected only to drive the
	// GROUP BY/aggregate; their scanned values are discarded.
	var dummy1 string
	var dummy2 time.Time
	err = row.Scan(&serviceId, &dummy1, &dummy2)
	if err != nil {
		log.Println("can't scan service id", err, day, now, routeId)
		return
	}
	return
}
// GetStopsByLoc returns the stops within meters of (lat, lon), nearest
// first, optionally restricted to station type filter. Each stop is
// annotated with its next scheduled departures, merged from two service
// days: trips that began yesterday but depart here after midnight
// (departure_sec >= 86400) and today's remaining trips. The merged
// times are sorted and capped at maxStops per stop.
func GetStopsByLoc(db sqlx.Ext, lat, lon, meters float64, filter string) (stops []*Stop, err error) {
	stops = []*Stop{}
	params := []interface{}{lat, lon, lat, lon, meters}
	q := `
SELECT
stop_id,
stop_name,
direction_id,
headsign,
route_id,
stype,
latitude(location) AS lat,
longitude(location) AS lon,
earth_distance(location, ll_to_earth($1, $2)) AS dist
FROM stop
WHERE earth_box(ll_to_earth($3, $4), $5) @> location
`
	if len(filter) > 0 {
		q = q + ` AND stype = $6 `
		params = append(params, filter)
	}
	q = q + ` ORDER BY dist ASC `
	err = sqlx.Select(db, &stops, q, params...)
	if err != nil {
		log.Println("can't get stop", err)
		return
	}
	now := time.Now()
	for _, stop := range stops {
		allTimes := timeSlice{}
		yesterday := now.Add(-time.Hour * 12)
		yesterdayName := strings.ToLower(yesterday.Format("Monday"))
		todayName := strings.ToLower(now.Format("Monday"))
		// Trips from yesterday's service day can still depart after
		// midnight; only relevant when yesterday was a different
		// weekday than today.
		if yesterdayName != todayName {
			var yesterdayId string
			yesterdayId, err = getServiceIdByDay(
				db, stop.RouteId, yesterdayName, &now,
			)
			switch {
			case err == sql.ErrNoRows:
				// No service yesterday for this route: nothing can
				// carry over past midnight. BUG FIX: this used to
				// `break`, abandoning every remaining stop.
				err = nil
				log.Println("no rows, ok, moving on")
			case err != nil:
				log.Println("can't get yesterday id", err)
				return
			default:
				qYesterday := `
SELECT scheduled_stop_time.departure_sec
FROM scheduled_stop_time
WHERE route_id = $1 AND
stop_id = $2 AND
service_id = $3 AND
departure_sec >= 86400 AND
departure_sec > $4
ORDER BY departure_sec LIMIT $5
`
				// Current wall clock expressed in yesterday's frame:
				// seconds past *yesterday's* midnight.
				nowSecs :=
					now.Hour()*3600 + now.Minute()*60 + now.Second() + 86400
				ydaysecs := []int64{}
				err = sqlx.Select(db, &ydaysecs,
					qYesterday, stop.RouteId, stop.Id,
					yesterdayId, nowSecs, maxStops)
				if err != nil {
					log.Println("can't scan yesterday values", err)
					return
				}
				// Anchor at yesterday's midnight and offset by each
				// departure_sec to get absolute departure times.
				y, m, d := yesterday.Date()
				midnight := time.Date(y, m, d, 0, 0, 0, 0, yesterday.Location())
				for _, ydaysec := range ydaysecs {
					allTimes = append(allTimes,
						midnight.Add(time.Second*time.Duration(ydaysec)))
				}
			}
		}
		// Today's remaining departures.
		var todayId string
		todayId, err = getServiceIdByDay(db, stop.RouteId, todayName, &now)
		switch {
		case err == sql.ErrNoRows:
			// No service today; keep anything carried over from
			// yesterday. BUG FIX: this used to `break`, abandoning
			// every remaining stop.
			err = nil
			log.Println("no rows there", err)
		case err != nil:
			log.Println("can't get today id", err)
			return
		default:
			qToday := `
SELECT scheduled_stop_time.departure_sec
FROM scheduled_stop_time
WHERE route_id = $1 AND
stop_id = $2 AND
service_id = $3 AND
departure_sec > $4
ORDER BY departure_sec LIMIT $5
`
			nowSecs := now.Hour()*3600 + now.Minute()*60 + now.Second()
			todaysecs := []int64{}
			err = sqlx.Select(db, &todaysecs, qToday, stop.RouteId, stop.Id,
				todayId, nowSecs, maxStops)
			// BUG FIX: check the query error before consuming its
			// results (the original ranged over todaysecs first).
			if err != nil {
				log.Println("can't scan today values", err)
				return
			}
			y, m, d := now.Date()
			midnight := time.Date(y, m, d, 0, 0, 0, 0, now.Location())
			for _, todaysec := range todaysecs {
				allTimes = append(allTimes,
					midnight.Add(time.Second*time.Duration(todaysec)))
			}
		}
		sort.Sort(allTimes)
		for i, thisTime := range allTimes {
			// BUG FIX: cap at maxStops entries; the original
			// `i > maxStops` allowed maxStops+1.
			if i >= maxStops {
				break
			}
			stop.Scheduled = append(
				stop.Scheduled, &Departure{Time: thisTime},
			)
		}
	}
	return stops, err
}
// ServiceRouteDay is one row of the service_route_day table: the date
// window during which ServiceId applies to RouteId on weekday Day.
type ServiceRouteDay struct {
	ServiceId string
	RouteId   string
	Day       string
	StartDate time.Time
	EndDate   time.Time
}
// String renders the record for debugging, with dates as YYYY-MM-DD.
func (s ServiceRouteDay) String() string {
	return fmt.Sprintf("{%v %v %v %v %v}",
		s.ServiceId, s.RouteId, s.Day, df(s.StartDate), df(s.EndDate),
	)
}
// ServiceRouteException marks a date on which the normal service
// calendar for a route does not apply (not yet consulted in this file).
type ServiceRouteException struct {
	ServiceId     string
	RouteId       string
	ExceptionDate time.Time
}
// Departure is a single upcoming departure time attached to a Stop.
type Departure struct {
	Time time.Time `json:"time" db:"time"`
	// FIXME: stops away? miles away?
}
return exactly one stop per route
package models
import (
"database/sql"
"fmt"
"log"
"sort"
"strings"
"time"
"github.com/brnstz/bus/common"
"github.com/jmoiron/sqlx"
)
// maxStops caps the number of scheduled departures attached to each stop.
const maxStops = 3
// Trip identifies one scheduled run along a route, with its headsign
// and direction id (presumably GTFS-style data — confirm against the
// importer).
type Trip struct {
	Id          string
	Headsign    string
	DirectionId int
}
// Service ties a service calendar id to the route it applies to.
type Service struct {
	Id      string
	RouteId string
}
// ScheduledStopTime is one scheduled departure: the time, as seconds
// after midnight of the service day, that a trip on RouteId leaves
// StopId under service calendar ServiceId. db tags map to the
// scheduled_stop_time table.
type ScheduledStopTime struct {
	RouteId      string `db:"route_id"`
	StopId       string `db:"stop_id"`
	ServiceId    string `db:"service_id"`
	DepartureSec int    `db:"departure_sec"`
}
func df(t time.Time) string {
return t.Format("2006-01-02")
}
type timeSlice []time.Time
func (p timeSlice) Len() int {
return len(p)
}
func (p timeSlice) Less(i, j int) bool {
return p[i].Before(p[j])
}
func (p timeSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// NewScheduledStopTime builds a ScheduledStopTime for the given route,
// stop and service calendar, converting timeStr to seconds after
// midnight via common.TimeStrToSecs. The returned err is currently
// always nil.
func NewScheduledStopTime(routeId, stopId, serviceId, timeStr string) (sst ScheduledStopTime, err error) {
	dsec := common.TimeStrToSecs(timeStr)
	sst = ScheduledStopTime{
		RouteId:      routeId,
		StopId:       stopId,
		ServiceId:    serviceId,
		DepartureSec: dsec,
	}
	return
}
// String renders the stop time for debugging as
// "{route service stop @ time (secs)}", where time is the
// human-readable form produced by common.SecsToTimeStr.
func (s ScheduledStopTime) String() string {
	return fmt.Sprintf("{%v %v %v @ %v (%v)}",
		s.RouteId, s.ServiceId, s.StopId,
		common.SecsToTimeStr(s.DepartureSec), s.DepartureSec,
	)
}
// Stop is a transit stop on a particular route/direction as selected by
// GetStopsByLoc. Lat, Lon and Dist are computed columns from the
// location query; Scheduled is filled in afterwards with upcoming
// departures. Live is declared but not populated in this file.
type Stop struct {
	Id          string  `json:"stop_id" db:"stop_id"`
	Name        string  `json:"stop_name" db:"stop_name"`
	RouteId     string  `json:"route_id" db:"route_id"`
	StationType string  `json:"station_type" db:"stype"`
	DirectionId int     `json:"direction_id" db:"direction_id"`
	Headsign    string  `json:"headsign" db:"headsign"`
	Lat         float64 `json:"lat" db:"lat"`
	Lon         float64 `json:"lon" db:"lon"`
	Dist        float64 `json:"dist" db:"dist"`
	Scheduled   []*Departure `json:"scheduled"`
	Live        []*Departure `json:"live"`
}
// String renders the stop for debugging as "{id name route headsign @ (lat,lon)}".
func (s Stop) String() string {
	return fmt.Sprintf("{%v %v %v %v @ (%v,%v)}",
		s.Id, s.Name, s.RouteId, s.Headsign, s.Lat, s.Lon,
	)
}
// Key returns an identity for the stop unique per (stop, route) pair,
// formed by concatenating the two ids.
func (s Stop) Key() string {
	return s.Id + s.RouteId
}
// getServiceIdByDay returns the service calendar id in effect for
// routeId on the given lowercase weekday name, choosing the row with
// the latest start_date whose end_date is still after now. When no
// service matches, row.Scan surfaces sql.ErrNoRows to the caller.
func getServiceIdByDay(db sqlx.Ext, routeId, day string, now *time.Time) (serviceId string, err error) {
	row := db.QueryRowx(`
SELECT service_id, route_id, max(start_date)
FROM service_route_day
WHERE day = $1 AND
end_date > $2 AND
route_id = $3
GROUP BY service_id, route_id
LIMIT 1
`, day, now, routeId,
	)
	// route_id and max(start_date) are selected only to drive the
	// GROUP BY/aggregate; their scanned values are discarded.
	var dummy1 string
	var dummy2 time.Time
	err = row.Scan(&serviceId, &dummy1, &dummy2)
	if err != nil {
		log.Println("can't scan service id", err, day, now, routeId)
		return
	}
	return
}
// GetStopsByLoc returns at most one stop per (route, direction) within
// meters of (lat, lon), nearest first, optionally restricted to station
// type filter. Each stop is annotated with its next scheduled
// departures, merged from two service days: trips that began yesterday
// but depart here after midnight (departure_sec >= 86400) and today's
// remaining trips. The merged times are sorted and capped at maxStops
// per stop.
func GetStopsByLoc(db sqlx.Ext, lat, lon, meters float64, filter string) (stops []*Stop, err error) {
	stops = []*Stop{}
	params := []interface{}{lat, lon, lat, lon, meters}
	q := `
SELECT * FROM (
SELECT
DISTINCT ON (route_id, direction_id)
stop_id,
stop_name,
direction_id,
headsign,
route_id,
stype,
latitude(location) AS lat,
longitude(location) AS lon,
earth_distance(location, ll_to_earth($1, $2)) AS dist
FROM stop
WHERE earth_box(ll_to_earth($3, $4), $5) @> location
`
	if len(filter) > 0 {
		q = q + ` AND stype = $6 `
		params = append(params, filter)
	}
	q = q + `
ORDER BY route_id, direction_id
) unique_routes
ORDER BY dist ASC
`
	err = sqlx.Select(db, &stops, q, params...)
	if err != nil {
		log.Println("can't get stop", err)
		return
	}
	now := time.Now()
	for _, stop := range stops {
		allTimes := timeSlice{}
		yesterday := now.Add(-time.Hour * 12)
		yesterdayName := strings.ToLower(yesterday.Format("Monday"))
		todayName := strings.ToLower(now.Format("Monday"))
		// Trips from yesterday's service day can still depart after
		// midnight; only relevant when yesterday was a different
		// weekday than today.
		if yesterdayName != todayName {
			var yesterdayId string
			yesterdayId, err = getServiceIdByDay(
				db, stop.RouteId, yesterdayName, &now,
			)
			switch {
			case err == sql.ErrNoRows:
				// No service yesterday for this route: nothing can
				// carry over past midnight. BUG FIX: this used to
				// `break`, abandoning every remaining stop.
				err = nil
				log.Println("no rows, ok, moving on")
			case err != nil:
				log.Println("can't get yesterday id", err)
				return
			default:
				qYesterday := `
SELECT scheduled_stop_time.departure_sec
FROM scheduled_stop_time
WHERE route_id = $1 AND
stop_id = $2 AND
service_id = $3 AND
departure_sec >= 86400 AND
departure_sec > $4
ORDER BY departure_sec LIMIT $5
`
				// Current wall clock expressed in yesterday's frame:
				// seconds past *yesterday's* midnight.
				nowSecs :=
					now.Hour()*3600 + now.Minute()*60 + now.Second() + 86400
				ydaysecs := []int64{}
				err = sqlx.Select(db, &ydaysecs,
					qYesterday, stop.RouteId, stop.Id,
					yesterdayId, nowSecs, maxStops)
				if err != nil {
					log.Println("can't scan yesterday values", err)
					return
				}
				// Anchor at yesterday's midnight and offset by each
				// departure_sec to get absolute departure times.
				y, m, d := yesterday.Date()
				midnight := time.Date(y, m, d, 0, 0, 0, 0, yesterday.Location())
				for _, ydaysec := range ydaysecs {
					allTimes = append(allTimes,
						midnight.Add(time.Second*time.Duration(ydaysec)))
				}
			}
		}
		// Today's remaining departures.
		var todayId string
		todayId, err = getServiceIdByDay(db, stop.RouteId, todayName, &now)
		switch {
		case err == sql.ErrNoRows:
			// No service today; keep anything carried over from
			// yesterday. BUG FIX: this used to `break`, abandoning
			// every remaining stop.
			err = nil
			log.Println("no rows there", err)
		case err != nil:
			log.Println("can't get today id", err)
			return
		default:
			qToday := `
SELECT scheduled_stop_time.departure_sec
FROM scheduled_stop_time
WHERE route_id = $1 AND
stop_id = $2 AND
service_id = $3 AND
departure_sec > $4
ORDER BY departure_sec LIMIT $5
`
			nowSecs := now.Hour()*3600 + now.Minute()*60 + now.Second()
			todaysecs := []int64{}
			err = sqlx.Select(db, &todaysecs, qToday, stop.RouteId, stop.Id,
				todayId, nowSecs, maxStops)
			// BUG FIX: check the query error before consuming its
			// results (the original ranged over todaysecs first).
			if err != nil {
				log.Println("can't scan today values", err)
				return
			}
			y, m, d := now.Date()
			midnight := time.Date(y, m, d, 0, 0, 0, 0, now.Location())
			for _, todaysec := range todaysecs {
				allTimes = append(allTimes,
					midnight.Add(time.Second*time.Duration(todaysec)))
			}
		}
		sort.Sort(allTimes)
		for i, thisTime := range allTimes {
			// BUG FIX: cap at maxStops entries; the original
			// `i > maxStops` allowed maxStops+1.
			if i >= maxStops {
				break
			}
			stop.Scheduled = append(
				stop.Scheduled, &Departure{Time: thisTime},
			)
		}
	}
	return stops, err
}
// ServiceRouteDay is one row of the service_route_day table: the date
// window during which ServiceId applies to RouteId on weekday Day.
type ServiceRouteDay struct {
	ServiceId string
	RouteId   string
	Day       string
	StartDate time.Time
	EndDate   time.Time
}
// String renders the record for debugging, with dates as YYYY-MM-DD.
func (s ServiceRouteDay) String() string {
	return fmt.Sprintf("{%v %v %v %v %v}",
		s.ServiceId, s.RouteId, s.Day, df(s.StartDate), df(s.EndDate),
	)
}
// ServiceRouteException marks a date on which the normal service
// calendar for a route does not apply (not yet consulted in this file).
type ServiceRouteException struct {
	ServiceId     string
	RouteId       string
	ExceptionDate time.Time
}
// Departure is a single upcoming departure time attached to a Stop.
type Departure struct {
	Time time.Time `json:"time" db:"time"`
	// FIXME: stops away? miles away?
}
|
// This package implements a provisioner for Packer that uses
// Chef to provision the remote machine, specifically with chef-client (that is,
// with a Chef server).
package chefclient
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/provisioner"
"github.com/hashicorp/packer/template/interpolate"
)
// guestOSTypeConfig holds the per-guest-OS command templates and
// staging directory defaults used by the chef-client provisioner.
type guestOSTypeConfig struct {
	executeCommand string // template for running chef-client
	installCommand string // template for installing Chef on the guest
	knifeCommand   string // template for running knife
	stagingDir     string // guest-side directory for uploaded files
}
// guestOSTypeConfigs maps a guest_os_type (provisioner.UnixOSType or
// provisioner.WindowsOSType) to its default commands and staging dir.
var guestOSTypeConfigs = map[string]guestOSTypeConfig{
	provisioner.UnixOSType: {
		executeCommand: "{{if .Sudo}}sudo {{end}}chef-client --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
		installCommand: "curl -L https://omnitruck.chef.io/install.sh | {{if .Sudo}}sudo {{end}}bash",
		knifeCommand:   "{{if .Sudo}}sudo {{end}}knife {{.Args}} {{.Flags}}",
		stagingDir:     "/tmp/packer-chef-client",
	},
	provisioner.WindowsOSType: {
		executeCommand: "c:/opscode/chef/bin/chef-client.bat --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
		installCommand: "powershell.exe -Command \". { iwr -useb https://omnitruck.chef.io/install.ps1 } | iex; install\"",
		knifeCommand:   "c:/opscode/chef/bin/knife.bat {{.Args}} {{.Flags}}",
		stagingDir:     "C:/Windows/Temp/packer-chef-client",
	},
}
// Config is the user-facing configuration of the chef-client
// provisioner, decoded from the Packer template; mapstructure tags give
// the template key names. Empty string/zero values are replaced with
// per-guest-OS defaults in Prepare.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	Json map[string]interface{}
	ChefEnvironment            string   `mapstructure:"chef_environment"`
	ClientKey                  string   `mapstructure:"client_key"`
	ConfigTemplate             string   `mapstructure:"config_template"`
	EncryptedDataBagSecretPath string   `mapstructure:"encrypted_data_bag_secret_path"`
	ExecuteCommand             string   `mapstructure:"execute_command"`
	GuestOSType                string   `mapstructure:"guest_os_type"`
	InstallCommand             string   `mapstructure:"install_command"`
	KnifeCommand               string   `mapstructure:"knife_command"`
	NodeName                   string   `mapstructure:"node_name"`
	PolicyGroup                string   `mapstructure:"policy_group"`
	PolicyName                 string   `mapstructure:"policy_name"`
	PreventSudo                bool     `mapstructure:"prevent_sudo"`
	RunList                    []string `mapstructure:"run_list"`
	ServerUrl                  string   `mapstructure:"server_url"`
	SkipCleanClient            bool     `mapstructure:"skip_clean_client"`
	SkipCleanNode              bool     `mapstructure:"skip_clean_node"`
	SkipCleanStagingDirectory  bool     `mapstructure:"skip_clean_staging_directory"`
	SkipInstall                bool     `mapstructure:"skip_install"`
	SslVerifyMode              string   `mapstructure:"ssl_verify_mode"`
	TrustedCertsDir            string   `mapstructure:"trusted_certs_dir"`
	StagingDir                 string   `mapstructure:"staging_directory"`
	ValidationClientName       string   `mapstructure:"validation_client_name"`
	ValidationKeyPath          string   `mapstructure:"validation_key_path"`
	ctx interpolate.Context
}
// Provisioner installs and runs chef-client on the target machine and
// optionally cleans up the node/client on the Chef server afterwards.
type Provisioner struct {
	config            Config
	guestOSTypeConfig guestOSTypeConfig
	guestCommands     *provisioner.GuestCommands
}
// ConfigTemplate is the interpolation context used to render client.rb
// and knife.rb.
type ConfigTemplate struct {
	ChefEnvironment            string
	ClientKey                  string
	EncryptedDataBagSecretPath string
	NodeName                   string
	PolicyGroup                string
	PolicyName                 string
	ServerUrl                  string
	SslVerifyMode              string
	TrustedCertsDir            string
	ValidationClientName       string
	ValidationKeyPath          string
}
// ExecuteTemplate is the interpolation context for execute_command.
type ExecuteTemplate struct {
	ConfigPath string
	JsonPath   string
	Sudo       bool
}
// InstallChefTemplate is the interpolation context for install_command.
type InstallChefTemplate struct {
	Sudo bool
}
// KnifeTemplate is the interpolation context for knife_command.
type KnifeTemplate struct {
	Sudo  bool
	Flags string
	Args  string
}
// Prepare decodes and validates the provisioner configuration, applying
// per-guest-OS defaults for the execute/install/knife commands and the
// staging directory. Validation errors are accumulated into a
// MultiError so the user sees them all at once.
func (p *Provisioner) Prepare(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"execute_command",
				"install_command",
				"knife_command",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}
	if p.config.GuestOSType == "" {
		p.config.GuestOSType = provisioner.DefaultOSType
	}
	p.config.GuestOSType = strings.ToLower(p.config.GuestOSType)
	var ok bool
	p.guestOSTypeConfig, ok = guestOSTypeConfigs[p.config.GuestOSType]
	if !ok {
		return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
	}
	p.guestCommands, err = provisioner.NewGuestCommands(p.config.GuestOSType, !p.config.PreventSudo)
	if err != nil {
		return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
	}
	// Apply per-OS defaults for anything the user did not set.
	if p.config.ExecuteCommand == "" {
		p.config.ExecuteCommand = p.guestOSTypeConfig.executeCommand
	}
	if p.config.InstallCommand == "" {
		p.config.InstallCommand = p.guestOSTypeConfig.installCommand
	}
	if p.config.RunList == nil {
		p.config.RunList = make([]string, 0)
	}
	if p.config.StagingDir == "" {
		p.config.StagingDir = p.guestOSTypeConfig.stagingDir
	}
	if p.config.KnifeCommand == "" {
		p.config.KnifeCommand = p.guestOSTypeConfig.knifeCommand
	}
	var errs *packer.MultiError
	if p.config.ConfigTemplate != "" {
		fi, err := os.Stat(p.config.ConfigTemplate)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Bad config template path: %s", err))
		} else if fi.IsDir() {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Config template path must be a file: %s", err))
		}
	}
	// FIX: this validation previously appeared twice verbatim, which
	// reported the same error to the user twice; the duplicate block
	// has been removed.
	if p.config.EncryptedDataBagSecretPath != "" {
		pFileInfo, err := os.Stat(p.config.EncryptedDataBagSecretPath)
		if err != nil || pFileInfo.IsDir() {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Bad encrypted data bag secret '%s': %s", p.config.EncryptedDataBagSecretPath, err))
		}
	}
	if p.config.ServerUrl == "" {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("server_url must be set"))
	}
	if (p.config.PolicyName != "") != (p.config.PolicyGroup != "") {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("If either policy_name or policy_group are set, they must both be set."))
	}
	jsonValid := true
	for k, v := range p.config.Json {
		p.config.Json[k], err = p.deepJsonFix(k, v)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing JSON: %s", err))
			jsonValid = false
		}
	}
	if jsonValid {
		// Process the user variables within the JSON and set the JSON.
		// Do this early so that we can validate and show errors.
		p.config.Json, err = p.processJsonUserVars()
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing user variables in JSON: %s", err))
		}
	}
	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// Provision runs chef-client on the target machine: optionally installs
// Chef, creates the staging directory, uploads keys/secrets, renders
// client.rb and the first-boot JSON, executes chef-client, and then
// (unless skipped) deletes the node and client from the Chef server and
// removes the staging directory.
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
	nodeName := p.config.NodeName
	if nodeName == "" {
		// Generate a unique node name so repeated builds don't collide.
		nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
	}
	remoteValidationKeyPath := ""
	serverUrl := p.config.ServerUrl
	if !p.config.SkipInstall {
		if err := p.installChef(ui, comm); err != nil {
			return fmt.Errorf("Error installing Chef: %s", err)
		}
	}
	if err := p.createDir(ui, comm, p.config.StagingDir); err != nil {
		return fmt.Errorf("Error creating staging directory: %s", err)
	}
	if p.config.ClientKey == "" {
		p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir)
	}
	encryptedDataBagSecretPath := ""
	if p.config.EncryptedDataBagSecretPath != "" {
		encryptedDataBagSecretPath = fmt.Sprintf("%s/encrypted_data_bag_secret", p.config.StagingDir)
		if err := p.uploadFile(ui,
			comm,
			encryptedDataBagSecretPath,
			p.config.EncryptedDataBagSecretPath); err != nil {
			return fmt.Errorf("Error uploading encrypted data bag secret: %s", err)
		}
	}
	if p.config.ValidationKeyPath != "" {
		remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
		if err := p.uploadFile(ui, comm, remoteValidationKeyPath, p.config.ValidationKeyPath); err != nil {
			return fmt.Errorf("Error copying validation key: %s", err)
		}
	}
	configPath, err := p.createConfig(
		ui,
		comm,
		nodeName,
		serverUrl,
		p.config.ClientKey,
		encryptedDataBagSecretPath,
		remoteValidationKeyPath,
		p.config.ValidationClientName,
		p.config.ChefEnvironment,
		p.config.PolicyGroup,
		p.config.PolicyName,
		p.config.SslVerifyMode,
		p.config.TrustedCertsDir)
	if err != nil {
		return fmt.Errorf("Error creating Chef config file: %s", err)
	}
	jsonPath, err := p.createJson(ui, comm)
	if err != nil {
		return fmt.Errorf("Error creating JSON attributes: %s", err)
	}
	// Run chef-client; the error is deliberately checked only after the
	// server-side cleanup below so node/client removal happens even
	// when the run fails.
	err = p.executeChef(ui, comm, configPath, jsonPath)
	if !(p.config.SkipCleanNode && p.config.SkipCleanClient) {
		knifeConfigPath, knifeErr := p.createKnifeConfig(
			ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode, p.config.TrustedCertsDir)
		if knifeErr != nil {
			return fmt.Errorf("Error creating knife config on node: %s", knifeErr)
		}
		if !p.config.SkipCleanNode {
			if err := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err != nil {
				return fmt.Errorf("Error cleaning up chef node: %s", err)
			}
		}
		if !p.config.SkipCleanClient {
			if err := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err != nil {
				return fmt.Errorf("Error cleaning up chef client: %s", err)
			}
		}
	}
	if err != nil {
		return fmt.Errorf("Error executing Chef: %s", err)
	}
	if !p.config.SkipCleanStagingDirectory {
		if err := p.removeDir(ui, comm, p.config.StagingDir); err != nil {
			return fmt.Errorf("Error removing %s: %s", p.config.StagingDir, err)
		}
	}
	return nil
}
// Cancel aborts provisioning by exiting the whole Packer process.
func (p *Provisioner) Cancel() {
	// Just hard quit. It isn't a big deal if what we're doing keeps
	// running on the other side.
	os.Exit(0)
}
// uploadFile streams the local file at localPath to remotePath on the
// machine being provisioned.
func (p *Provisioner) uploadFile(ui packer.Ui, comm packer.Communicator, remotePath string, localPath string) error {
	ui.Message(fmt.Sprintf("Uploading %s...", localPath))
	src, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer src.Close()
	return comm.Upload(remotePath, src, nil)
}
// createConfig renders client.rb — from config_template when supplied,
// otherwise DefaultConfigTemplate — with the given connection and
// validation settings, uploads it to the staging directory, and returns
// its remote path.
func (p *Provisioner) createConfig(
	ui packer.Ui,
	comm packer.Communicator,
	nodeName string,
	serverUrl string,
	clientKey string,
	encryptedDataBagSecretPath,
	remoteKeyPath string,
	validationClientName string,
	chefEnvironment string,
	policyGroup string,
	policyName string,
	sslVerifyMode string,
	trustedCertsDir string) (string, error) {
	ui.Message("Creating configuration file 'client.rb'")
	// Read the template
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()
		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}
		tpl = string(tplBytes)
	}
	// Copy the context so the template data doesn't leak into p.config.ctx.
	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:                   nodeName,
		ServerUrl:                  serverUrl,
		ClientKey:                  clientKey,
		ValidationKeyPath:          remoteKeyPath,
		ValidationClientName:       validationClientName,
		ChefEnvironment:            chefEnvironment,
		PolicyGroup:                policyGroup,
		PolicyName:                 policyName,
		SslVerifyMode:              sslVerifyMode,
		TrustedCertsDir:            trustedCertsDir,
		EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}
	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}
	return remotePath, nil
}
// createKnifeConfig renders knife.rb from DefaultKnifeTemplate, uploads
// it to the staging directory, and returns its remote path. Used for
// the post-run node/client cleanup.
func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string, trustedCertsDir string) (string, error) {
	ui.Message("Creating configuration file 'knife.rb'")
	// Read the template
	tpl := DefaultKnifeTemplate
	// Copy the context so the template data doesn't leak into p.config.ctx.
	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:        nodeName,
		ServerUrl:       serverUrl,
		ClientKey:       clientKey,
		SslVerifyMode:   sslVerifyMode,
		TrustedCertsDir: trustedCertsDir,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}
	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}
	return remotePath, nil
}
// createJson renders the first-boot JSON attribute file from the
// configured attributes plus the run_list (when non-empty), uploads it
// to the staging directory, and returns its remote path.
func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) {
	ui.Message("Creating JSON attribute file")
	attrs := map[string]interface{}{}
	for key, val := range p.config.Json {
		attrs[key] = val
	}
	if len(p.config.RunList) > 0 {
		attrs["run_list"] = p.config.RunList
	}
	encoded, err := json.MarshalIndent(attrs, "", " ")
	if err != nil {
		return "", err
	}
	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "first-boot.json"))
	if uploadErr := comm.Upload(remotePath, bytes.NewReader(encoded), nil); uploadErr != nil {
		return "", uploadErr
	}
	return remotePath, nil
}
// createDir creates dir on the remote machine and chmods it 0777 so the
// connected (possibly non-root) user can write uploads into it. Fails
// on a non-zero exit status of either command.
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))
	cmd := &packer.RemoteCmd{Command: p.guestCommands.CreateDir(dir)}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}
	// Chmod the directory to 0777 just so that we can access it as our user
	cmd = &packer.RemoteCmd{Command: p.guestCommands.Chmod(dir, "0777")}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status. See output above for more info.")
	}
	return nil
}
// cleanNode deletes the named node from the Chef server via knife.
func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error {
	ui.Say("Cleaning up chef node...")
	deleteArgs := []string{"node", "delete", node}
	if err := p.knifeExec(ui, comm, node, knifeConfigPath, deleteArgs); err != nil {
		return fmt.Errorf("Failed to cleanup node: %s", err)
	}
	return nil
}
// cleanClient deletes the named API client from the Chef server via knife.
func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error {
	ui.Say("Cleaning up chef client...")
	deleteArgs := []string{"client", "delete", node}
	if err := p.knifeExec(ui, comm, node, knifeConfigPath, deleteArgs); err != nil {
		return fmt.Errorf("Failed to cleanup client: %s", err)
	}
	return nil
}
// knifeExec runs knife on the remote machine with the given subcommand
// args, forcing confirmation (-y) and pointing it at knifeConfigPath.
// The command line is rendered from the (possibly user-overridden)
// knife_command template. A non-zero exit status is an error.
func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string, args []string) error {
	flags := []string{
		"-y",
		"-c", knifeConfigPath,
	}
	p.config.ctx.Data = &KnifeTemplate{
		Sudo:  !p.config.PreventSudo,
		Flags: strings.Join(flags, " "),
		Args:  strings.Join(args, " "),
	}
	command, err := interpolate.Render(p.config.KnifeCommand, &p.config.ctx)
	if err != nil {
		return err
	}
	cmd := &packer.RemoteCmd{Command: command}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf(
			"Non-zero exit status. See output above for more info.\n\n"+
				"Command: %s",
			command)
	}
	return nil
}
// removeDir deletes dir on the remote machine.
// NOTE(review): unlike createDir, the command's exit status is not
// checked, so a failed removal is silently ignored — presumably
// best-effort cleanup; confirm before tightening.
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))
	cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	return nil
}
// executeChef renders execute_command with the uploaded client.rb and
// first-boot JSON paths and runs chef-client on the remote machine,
// failing on a non-zero exit status.
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error {
	p.config.ctx.Data = &ExecuteTemplate{
		ConfigPath: config,
		JsonPath:   json,
		Sudo:       !p.config.PreventSudo,
	}
	command, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
	if err != nil {
		return err
	}
	ui.Message(fmt.Sprintf("Executing Chef: %s", command))
	cmd := &packer.RemoteCmd{
		Command: command,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}
	return nil
}
// installChef renders install_command and runs it on the remote machine
// to install Chef, failing on a non-zero exit status.
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
	ui.Message("Installing Chef...")
	p.config.ctx.Data = &InstallChefTemplate{
		Sudo: !p.config.PreventSudo,
	}
	command, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx)
	if err != nil {
		return err
	}
	ui.Message(command)
	cmd := &packer.RemoteCmd{Command: command}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	if cmd.ExitStatus != 0 {
		return fmt.Errorf(
			"Install script exited with non-zero exit status %d", cmd.ExitStatus)
	}
	return nil
}
// deepJsonFix normalizes a decoded JSON value so it can be re-marshaled
// by encoding/json: []uint8 becomes string and
// map[interface{}]interface{} becomes map[string]interface{}, recursing
// through slices and maps. key is used only to build error messages.
func (p *Provisioner) deepJsonFix(key string, current interface{}) (interface{}, error) {
	if current == nil {
		return nil, nil
	}
	switch typed := current.(type) {
	case []interface{}:
		fixed := make([]interface{}, len(typed))
		for i, elem := range typed {
			item, err := p.deepJsonFix(fmt.Sprintf("%s[%d]", key, i), elem)
			if err != nil {
				return nil, err
			}
			fixed[i] = item
		}
		return fixed, nil
	case []uint8:
		return string(typed), nil
	case map[interface{}]interface{}:
		fixed := make(map[string]interface{})
		for k, v := range typed {
			name, ok := k.(string)
			if !ok {
				return nil, fmt.Errorf("%s: key is not string", key)
			}
			sub, err := p.deepJsonFix(
				fmt.Sprintf("%s.%s", key, name), v)
			if err != nil {
				return nil, err
			}
			fixed[name] = sub
		}
		return fixed, nil
	default:
		return current, nil
	}
}
// processJsonUserVars interpolates Packer user variables inside the
// configured JSON attributes: it marshals the JSON, runs the result
// through the template engine with JSON-escaped user variables, and
// unmarshals it back. The original user variables are restored
// afterwards.
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
	jsonBytes, err := json.Marshal(p.config.Json)
	if err != nil {
		// This really shouldn't happen since we literally just unmarshalled
		panic(err)
	}
	// Copy the user variables so that we can restore them later, and
	// make sure we make the quotes JSON-friendly in the user variables.
	originalUserVars := make(map[string]string)
	for k, v := range p.config.ctx.UserVariables {
		originalUserVars[k] = v
	}
	// Make sure we reset them no matter what
	defer func() {
		p.config.ctx.UserVariables = originalUserVars
	}()
	// Make the current user variables JSON string safe.
	for k, v := range p.config.ctx.UserVariables {
		v = strings.Replace(v, `\`, `\\`, -1)
		v = strings.Replace(v, `"`, `\"`, -1)
		p.config.ctx.UserVariables[k] = v
	}
	// Process the bytes with the template processor
	p.config.ctx.Data = nil
	jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx)
	if err != nil {
		return nil, err
	}
	var result map[string]interface{}
	if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
		return nil, err
	}
	return result, nil
}
// DefaultConfigTemplate is the client.rb rendered on the guest when no
// config_template is supplied; conditional sections are emitted only
// when the corresponding setting is non-empty.
var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
{{if ne .EncryptedDataBagSecretPath ""}}
encrypted_data_bag_secret "{{.EncryptedDataBagSecretPath}}"
{{end}}
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
validation_client_name "chef-validator"
{{end}}
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
{{if ne .PolicyGroup ""}}
policy_group "{{.PolicyGroup}}"
{{end}}
{{if ne .PolicyName ""}}
policy_name "{{.PolicyName}}"
{{end}}
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`
// DefaultKnifeTemplate is the knife.rb rendered on the guest for the
// post-run node/client cleanup.
var DefaultKnifeTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
node_name "{{.NodeName}}"
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`
remove duplicate code from chef provisioner
// This package implements a provisioner for Packer that uses
// Chef to provision the remote machine, specifically with chef-client (that is,
// with a Chef server).
package chefclient
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/common/uuid"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/provisioner"
"github.com/hashicorp/packer/template/interpolate"
)
// guestOSTypeConfig holds the per-guest-OS command templates and
// staging directory defaults used by the chef-client provisioner.
type guestOSTypeConfig struct {
	executeCommand string // template for running chef-client
	installCommand string // template for installing Chef on the guest
	knifeCommand   string // template for running knife
	stagingDir     string // guest-side directory for uploaded files
}
// guestOSTypeConfigs maps a guest_os_type (provisioner.UnixOSType or
// provisioner.WindowsOSType) to its default commands and staging dir.
var guestOSTypeConfigs = map[string]guestOSTypeConfig{
	provisioner.UnixOSType: {
		executeCommand: "{{if .Sudo}}sudo {{end}}chef-client --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
		installCommand: "curl -L https://omnitruck.chef.io/install.sh | {{if .Sudo}}sudo {{end}}bash",
		knifeCommand:   "{{if .Sudo}}sudo {{end}}knife {{.Args}} {{.Flags}}",
		stagingDir:     "/tmp/packer-chef-client",
	},
	provisioner.WindowsOSType: {
		executeCommand: "c:/opscode/chef/bin/chef-client.bat --no-color -c {{.ConfigPath}} -j {{.JsonPath}}",
		installCommand: "powershell.exe -Command \". { iwr -useb https://omnitruck.chef.io/install.ps1 } | iex; install\"",
		knifeCommand:   "c:/opscode/chef/bin/knife.bat {{.Args}} {{.Flags}}",
		stagingDir:     "C:/Windows/Temp/packer-chef-client",
	},
}
// Config is the user-facing configuration of the chef-client
// provisioner, decoded from the Packer template; mapstructure tags give
// the template key names. Empty string/zero values are replaced with
// per-guest-OS defaults in Prepare.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	Json map[string]interface{}
	ChefEnvironment            string   `mapstructure:"chef_environment"`
	ClientKey                  string   `mapstructure:"client_key"`
	ConfigTemplate             string   `mapstructure:"config_template"`
	EncryptedDataBagSecretPath string   `mapstructure:"encrypted_data_bag_secret_path"`
	ExecuteCommand             string   `mapstructure:"execute_command"`
	GuestOSType                string   `mapstructure:"guest_os_type"`
	InstallCommand             string   `mapstructure:"install_command"`
	KnifeCommand               string   `mapstructure:"knife_command"`
	NodeName                   string   `mapstructure:"node_name"`
	PolicyGroup                string   `mapstructure:"policy_group"`
	PolicyName                 string   `mapstructure:"policy_name"`
	PreventSudo                bool     `mapstructure:"prevent_sudo"`
	RunList                    []string `mapstructure:"run_list"`
	ServerUrl                  string   `mapstructure:"server_url"`
	SkipCleanClient            bool     `mapstructure:"skip_clean_client"`
	SkipCleanNode              bool     `mapstructure:"skip_clean_node"`
	SkipCleanStagingDirectory  bool     `mapstructure:"skip_clean_staging_directory"`
	SkipInstall                bool     `mapstructure:"skip_install"`
	SslVerifyMode              string   `mapstructure:"ssl_verify_mode"`
	TrustedCertsDir            string   `mapstructure:"trusted_certs_dir"`
	StagingDir                 string   `mapstructure:"staging_directory"`
	ValidationClientName       string   `mapstructure:"validation_client_name"`
	ValidationKeyPath          string   `mapstructure:"validation_key_path"`
	ctx interpolate.Context
}
// Provisioner installs and runs chef-client on the remote machine and
// optionally cleans the node/client from the Chef server afterwards.
type Provisioner struct {
	config Config
	// guestOSTypeConfig supplies the per-OS default commands and staging dir.
	guestOSTypeConfig guestOSTypeConfig
	// guestCommands builds OS-appropriate shell commands (mkdir, chmod, rm).
	guestCommands *provisioner.GuestCommands
}
// ConfigTemplate is the data rendered into the client.rb/knife.rb templates
// (DefaultConfigTemplate / DefaultKnifeTemplate or a user-supplied template).
type ConfigTemplate struct {
	ChefEnvironment            string
	ClientKey                  string
	EncryptedDataBagSecretPath string
	NodeName                   string
	PolicyGroup                string
	PolicyName                 string
	ServerUrl                  string
	SslVerifyMode              string
	TrustedCertsDir            string
	ValidationClientName       string
	ValidationKeyPath          string
}
// ExecuteTemplate is the data rendered into execute_command.
type ExecuteTemplate struct {
	ConfigPath string // remote path of client.rb
	JsonPath   string // remote path of first-boot.json
	Sudo       bool   // true unless prevent_sudo is set
}
// InstallChefTemplate is the data rendered into install_command.
type InstallChefTemplate struct {
	Sudo bool // true unless prevent_sudo is set
}
// KnifeTemplate is the data rendered into knife_command.
type KnifeTemplate struct {
	Sudo  bool   // true unless prevent_sudo is set
	Flags string // space-joined knife flags (e.g. "-y -c <config>")
	Args  string // space-joined knife subcommand and arguments
}
// Prepare decodes and validates the provisioner configuration, filling in
// per-guest-OS defaults for the command templates and staging directory.
// It accumulates validation problems into a MultiError and returns them all
// at once.
func (p *Provisioner) Prepare(raws ...interface{}) error {
	// The command templates are excluded from interpolation here because
	// they are rendered later with their own template data (Execute/Install/
	// KnifeTemplate).
	err := config.Decode(&p.config, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"execute_command",
				"install_command",
				"knife_command",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}
	if p.config.GuestOSType == "" {
		p.config.GuestOSType = provisioner.DefaultOSType
	}
	// Normalize so lookups in guestOSTypeConfigs are case-insensitive.
	p.config.GuestOSType = strings.ToLower(p.config.GuestOSType)
	var ok bool
	p.guestOSTypeConfig, ok = guestOSTypeConfigs[p.config.GuestOSType]
	if !ok {
		return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
	}
	p.guestCommands, err = provisioner.NewGuestCommands(p.config.GuestOSType, !p.config.PreventSudo)
	if err != nil {
		return fmt.Errorf("Invalid guest_os_type: \"%s\"", p.config.GuestOSType)
	}
	// Fill in per-OS defaults for anything the user left empty.
	if p.config.ExecuteCommand == "" {
		p.config.ExecuteCommand = p.guestOSTypeConfig.executeCommand
	}
	if p.config.InstallCommand == "" {
		p.config.InstallCommand = p.guestOSTypeConfig.installCommand
	}
	if p.config.RunList == nil {
		p.config.RunList = make([]string, 0)
	}
	if p.config.StagingDir == "" {
		p.config.StagingDir = p.guestOSTypeConfig.stagingDir
	}
	if p.config.KnifeCommand == "" {
		p.config.KnifeCommand = p.guestOSTypeConfig.knifeCommand
	}
	var errs *packer.MultiError
	if p.config.ConfigTemplate != "" {
		fi, err := os.Stat(p.config.ConfigTemplate)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Bad config template path: %s", err))
		} else if fi.IsDir() {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Config template path must be a file: %s", err))
		}
	}
	if p.config.ServerUrl == "" {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("server_url must be set"))
	}
	if p.config.EncryptedDataBagSecretPath != "" {
		pFileInfo, err := os.Stat(p.config.EncryptedDataBagSecretPath)
		if err != nil || pFileInfo.IsDir() {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Bad encrypted data bag secret '%s': %s", p.config.EncryptedDataBagSecretPath, err))
		}
	}
	// policy_name and policy_group are only meaningful together (XOR check).
	if (p.config.PolicyName != "") != (p.config.PolicyGroup != "") {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("If either policy_name or policy_group are set, they must both be set."))
	}
	// Normalize the JSON attributes (e.g. map[interface{}]interface{} keys
	// to strings) before substituting user variables into them.
	jsonValid := true
	for k, v := range p.config.Json {
		p.config.Json[k], err = p.deepJsonFix(k, v)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing JSON: %s", err))
			jsonValid = false
		}
	}
	if jsonValid {
		// Process the user variables within the JSON and set the JSON.
		// Do this early so that we can validate and show errors.
		p.config.Json, err = p.processJsonUserVars()
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing user variables in JSON: %s", err))
		}
	}
	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// Provision installs Chef (unless skipped), uploads the supporting files
// (validation key, data bag secret), writes client.rb and first-boot.json
// on the remote machine, runs chef-client, and finally removes the node and
// client from the Chef server plus the staging directory (unless skipped).
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
	nodeName := p.config.NodeName
	if nodeName == "" {
		// Generate a unique, sortable node name when none was configured.
		nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
	}
	remoteValidationKeyPath := ""
	serverUrl := p.config.ServerUrl
	if !p.config.SkipInstall {
		if err := p.installChef(ui, comm); err != nil {
			return fmt.Errorf("Error installing Chef: %s", err)
		}
	}
	if err := p.createDir(ui, comm, p.config.StagingDir); err != nil {
		return fmt.Errorf("Error creating staging directory: %s", err)
	}
	if p.config.ClientKey == "" {
		p.config.ClientKey = fmt.Sprintf("%s/client.pem", p.config.StagingDir)
	}
	encryptedDataBagSecretPath := ""
	if p.config.EncryptedDataBagSecretPath != "" {
		encryptedDataBagSecretPath = fmt.Sprintf("%s/encrypted_data_bag_secret", p.config.StagingDir)
		if err := p.uploadFile(ui,
			comm,
			encryptedDataBagSecretPath,
			p.config.EncryptedDataBagSecretPath); err != nil {
			return fmt.Errorf("Error uploading encrypted data bag secret: %s", err)
		}
	}
	if p.config.ValidationKeyPath != "" {
		remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
		if err := p.uploadFile(ui, comm, remoteValidationKeyPath, p.config.ValidationKeyPath); err != nil {
			return fmt.Errorf("Error copying validation key: %s", err)
		}
	}
	configPath, err := p.createConfig(
		ui,
		comm,
		nodeName,
		serverUrl,
		p.config.ClientKey,
		encryptedDataBagSecretPath,
		remoteValidationKeyPath,
		p.config.ValidationClientName,
		p.config.ChefEnvironment,
		p.config.PolicyGroup,
		p.config.PolicyName,
		p.config.SslVerifyMode,
		p.config.TrustedCertsDir)
	if err != nil {
		return fmt.Errorf("Error creating Chef config file: %s", err)
	}
	jsonPath, err := p.createJson(ui, comm)
	if err != nil {
		return fmt.Errorf("Error creating JSON attributes: %s", err)
	}
	// NOTE: the chef-client error is deliberately checked only AFTER the
	// knife cleanup below, so node/client cleanup still runs on failure.
	err = p.executeChef(ui, comm, configPath, jsonPath)
	if !(p.config.SkipCleanNode && p.config.SkipCleanClient) {
		knifeConfigPath, knifeErr := p.createKnifeConfig(
			ui, comm, nodeName, serverUrl, p.config.ClientKey, p.config.SslVerifyMode, p.config.TrustedCertsDir)
		if knifeErr != nil {
			return fmt.Errorf("Error creating knife config on node: %s", knifeErr)
		}
		if !p.config.SkipCleanNode {
			if err := p.cleanNode(ui, comm, nodeName, knifeConfigPath); err != nil {
				return fmt.Errorf("Error cleaning up chef node: %s", err)
			}
		}
		if !p.config.SkipCleanClient {
			if err := p.cleanClient(ui, comm, nodeName, knifeConfigPath); err != nil {
				return fmt.Errorf("Error cleaning up chef client: %s", err)
			}
		}
	}
	if err != nil {
		return fmt.Errorf("Error executing Chef: %s", err)
	}
	if !p.config.SkipCleanStagingDirectory {
		if err := p.removeDir(ui, comm, p.config.StagingDir); err != nil {
			return fmt.Errorf("Error removing %s: %s", p.config.StagingDir, err)
		}
	}
	return nil
}
// Cancel aborts the provisioner by terminating the whole process.
func (p *Provisioner) Cancel() {
	// Just hard quit. It isn't a big deal if what we're doing keeps
	// running on the other side.
	os.Exit(0)
}
// uploadFile copies a local file to remotePath on the target machine via
// the communicator, announcing the transfer on the UI.
func (p *Provisioner) uploadFile(ui packer.Ui, comm packer.Communicator, remotePath string, localPath string) error {
	ui.Message(fmt.Sprintf("Uploading %s...", localPath))

	src, err := os.Open(localPath)
	if err != nil {
		return err
	}
	defer src.Close()

	return comm.Upload(remotePath, src, nil)
}
// createConfig renders the client.rb configuration (from the user-supplied
// config_template or DefaultConfigTemplate) and uploads it to the staging
// directory. It returns the remote path of the uploaded file.
//
// Idiom fix: the custom template is now read with ioutil.ReadFile instead of
// the manual os.Open / ioutil.ReadAll / defer Close sequence — same behavior,
// no file handle to manage.
func (p *Provisioner) createConfig(
	ui packer.Ui,
	comm packer.Communicator,
	nodeName string,
	serverUrl string,
	clientKey string,
	encryptedDataBagSecretPath,
	remoteKeyPath string,
	validationClientName string,
	chefEnvironment string,
	policyGroup string,
	policyName string,
	sslVerifyMode string,
	trustedCertsDir string) (string, error) {
	ui.Message("Creating configuration file 'client.rb'")

	// Read the template, preferring a user-supplied one.
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		tplBytes, err := ioutil.ReadFile(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		tpl = string(tplBytes)
	}

	// Render on a copy of the context so the shared ctx.Data isn't mutated.
	ctx := p.config.ctx
	ctx.Data = &ConfigTemplate{
		NodeName:                   nodeName,
		ServerUrl:                  serverUrl,
		ClientKey:                  clientKey,
		ValidationKeyPath:          remoteKeyPath,
		ValidationClientName:       validationClientName,
		ChefEnvironment:            chefEnvironment,
		PolicyGroup:                policyGroup,
		PolicyName:                 policyName,
		SslVerifyMode:              sslVerifyMode,
		TrustedCertsDir:            trustedCertsDir,
		EncryptedDataBagSecretPath: encryptedDataBagSecretPath,
	}
	configString, err := interpolate.Render(tpl, &ctx)
	if err != nil {
		return "", err
	}

	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}
	return remotePath, nil
}
// createKnifeConfig renders DefaultKnifeTemplate into knife.rb and uploads
// it to the staging directory, returning the remote path.
func (p *Provisioner) createKnifeConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, clientKey string, sslVerifyMode string, trustedCertsDir string) (string, error) {
	ui.Message("Creating configuration file 'knife.rb'")

	// Render on a copy of the context so shared state is not mutated.
	renderCtx := p.config.ctx
	renderCtx.Data = &ConfigTemplate{
		NodeName:        nodeName,
		ServerUrl:       serverUrl,
		ClientKey:       clientKey,
		SslVerifyMode:   sslVerifyMode,
		TrustedCertsDir: trustedCertsDir,
	}

	rendered, err := interpolate.Render(DefaultKnifeTemplate, &renderCtx)
	if err != nil {
		return "", err
	}

	dest := filepath.ToSlash(filepath.Join(p.config.StagingDir, "knife.rb"))
	if uploadErr := comm.Upload(dest, bytes.NewReader([]byte(rendered)), nil); uploadErr != nil {
		return "", uploadErr
	}
	return dest, nil
}
// createJson serializes the configured node attributes (plus run_list, when
// set) into first-boot.json and uploads it to the staging directory.
func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) {
	ui.Message("Creating JSON attribute file")

	// Work on a copy so p.config.Json is never mutated.
	attributes := make(map[string]interface{}, len(p.config.Json)+1)
	for key, value := range p.config.Json {
		attributes[key] = value
	}

	// Set the run list if it was specified.
	if len(p.config.RunList) > 0 {
		attributes["run_list"] = p.config.RunList
	}

	encoded, err := json.MarshalIndent(attributes, "", " ")
	if err != nil {
		return "", err
	}

	// Upload the bytes to the staging directory.
	dest := filepath.ToSlash(filepath.Join(p.config.StagingDir, "first-boot.json"))
	if uploadErr := comm.Upload(dest, bytes.NewReader(encoded), nil); uploadErr != nil {
		return "", uploadErr
	}
	return dest, nil
}
// createDir creates dir on the remote machine and chmods it to 0777 so the
// (possibly unprivileged) connecting user can write into it.
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))

	// Create the directory, then open up its permissions; both steps must
	// succeed with a zero exit status.
	steps := []string{
		p.guestCommands.CreateDir(dir),
		p.guestCommands.Chmod(dir, "0777"),
	}
	for _, command := range steps {
		remote := &packer.RemoteCmd{Command: command}
		if err := remote.StartWithUi(comm, ui); err != nil {
			return err
		}
		if remote.ExitStatus != 0 {
			return fmt.Errorf("Non-zero exit status. See output above for more info.")
		}
	}
	return nil
}
// cleanNode deletes the node object from the Chef server via knife on the
// remote machine.
func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error {
	ui.Say("Cleaning up chef node...")
	err := p.knifeExec(ui, comm, node, knifeConfigPath, []string{"node", "delete", node})
	if err != nil {
		return fmt.Errorf("Failed to cleanup node: %s", err)
	}
	return nil
}
// cleanClient deletes the API client object from the Chef server via knife
// on the remote machine.
func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string) error {
	ui.Say("Cleaning up chef client...")
	err := p.knifeExec(ui, comm, node, knifeConfigPath, []string{"client", "delete", node})
	if err != nil {
		return fmt.Errorf("Failed to cleanup client: %s", err)
	}
	return nil
}
// knifeExec renders the configured knife_command with the given args plus
// the standard "-y -c <config>" flags and runs it on the remote machine,
// requiring a zero exit status.
func (p *Provisioner) knifeExec(ui packer.Ui, comm packer.Communicator, node string, knifeConfigPath string, args []string) error {
	p.config.ctx.Data = &KnifeTemplate{
		Sudo: !p.config.PreventSudo,
		// -y answers "yes" to confirmation prompts; -c points at knife.rb.
		Flags: strings.Join([]string{"-y", "-c", knifeConfigPath}, " "),
		Args:  strings.Join(args, " "),
	}

	rendered, err := interpolate.Render(p.config.KnifeCommand, &p.config.ctx)
	if err != nil {
		return err
	}

	remote := &packer.RemoteCmd{Command: rendered}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf(
			"Non-zero exit status. See output above for more info.\n\n"+
				"Command: %s",
			rendered)
	}
	return nil
}
// removeDir deletes dir on the remote machine using the per-OS remove
// command.
//
// NOTE(review): unlike createDir, the exit status is not checked here —
// presumably cleanup is best-effort so a failed removal doesn't fail the
// whole build; confirm before adding a status check.
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))
	cmd := &packer.RemoteCmd{Command: p.guestCommands.RemoveDir(dir)}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	return nil
}
// executeChef renders execute_command with the remote config/JSON paths and
// runs chef-client on the target machine, requiring a zero exit status.
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error {
	p.config.ctx.Data = &ExecuteTemplate{
		ConfigPath: config,
		JsonPath:   json,
		Sudo:       !p.config.PreventSudo,
	}

	rendered, err := interpolate.Render(p.config.ExecuteCommand, &p.config.ctx)
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing Chef: %s", rendered))
	remote := &packer.RemoteCmd{Command: rendered}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", remote.ExitStatus)
	}
	return nil
}
// installChef renders install_command and runs it on the remote machine to
// install Chef, requiring a zero exit status.
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
	ui.Message("Installing Chef...")

	p.config.ctx.Data = &InstallChefTemplate{Sudo: !p.config.PreventSudo}
	rendered, err := interpolate.Render(p.config.InstallCommand, &p.config.ctx)
	if err != nil {
		return err
	}

	// Echo the exact install command before running it.
	ui.Message(rendered)
	remote := &packer.RemoteCmd{Command: rendered}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf(
			"Install script exited with non-zero exit status %d", remote.ExitStatus)
	}
	return nil
}
// deepJsonFix recursively normalizes decoded config data so it can be
// marshaled as JSON: []uint8 becomes string, and map[interface{}]interface{}
// becomes map[string]interface{} (erroring on non-string keys). key is used
// only to build error paths like "a.b[2]".
func (p *Provisioner) deepJsonFix(key string, current interface{}) (interface{}, error) {
	if current == nil {
		return nil, nil
	}

	switch v := current.(type) {
	case []uint8:
		// Raw bytes are represented as a string.
		return string(v), nil
	case []interface{}:
		fixed := make([]interface{}, len(v))
		for i, elem := range v {
			e, err := p.deepJsonFix(fmt.Sprintf("%s[%d]", key, i), elem)
			if err != nil {
				return nil, err
			}
			fixed[i] = e
		}
		return fixed, nil
	case map[interface{}]interface{}:
		fixed := make(map[string]interface{}, len(v))
		for rawKey, elem := range v {
			name, ok := rawKey.(string)
			if !ok {
				return nil, fmt.Errorf("%s: key is not string", key)
			}
			e, err := p.deepJsonFix(fmt.Sprintf("%s.%s", key, name), elem)
			if err != nil {
				return nil, err
			}
			fixed[name] = e
		}
		return fixed, nil
	default:
		// Already JSON-friendly; leave untouched.
		return current, nil
	}
}
// processJsonUserVars interpolates Packer user variables inside the JSON
// attributes. It round-trips p.config.Json through a JSON string, runs the
// template engine over that string, and unmarshals the result. The shared
// ctx.UserVariables map is temporarily rewritten with JSON-escaped values
// and restored via defer — the order of these steps is load-bearing.
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
	jsonBytes, err := json.Marshal(p.config.Json)
	if err != nil {
		// This really shouldn't happen since we literally just unmarshalled
		panic(err)
	}
	// Copy the user variables so that we can restore them later, and
	// make sure we make the quotes JSON-friendly in the user variables.
	originalUserVars := make(map[string]string)
	for k, v := range p.config.ctx.UserVariables {
		originalUserVars[k] = v
	}
	// Make sure we reset them no matter what
	defer func() {
		p.config.ctx.UserVariables = originalUserVars
	}()
	// Make the current user variables JSON string safe.
	for k, v := range p.config.ctx.UserVariables {
		v = strings.Replace(v, `\`, `\\`, -1)
		v = strings.Replace(v, `"`, `\"`, -1)
		p.config.ctx.UserVariables[k] = v
	}
	// Process the bytes with the template processor
	p.config.ctx.Data = nil
	jsonBytesProcessed, err := interpolate.Render(string(jsonBytes), &p.config.ctx)
	if err != nil {
		return nil, err
	}
	var result map[string]interface{}
	if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
		return nil, err
	}
	return result, nil
}
// DefaultConfigTemplate is the client.rb template rendered by createConfig
// when no config_template is supplied. Conditional sections are emitted only
// when the corresponding ConfigTemplate field is non-empty.
var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
{{if ne .EncryptedDataBagSecretPath ""}}
encrypted_data_bag_secret "{{.EncryptedDataBagSecretPath}}"
{{end}}
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
validation_client_name "chef-validator"
{{end}}
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
{{if ne .PolicyGroup ""}}
policy_group "{{.PolicyGroup}}"
{{end}}
{{if ne .PolicyName ""}}
policy_name "{{.PolicyName}}"
{{end}}
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`
// DefaultKnifeTemplate is the knife.rb template rendered by
// createKnifeConfig for the node/client cleanup steps.
var DefaultKnifeTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
client_key "{{.ClientKey}}"
node_name "{{.NodeName}}"
{{if ne .SslVerifyMode ""}}
ssl_verify_mode :{{.SslVerifyMode}}
{{end}}
{{if ne .TrustedCertsDir ""}}
trusted_certs_dir "{{.TrustedCertsDir}}"
{{end}}
`
|
// This package implements a provisioner for Packer that uses
// Chef to provision the remote machine, specifically with chef-client (that is,
// with a Chef server).
package chefclient
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer"
)
// Config is the user-facing configuration for this (older) chef-client
// provisioner revision, decoded from the Packer template.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	ChefEnvironment string `mapstructure:"chef_environment"`
	// ConfigTemplate is a local path to a custom client.rb template.
	ConfigTemplate string `mapstructure:"config_template"`
	// ExecuteCommand / InstallCommand get defaults in Prepare when empty.
	ExecuteCommand string `mapstructure:"execute_command"`
	InstallCommand string `mapstructure:"install_command"`
	// Json holds arbitrary node attributes written to first-boot.json.
	Json map[string]interface{}
	// NodeName defaults to a generated "packer-<uuid>" name in Provision.
	NodeName string `mapstructure:"node_name"`
	// PreventSudo disables the sudo prefix in the default commands.
	PreventSudo bool `mapstructure:"prevent_sudo"`
	RunList     []string `mapstructure:"run_list"`
	// ServerUrl is required (validated in Prepare).
	ServerUrl string `mapstructure:"server_url"`
	// SkipClean* flags suppress the knife cleanup steps in Provision.
	SkipCleanClient bool `mapstructure:"skip_clean_client"`
	SkipCleanNode   bool `mapstructure:"skip_clean_node"`
	SkipInstall     bool `mapstructure:"skip_install"`
	// StagingDir defaults to /tmp/packer-chef-client in Prepare.
	StagingDir           string `mapstructure:"staging_directory"`
	ValidationKeyPath    string `mapstructure:"validation_key_path"`
	ValidationClientName string `mapstructure:"validation_client_name"`

	// tpl processes user-variable interpolation in this legacy revision.
	tpl *packer.ConfigTemplate
}
// Provisioner runs chef-client against a Chef server on the remote machine.
type Provisioner struct {
	config Config
}
// ConfigTemplate is the data rendered into the client.rb template.
type ConfigTemplate struct {
	NodeName             string
	ServerUrl            string
	ValidationKeyPath    string
	ValidationClientName string
	ChefEnvironment      string
}
// ExecuteTemplate is the data rendered into execute_command.
type ExecuteTemplate struct {
	ConfigPath string // remote path of client.rb
	JsonPath   string // remote path of first-boot.json
	Sudo       bool   // true unless prevent_sudo is set
}
// InstallChefTemplate is the data rendered into install_command.
type InstallChefTemplate struct {
	Sudo bool // true unless prevent_sudo is set
}
// Prepare decodes and validates the configuration, interpolating user
// variables into the string options, filling defaults, and accumulating all
// validation problems into a single MultiError.
func (p *Provisioner) Prepare(raws ...interface{}) error {
	md, err := common.DecodeConfig(&p.config, raws...)
	if err != nil {
		return err
	}
	p.config.tpl, err = packer.NewConfigTemplate()
	if err != nil {
		return err
	}
	p.config.tpl.UserVars = p.config.PackerUserVars
	// Accumulate any errors
	errs := common.CheckUnusedConfig(md)
	// String options that get user-variable interpolation applied now.
	templates := map[string]*string{
		"chef_environment":       &p.config.ChefEnvironment,
		"config_template":        &p.config.ConfigTemplate,
		"node_name":              &p.config.NodeName,
		"staging_dir":            &p.config.StagingDir,
		"chef_server_url":        &p.config.ServerUrl,
		"execute_command":        &p.config.ExecuteCommand,
		"install_command":        &p.config.InstallCommand,
		"validation_key_path":    &p.config.ValidationKeyPath,
		"validation_client_name": &p.config.ValidationClientName,
	}
	for n, ptr := range templates {
		var err error
		*ptr, err = p.config.tpl.Process(*ptr, nil)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing %s: %s", n, err))
		}
	}
	// Defaults for the command templates; {{if .Sudo}} is resolved later
	// against Execute/InstallChefTemplate data.
	if p.config.ExecuteCommand == "" {
		p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-client " +
			"--no-color -c {{.ConfigPath}} -j {{.JsonPath}}"
	}
	if p.config.InstallCommand == "" {
		p.config.InstallCommand = "curl -L " +
			"https://www.opscode.com/chef/install.sh | " +
			"{{if .Sudo}}sudo {{end}}bash"
	}
	if p.config.RunList == nil {
		p.config.RunList = make([]string, 0)
	}
	if p.config.StagingDir == "" {
		p.config.StagingDir = "/tmp/packer-chef-client"
	}
	// Interpolate user variables into each run_list entry.
	sliceTemplates := map[string][]string{
		"run_list": p.config.RunList,
	}
	for n, slice := range sliceTemplates {
		for i, elem := range slice {
			var err error
			slice[i], err = p.config.tpl.Process(elem, nil)
			if err != nil {
				errs = packer.MultiErrorAppend(
					errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err))
			}
		}
	}
	// Syntax-check the deferred command templates without rendering them.
	validates := map[string]*string{
		"execute_command": &p.config.ExecuteCommand,
		"install_command": &p.config.InstallCommand,
	}
	for n, ptr := range validates {
		if err := p.config.tpl.Validate(*ptr); err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error parsing %s: %s", n, err))
		}
	}
	if p.config.ConfigTemplate != "" {
		fi, err := os.Stat(p.config.ConfigTemplate)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Bad config template path: %s", err))
		} else if fi.IsDir() {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Config template path must be a file: %s", err))
		}
	}
	if p.config.ServerUrl == "" {
		errs = packer.MultiErrorAppend(
			errs, fmt.Errorf("server_url must be set"))
	}
	// Normalize the JSON attributes before substituting user variables.
	jsonValid := true
	for k, v := range p.config.Json {
		p.config.Json[k], err = p.deepJsonFix(k, v)
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing JSON: %s", err))
			jsonValid = false
		}
	}
	if jsonValid {
		// Process the user variables within the JSON and set the JSON.
		// Do this early so that we can validate and show errors.
		p.config.Json, err = p.processJsonUserVars()
		if err != nil {
			errs = packer.MultiErrorAppend(
				errs, fmt.Errorf("Error processing user variables in JSON: %s", err))
		}
	}
	if errs != nil && len(errs.Errors) > 0 {
		return errs
	}
	return nil
}
// Provision installs Chef (unless skipped), uploads the validation key,
// writes client.rb and first-boot.json, runs chef-client, cleans the node
// and client from the Chef server (unless skipped), and removes the staging
// directory.
//
// Fix: the final error message claimed "/etc/chef directory" while the code
// actually removes p.config.StagingDir — the message now reports the real
// path, matching the error style used elsewhere in this provisioner.
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
	nodeName := p.config.NodeName
	if nodeName == "" {
		// Generate a unique, sortable node name when none was configured.
		nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
	}
	remoteValidationKeyPath := ""
	serverUrl := p.config.ServerUrl
	if !p.config.SkipInstall {
		if err := p.installChef(ui, comm); err != nil {
			return fmt.Errorf("Error installing Chef: %s", err)
		}
	}
	if err := p.createDir(ui, comm, p.config.StagingDir); err != nil {
		return fmt.Errorf("Error creating staging directory: %s", err)
	}
	if p.config.ValidationKeyPath != "" {
		remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
		if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil {
			return fmt.Errorf("Error copying validation key: %s", err)
		}
	}
	configPath, err := p.createConfig(
		ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment)
	if err != nil {
		return fmt.Errorf("Error creating Chef config file: %s", err)
	}
	jsonPath, err := p.createJson(ui, comm)
	if err != nil {
		return fmt.Errorf("Error creating JSON attributes: %s", err)
	}
	// NOTE: the chef-client error is deliberately checked only AFTER the
	// cleanup steps below, so node/client cleanup still runs on failure.
	err = p.executeChef(ui, comm, configPath, jsonPath)
	if !p.config.SkipCleanNode {
		if err2 := p.cleanNode(ui, comm, nodeName); err2 != nil {
			return fmt.Errorf("Error cleaning up chef node: %s", err2)
		}
	}
	if !p.config.SkipCleanClient {
		if err2 := p.cleanClient(ui, comm, nodeName); err2 != nil {
			return fmt.Errorf("Error cleaning up chef client: %s", err2)
		}
	}
	if err != nil {
		return fmt.Errorf("Error executing Chef: %s", err)
	}
	if err := p.removeDir(ui, comm, p.config.StagingDir); err != nil {
		return fmt.Errorf("Error removing %s: %s", p.config.StagingDir, err)
	}
	return nil
}
// Cancel aborts the provisioner by terminating the whole process.
func (p *Provisioner) Cancel() {
	// Just hard quit. It isn't a big deal if what we're doing keeps
	// running on the other side.
	os.Exit(0)
}
// uploadDirectory creates dst on the remote machine and uploads the contents
// of the local directory src into it.
//
// Fix: the original indexed src[len(src)-1] unconditionally, which panics
// when src is the empty string; an empty source is now rejected explicitly.
func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, dst string, src string) error {
	if src == "" {
		return fmt.Errorf("upload source directory must not be empty")
	}

	if err := p.createDir(ui, comm, dst); err != nil {
		return err
	}

	// Make sure there is a trailing "/" so that the directory isn't
	// created on the other side.
	if !strings.HasSuffix(src, "/") {
		src = src + "/"
	}
	return comm.UploadDir(dst, src, nil)
}
// createConfig renders client.rb (from config_template when supplied,
// otherwise DefaultConfigTemplate) and uploads it to the staging directory,
// returning the remote path.
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string) (string, error) {
	ui.Message("Creating configuration file 'client.rb'")
	// Read the template
	tpl := DefaultConfigTemplate
	if p.config.ConfigTemplate != "" {
		f, err := os.Open(p.config.ConfigTemplate)
		if err != nil {
			return "", err
		}
		defer f.Close()
		tplBytes, err := ioutil.ReadAll(f)
		if err != nil {
			return "", err
		}
		tpl = string(tplBytes)
	}
	// Render the template with the node-specific data.
	configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{
		NodeName:             nodeName,
		ServerUrl:            serverUrl,
		ValidationKeyPath:    remoteKeyPath,
		ValidationClientName: validationClientName,
		ChefEnvironment:      chefEnvironment,
	})
	if err != nil {
		return "", err
	}
	// ToSlash keeps the remote path POSIX-style regardless of host OS.
	remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
	if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
		return "", err
	}
	return remotePath, nil
}
// createJson serializes the configured node attributes (plus run_list, when
// set) into first-boot.json and uploads it to the staging directory.
func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) {
	ui.Message("Creating JSON attribute file")

	// Work on a copy so p.config.Json is never mutated.
	attributes := make(map[string]interface{}, len(p.config.Json)+1)
	for key, value := range p.config.Json {
		attributes[key] = value
	}

	// Set the run list if it was specified.
	if len(p.config.RunList) > 0 {
		attributes["run_list"] = p.config.RunList
	}

	encoded, err := json.MarshalIndent(attributes, "", " ")
	if err != nil {
		return "", err
	}

	// Upload the bytes to the staging directory.
	dest := filepath.ToSlash(filepath.Join(p.config.StagingDir, "first-boot.json"))
	if uploadErr := comm.Upload(dest, bytes.NewReader(encoded), nil); uploadErr != nil {
		return "", uploadErr
	}
	return dest, nil
}
// createDir creates dir on the remote machine with sudo mkdir -p,
// requiring a zero exit status.
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))

	remote := &packer.RemoteCmd{Command: fmt.Sprintf("sudo mkdir -p '%s'", dir)}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status.")
	}
	return nil
}
// cleanNode deletes the node object from the Chef server by shelling out to
// knife on the machine running Packer (note: comm is unused — knife runs
// locally in this revision).
func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error {
	ui.Say("Cleaning up chef node...")

	knife := exec.Command("sh", "-c", fmt.Sprintf("knife node delete %s -y", node))
	out, err := knife.Output()
	// Show knife's output regardless of whether it succeeded.
	ui.Message(fmt.Sprintf("%s", out))
	if err != nil {
		return err
	}
	return nil
}
// cleanClient deletes the API client object from the Chef server by shelling
// out to knife on the machine running Packer (note: comm is unused — knife
// runs locally in this revision).
func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error {
	ui.Say("Cleaning up chef client...")

	knife := exec.Command("sh", "-c", fmt.Sprintf("knife client delete %s -y", node))
	out, err := knife.Output()
	// Show knife's output regardless of whether it succeeded.
	ui.Message(fmt.Sprintf("%s", out))
	if err != nil {
		return err
	}
	return nil
}
// removeDir deletes dir on the remote machine with sudo rm -rf.
//
// Fix: the directory path is now single-quoted, matching createDir's
// `sudo mkdir -p '%s'` — the unquoted form broke (or worse, removed the
// wrong files) for staging directories containing spaces or shell
// metacharacters.
//
// NOTE(review): the exit status is not checked here, unlike createDir —
// presumably removal is best-effort; confirm before adding a status check.
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))
	cmd := &packer.RemoteCmd{
		Command: fmt.Sprintf("sudo rm -rf '%s'", dir),
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}
	return nil
}
// executeChef renders execute_command with the remote config/JSON paths and
// runs chef-client on the target machine, requiring a zero exit status.
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error {
	data := &ExecuteTemplate{
		ConfigPath: config,
		JsonPath:   json,
		Sudo:       !p.config.PreventSudo,
	}
	rendered, err := p.config.tpl.Process(p.config.ExecuteCommand, data)
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing Chef: %s", rendered))
	remote := &packer.RemoteCmd{Command: rendered}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", remote.ExitStatus)
	}
	return nil
}
// installChef renders install_command and runs it on the remote machine to
// install Chef, requiring a zero exit status.
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
	ui.Message("Installing Chef...")

	rendered, err := p.config.tpl.Process(p.config.InstallCommand, &InstallChefTemplate{
		Sudo: !p.config.PreventSudo,
	})
	if err != nil {
		return err
	}

	remote := &packer.RemoteCmd{Command: rendered}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}
	if remote.ExitStatus != 0 {
		return fmt.Errorf(
			"Install script exited with non-zero exit status %d", remote.ExitStatus)
	}
	return nil
}
// copyValidationKey uploads the local validation key to remotePath on the
// target machine.
func (p *Provisioner) copyValidationKey(ui packer.Ui, comm packer.Communicator, remotePath string) error {
	ui.Message("Uploading validation key...")

	// First upload the validation key to a writable location
	src, err := os.Open(p.config.ValidationKeyPath)
	if err != nil {
		return err
	}
	defer src.Close()

	return comm.Upload(remotePath, src, nil)
}
// deepJsonFix recursively normalizes decoded config data so it can be
// marshaled as JSON: []uint8 becomes string, and map[interface{}]interface{}
// becomes map[string]interface{} (erroring on non-string keys). key is used
// only to build error paths like "a.b[2]".
func (p *Provisioner) deepJsonFix(key string, current interface{}) (interface{}, error) {
	if current == nil {
		return nil, nil
	}

	switch v := current.(type) {
	case []uint8:
		// Raw bytes are represented as a string.
		return string(v), nil
	case []interface{}:
		fixed := make([]interface{}, len(v))
		for i, elem := range v {
			e, err := p.deepJsonFix(fmt.Sprintf("%s[%d]", key, i), elem)
			if err != nil {
				return nil, err
			}
			fixed[i] = e
		}
		return fixed, nil
	case map[interface{}]interface{}:
		fixed := make(map[string]interface{}, len(v))
		for rawKey, elem := range v {
			name, ok := rawKey.(string)
			if !ok {
				return nil, fmt.Errorf("%s: key is not string", key)
			}
			e, err := p.deepJsonFix(fmt.Sprintf("%s.%s", key, name), elem)
			if err != nil {
				return nil, err
			}
			fixed[name] = e
		}
		return fixed, nil
	default:
		// Already JSON-friendly; leave untouched.
		return current, nil
	}
}
// processJsonUserVars interpolates Packer user variables inside the JSON
// attributes. It round-trips p.config.Json through a JSON string, runs the
// template engine over that string, and unmarshals the result. The shared
// tpl.UserVars map is temporarily rewritten with JSON-escaped values and
// restored via defer — the order of these steps is load-bearing.
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
	jsonBytes, err := json.Marshal(p.config.Json)
	if err != nil {
		// This really shouldn't happen since we literally just unmarshalled
		panic(err)
	}
	// Copy the user variables so that we can restore them later, and
	// make sure we make the quotes JSON-friendly in the user variables.
	originalUserVars := make(map[string]string)
	for k, v := range p.config.tpl.UserVars {
		originalUserVars[k] = v
	}
	// Make sure we reset them no matter what
	defer func() {
		p.config.tpl.UserVars = originalUserVars
	}()
	// Make the current user variables JSON string safe.
	for k, v := range p.config.tpl.UserVars {
		v = strings.Replace(v, `\`, `\\`, -1)
		v = strings.Replace(v, `"`, `\"`, -1)
		p.config.tpl.UserVars[k] = v
	}
	// Process the bytes with the template processor
	jsonBytesProcessed, err := p.config.tpl.Process(string(jsonBytes), nil)
	if err != nil {
		return nil, err
	}
	var result map[string]interface{}
	if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
		return nil, err
	}
	return result, nil
}
// DefaultConfigTemplate is the client.rb template rendered by createConfig
// when no config_template is supplied. Conditional sections are emitted only
// when the corresponding ConfigTemplate field is non-empty.
var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
validation_client_name "chef-validator"
{{end}}
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
`
Use sudo only if prevent_sudo is not set
// This package implements a provisioner for Packer that uses
// Chef to provision the remote machine, specifically with chef-client (that is,
// with a Chef server).
package chefclient
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/common/uuid"
"github.com/mitchellh/packer/packer"
)
// Config is the user-facing configuration for this chef-client provisioner
// revision, decoded from the Packer template.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`

	ChefEnvironment string `mapstructure:"chef_environment"`
	// ConfigTemplate is a local path to a custom client.rb template.
	ConfigTemplate string `mapstructure:"config_template"`
	// ExecuteCommand / InstallCommand get defaults in Prepare when empty.
	ExecuteCommand string `mapstructure:"execute_command"`
	InstallCommand string `mapstructure:"install_command"`
	// Json holds arbitrary node attributes written to first-boot.json.
	Json map[string]interface{}
	// NodeName defaults to a generated "packer-<uuid>" name in Provision.
	NodeName string `mapstructure:"node_name"`
	// PreventSudo disables the sudo prefix in the default commands.
	PreventSudo bool `mapstructure:"prevent_sudo"`
	RunList     []string `mapstructure:"run_list"`
	// ServerUrl is required (validated in Prepare).
	ServerUrl string `mapstructure:"server_url"`
	// SkipClean* flags suppress the knife cleanup steps.
	SkipCleanClient bool `mapstructure:"skip_clean_client"`
	SkipCleanNode   bool `mapstructure:"skip_clean_node"`
	SkipInstall     bool `mapstructure:"skip_install"`
	// StagingDir defaults to /tmp/packer-chef-client in Prepare.
	StagingDir           string `mapstructure:"staging_directory"`
	ValidationKeyPath    string `mapstructure:"validation_key_path"`
	ValidationClientName string `mapstructure:"validation_client_name"`

	// tpl processes user-variable interpolation in this legacy revision.
	tpl *packer.ConfigTemplate
}
// Provisioner runs chef-client against a Chef server on the remote machine.
type Provisioner struct {
	config Config
}
// ConfigTemplate is the data rendered into the client.rb template.
type ConfigTemplate struct {
	NodeName             string
	ServerUrl            string
	ValidationKeyPath    string
	ValidationClientName string
	ChefEnvironment      string
}
// ExecuteTemplate is the data rendered into execute_command.
type ExecuteTemplate struct {
	ConfigPath string // remote path of client.rb
	JsonPath   string // remote path of first-boot.json
	Sudo       bool   // true unless prevent_sudo is set
}
// InstallChefTemplate is the data rendered into install_command.
type InstallChefTemplate struct {
	Sudo bool // true unless prevent_sudo is set
}
// Prepare decodes and validates the provisioner configuration, applies
// defaults, and processes user-variable templates in every string option.
// All validation errors are accumulated into a single MultiError.
func (p *Provisioner) Prepare(raws ...interface{}) error {
md, err := common.DecodeConfig(&p.config, raws...)
if err != nil {
return err
}
p.config.tpl, err = packer.NewConfigTemplate()
if err != nil {
return err
}
p.config.tpl.UserVars = p.config.PackerUserVars
// Accumulate any errors
errs := common.CheckUnusedConfig(md)
// Render user variables in each simple string option.
templates := map[string]*string{
"chef_environment": &p.config.ChefEnvironment,
"config_template": &p.config.ConfigTemplate,
"node_name": &p.config.NodeName,
"staging_dir": &p.config.StagingDir,
"chef_server_url": &p.config.ServerUrl,
"execute_command": &p.config.ExecuteCommand,
"install_command": &p.config.InstallCommand,
"validation_key_path": &p.config.ValidationKeyPath,
"validation_client_name": &p.config.ValidationClientName,
}
for n, ptr := range templates {
var err error
*ptr, err = p.config.tpl.Process(*ptr, nil)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing %s: %s", n, err))
}
}
// Defaults. execute_command/install_command keep their {{...}} template
// placeholders here; they are rendered later against ExecuteTemplate /
// InstallChefTemplate.
if p.config.ExecuteCommand == "" {
p.config.ExecuteCommand = "{{if .Sudo}}sudo {{end}}chef-client " +
"--no-color -c {{.ConfigPath}} -j {{.JsonPath}}"
}
if p.config.InstallCommand == "" {
p.config.InstallCommand = "curl -L " +
"https://www.opscode.com/chef/install.sh | " +
"{{if .Sudo}}sudo {{end}}bash"
}
if p.config.RunList == nil {
p.config.RunList = make([]string, 0)
}
if p.config.StagingDir == "" {
p.config.StagingDir = "/tmp/packer-chef-client"
}
// Render user variables in each element of slice-valued options.
sliceTemplates := map[string][]string{
"run_list": p.config.RunList,
}
for n, slice := range sliceTemplates {
for i, elem := range slice {
var err error
slice[i], err = p.config.tpl.Process(elem, nil)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing %s[%d]: %s", n, i, err))
}
}
}
// Only syntax-check these; actual rendering happens at provision time.
validates := map[string]*string{
"execute_command": &p.config.ExecuteCommand,
"install_command": &p.config.InstallCommand,
}
for n, ptr := range validates {
if err := p.config.tpl.Validate(*ptr); err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error parsing %s: %s", n, err))
}
}
// config_template, when given, must point at an existing regular file.
if p.config.ConfigTemplate != "" {
fi, err := os.Stat(p.config.ConfigTemplate)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Bad config template path: %s", err))
} else if fi.IsDir() {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Config template path must be a file: %s", err))
}
}
if p.config.ServerUrl == "" {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("server_url must be set"))
}
// Normalize the raw JSON attributes (see deepJsonFix); skip user-var
// processing entirely if any part of it is invalid.
jsonValid := true
for k, v := range p.config.Json {
p.config.Json[k], err = p.deepJsonFix(k, v)
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing JSON: %s", err))
jsonValid = false
}
}
if jsonValid {
// Process the user variables within the JSON and set the JSON.
// Do this early so that we can validate and show errors.
p.config.Json, err = p.processJsonUserVars()
if err != nil {
errs = packer.MultiErrorAppend(
errs, fmt.Errorf("Error processing user variables in JSON: %s", err))
}
}
if errs != nil && len(errs.Errors) > 0 {
return errs
}
return nil
}
// Provision installs chef-client (unless skipped), uploads the client
// config, validation key and JSON attributes to the staging directory,
// runs chef-client, then cleans up the node/client registrations and the
// staging directory.
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
nodeName := p.config.NodeName
if nodeName == "" {
// Generate a unique node name so repeated builds don't collide.
nodeName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
}
remoteValidationKeyPath := ""
serverUrl := p.config.ServerUrl
if !p.config.SkipInstall {
if err := p.installChef(ui, comm); err != nil {
return fmt.Errorf("Error installing Chef: %s", err)
}
}
if err := p.createDir(ui, comm, p.config.StagingDir); err != nil {
return fmt.Errorf("Error creating staging directory: %s", err)
}
if p.config.ValidationKeyPath != "" {
remoteValidationKeyPath = fmt.Sprintf("%s/validation.pem", p.config.StagingDir)
if err := p.copyValidationKey(ui, comm, remoteValidationKeyPath); err != nil {
return fmt.Errorf("Error copying validation key: %s", err)
}
}
configPath, err := p.createConfig(
ui, comm, nodeName, serverUrl, remoteValidationKeyPath, p.config.ValidationClientName, p.config.ChefEnvironment)
if err != nil {
return fmt.Errorf("Error creating Chef config file: %s", err)
}
jsonPath, err := p.createJson(ui, comm)
if err != nil {
return fmt.Errorf("Error creating JSON attributes: %s", err)
}
// Deliberately defer checking this error until after cleanup below, so
// the node/client registrations are removed even when the run fails.
err = p.executeChef(ui, comm, configPath, jsonPath)
if !p.config.SkipCleanNode {
if err2 := p.cleanNode(ui, comm, nodeName); err2 != nil {
return fmt.Errorf("Error cleaning up chef node: %s", err2)
}
}
if !p.config.SkipCleanClient {
if err2 := p.cleanClient(ui, comm, nodeName); err2 != nil {
return fmt.Errorf("Error cleaning up chef client: %s", err2)
}
}
if err != nil {
return fmt.Errorf("Error executing Chef: %s", err)
}
// NOTE(review): this removes the staging directory (default
// /tmp/packer-chef-client), not /etc/chef — the message is misleading.
if err := p.removeDir(ui, comm, p.config.StagingDir); err != nil {
return fmt.Errorf("Error removing /etc/chef directory: %s", err)
}
return nil
}
// Cancel aborts provisioning by terminating the whole process.
func (p *Provisioner) Cancel() {
// Just hard quit. It isn't a big deal if what we're doing keeps
// running on the other side.
os.Exit(0)
}
// uploadDirectory creates dst on the remote machine and uploads the
// contents of the local directory src into it.
func (p *Provisioner) uploadDirectory(ui packer.Ui, comm packer.Communicator, dst string, src string) error {
	if err := p.createDir(ui, comm, dst); err != nil {
		return err
	}

	// Make sure there is a trailing "/" so that the contents of src are
	// uploaded rather than src being re-created inside dst. Using
	// HasSuffix also avoids indexing into an empty string, which the
	// previous src[len(src)-1] access would panic on.
	if !strings.HasSuffix(src, "/") {
		src = src + "/"
	}

	return comm.UploadDir(dst, src, nil)
}
// createConfig renders the client.rb configuration (from config_template
// when given, otherwise DefaultConfigTemplate), uploads it to the staging
// directory, and returns its remote path.
func (p *Provisioner) createConfig(ui packer.Ui, comm packer.Communicator, nodeName string, serverUrl string, remoteKeyPath string, validationClientName string, chefEnvironment string) (string, error) {
ui.Message("Creating configuration file 'client.rb'")
// Read the template
tpl := DefaultConfigTemplate
if p.config.ConfigTemplate != "" {
f, err := os.Open(p.config.ConfigTemplate)
if err != nil {
return "", err
}
defer f.Close()
tplBytes, err := ioutil.ReadAll(f)
if err != nil {
return "", err
}
tpl = string(tplBytes)
}
// Render against the ConfigTemplate context.
configString, err := p.config.tpl.Process(tpl, &ConfigTemplate{
NodeName: nodeName,
ServerUrl: serverUrl,
ValidationKeyPath: remoteKeyPath,
ValidationClientName: validationClientName,
ChefEnvironment: chefEnvironment,
})
if err != nil {
return "", err
}
// ToSlash: the remote side is a POSIX shell even when Packer runs on Windows.
remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "client.rb"))
if err := comm.Upload(remotePath, bytes.NewReader([]byte(configString)), nil); err != nil {
return "", err
}
return remotePath, nil
}
// createJson marshals the configured node attributes (plus run_list, if
// set) and uploads them as first-boot.json, returning the remote path.
func (p *Provisioner) createJson(ui packer.Ui, comm packer.Communicator) (string, error) {
ui.Message("Creating JSON attribute file")
jsonData := make(map[string]interface{})
// Copy the configured JSON
for k, v := range p.config.Json {
jsonData[k] = v
}
// Set the run list if it was specified
if len(p.config.RunList) > 0 {
jsonData["run_list"] = p.config.RunList
}
jsonBytes, err := json.MarshalIndent(jsonData, "", " ")
if err != nil {
return "", err
}
// Upload the bytes
remotePath := filepath.ToSlash(filepath.Join(p.config.StagingDir, "first-boot.json"))
if err := comm.Upload(remotePath, bytes.NewReader(jsonBytes), nil); err != nil {
return "", err
}
return remotePath, nil
}
// createDir creates the given directory on the remote machine via
// `mkdir -p`, prefixed with sudo unless prevent_sudo is set.
func (p *Provisioner) createDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Creating directory: %s", dir))

	mkdirCmd := fmt.Sprintf("mkdir -p '%s'", dir)
	if !p.config.PreventSudo {
		mkdirCmd = "sudo " + mkdirCmd
	}

	cmd := &packer.RemoteCmd{
		Command: mkdirCmd,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}

	// Include the actual status in the error, consistent with executeChef.
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}

	return nil
}
// cleanNode deletes the node registration from the Chef server by running
// `knife node delete` locally (on the machine running Packer, not via comm).
// The knife output is always shown, even when the command fails.
func (p *Provisioner) cleanNode(ui packer.Ui, comm packer.Communicator, node string) error {
	ui.Say("Cleaning up chef node...")

	// NOTE(review): node is interpolated into a `sh -c` command line; a
	// node_name containing shell metacharacters would be interpreted by
	// the shell. Node names come from the template, so this is low risk,
	// but passing args directly to exec.Command would be safer.
	app := fmt.Sprintf("knife node delete %s -y", node)
	cmd := exec.Command("sh", "-c", app)
	out, err := cmd.Output()
	ui.Message(string(out))
	if err != nil {
		return err
	}

	return nil
}
// cleanClient deletes the client registration from the Chef server by
// running `knife client delete` locally (not via comm). The knife output
// is always shown, even when the command fails.
func (p *Provisioner) cleanClient(ui packer.Ui, comm packer.Communicator, node string) error {
	ui.Say("Cleaning up chef client...")

	// NOTE(review): same shell-interpolation caveat as cleanNode — node is
	// embedded in a `sh -c` command line.
	app := fmt.Sprintf("knife client delete %s -y", node)
	cmd := exec.Command("sh", "-c", app)
	out, err := cmd.Output()
	ui.Message(string(out))
	if err != nil {
		return err
	}

	return nil
}
// removeDir recursively removes the given directory on the remote machine
// via `rm -rf`, prefixed with sudo unless prevent_sudo is set.
func (p *Provisioner) removeDir(ui packer.Ui, comm packer.Communicator, dir string) error {
	ui.Message(fmt.Sprintf("Removing directory: %s", dir))

	rmCmd := fmt.Sprintf("rm -rf '%s'", dir)
	if !p.config.PreventSudo {
		rmCmd = "sudo " + rmCmd
	}

	cmd := &packer.RemoteCmd{
		Command: rmCmd,
	}
	if err := cmd.StartWithUi(comm, ui); err != nil {
		return err
	}

	// Previously a failed removal was silently ignored; surface it like
	// createDir/executeChef do.
	if cmd.ExitStatus != 0 {
		return fmt.Errorf("Non-zero exit status: %d", cmd.ExitStatus)
	}

	return nil
}
// executeChef renders execute_command against the uploaded config and
// JSON paths and runs the result on the remote machine, failing on a
// non-zero exit status.
func (p *Provisioner) executeChef(ui packer.Ui, comm packer.Communicator, config string, json string) error {
	ctx := &ExecuteTemplate{
		ConfigPath: config,
		JsonPath:   json,
		Sudo:       !p.config.PreventSudo,
	}
	cmdline, err := p.config.tpl.Process(p.config.ExecuteCommand, ctx)
	if err != nil {
		return err
	}

	ui.Message(fmt.Sprintf("Executing Chef: %s", cmdline))

	remote := &packer.RemoteCmd{Command: cmdline}
	if err := remote.StartWithUi(comm, ui); err != nil {
		return err
	}

	if status := remote.ExitStatus; status != 0 {
		return fmt.Errorf("Non-zero exit status: %d", status)
	}
	return nil
}
// installChef renders install_command (by default curl | bash of the
// Opscode installer) and runs it on the remote machine.
func (p *Provisioner) installChef(ui packer.Ui, comm packer.Communicator) error {
ui.Message("Installing Chef...")
command, err := p.config.tpl.Process(p.config.InstallCommand, &InstallChefTemplate{
Sudo: !p.config.PreventSudo,
})
if err != nil {
return err
}
cmd := &packer.RemoteCmd{Command: command}
if err := cmd.StartWithUi(comm, ui); err != nil {
return err
}
if cmd.ExitStatus != 0 {
return fmt.Errorf(
"Install script exited with non-zero exit status %d", cmd.ExitStatus)
}
return nil
}
// copyValidationKey uploads the local validation key file to remotePath
// on the target machine.
func (p *Provisioner) copyValidationKey(ui packer.Ui, comm packer.Communicator, remotePath string) error {
	ui.Message("Uploading validation key...")

	// First upload the validation key to a writable location
	keyFile, err := os.Open(p.config.ValidationKeyPath)
	if err != nil {
		return err
	}
	defer keyFile.Close()

	return comm.Upload(remotePath, keyFile, nil)
}
// deepJsonFix recursively normalizes a decoded JSON value so it can be
// re-marshalled with encoding/json: []uint8 becomes string and
// map[interface{}]interface{} becomes map[string]interface{} (erroring on
// non-string keys). key is only used to build error paths like "a.b[2]".
func (p *Provisioner) deepJsonFix(key string, current interface{}) (interface{}, error) {
if current == nil {
return nil, nil
}
switch c := current.(type) {
case []interface{}:
// Recurse into each element, tracking the index in the error path.
val := make([]interface{}, len(c))
for i, v := range c {
var err error
val[i], err = p.deepJsonFix(fmt.Sprintf("%s[%d]", key, i), v)
if err != nil {
return nil, err
}
}
return val, nil
case []uint8:
// Byte slices would marshal as base64; treat them as plain strings.
return string(c), nil
case map[interface{}]interface{}:
// encoding/json can only marshal string-keyed maps.
val := make(map[string]interface{})
for k, v := range c {
ks, ok := k.(string)
if !ok {
return nil, fmt.Errorf("%s: key is not string", key)
}
var err error
val[ks], err = p.deepJsonFix(
fmt.Sprintf("%s.%s", key, ks), v)
if err != nil {
return nil, err
}
}
return val, nil
default:
// Scalars (and already-valid values) pass through unchanged.
return current, nil
}
}
// processJsonUserVars runs the template processor over the JSON-encoded
// node attributes so user variables inside them are expanded, then decodes
// the result back into a map. User variable values are temporarily
// JSON-escaped in the shared template state and restored via defer.
func (p *Provisioner) processJsonUserVars() (map[string]interface{}, error) {
jsonBytes, err := json.Marshal(p.config.Json)
if err != nil {
// This really shouldn't happen since we literally just unmarshalled
panic(err)
}
// Copy the user variables so that we can restore them later, and
// make sure we make the quotes JSON-friendly in the user variables.
originalUserVars := make(map[string]string)
for k, v := range p.config.tpl.UserVars {
originalUserVars[k] = v
}
// Make sure we reset them no matter what
defer func() {
p.config.tpl.UserVars = originalUserVars
}()
// Make the current user variables JSON string safe.
for k, v := range p.config.tpl.UserVars {
v = strings.Replace(v, `\`, `\\`, -1)
v = strings.Replace(v, `"`, `\"`, -1)
p.config.tpl.UserVars[k] = v
}
// Process the bytes with the template processor
jsonBytesProcessed, err := p.config.tpl.Process(string(jsonBytes), nil)
if err != nil {
return nil, err
}
var result map[string]interface{}
if err := json.Unmarshal([]byte(jsonBytesProcessed), &result); err != nil {
return nil, err
}
return result, nil
}
// DefaultConfigTemplate is the client.rb rendered when no config_template
// is supplied. It is processed against ConfigTemplate, so empty fields
// fall back (e.g. validation_client_name defaults to "chef-validator").
var DefaultConfigTemplate = `
log_level :info
log_location STDOUT
chef_server_url "{{.ServerUrl}}"
{{if ne .ValidationClientName ""}}
validation_client_name "{{.ValidationClientName}}"
{{else}}
validation_client_name "chef-validator"
{{end}}
{{if ne .ValidationKeyPath ""}}
validation_key "{{.ValidationKeyPath}}"
{{end}}
node_name "{{.NodeName}}"
{{if ne .ChefEnvironment ""}}
environment "{{.ChefEnvironment}}"
{{end}}
`
|
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"reflect"
"strings"
"testing"
"github.com/kr/pretty"
)
// Shorthand constructors for building expected ASTs in the test table.
// lit wraps a string in a Lit node; lits does so for many strings.
func lit(s string) Lit { return Lit{Value: s} }
func lits(strs ...string) []Node {
l := make([]Node, len(strs))
for i, s := range strs {
l[i] = lit(s)
}
return l
}
// word groups parts into a Word; litWord builds a single-literal Word.
func word(ns ...Node) Word { return Word{Parts: ns} }
func litWord(s string) Word { return word(lits(s)...) }
func litWords(strs ...string) []Word {
l := make([]Word, 0, len(strs))
for _, s := range strs {
l = append(l, litWord(s))
}
return l
}
// cmd/litCmd build Command nodes; stmt/stmts wrap nodes into Stmt values.
func cmd(words ...Word) Command { return Command{Args: words} }
func litCmd(strs ...string) Command { return cmd(litWords(strs...)...) }
func stmt(n Node) Stmt { return Stmt{Node: n} }
func stmts(ns ...Node) []Stmt {
l := make([]Stmt, len(ns))
for i, n := range ns {
l[i] = stmt(n)
}
return l
}
func litStmt(strs ...string) Stmt { return stmt(litCmd(strs...)) }
func litStmts(strs ...string) []Stmt {
l := make([]Stmt, len(strs))
for i, s := range strs {
l[i] = litStmt(s)
}
return l
}
// Constructors for quoting/grouping nodes.
func dblQuoted(ns ...Node) DblQuoted { return DblQuoted{Parts: ns} }
func bckQuoted(sts ...Stmt) BckQuoted { return BckQuoted{Stmts: sts} }
func block(sts ...Stmt) Block { return Block{Stmts: sts} }
func cmdSubst(sts ...Stmt) CmdSubst { return CmdSubst{Stmts: sts} }
// testCase pairs several equivalent shell inputs (strs) with the AST they
// should all parse to; strs[0] is also the canonical printed form.
type testCase struct {
strs []string
ast interface{}
}
var astTests = []testCase{
{
[]string{"", " ", "\t", "\n \n"},
nil,
},
{
[]string{"", "# foo", "# foo ( bar", "# foo'bar"},
nil,
},
{
[]string{"foo", "foo ", " foo", "foo # bar"},
litCmd("foo"),
},
{
[]string{"foo; bar", "foo; bar;", "foo;bar;", "\nfoo\nbar\n"},
litStmts("foo", "bar"),
},
{
[]string{"foo a b", " foo a b ", "foo \\\n a b"},
litCmd("foo", "a", "b"),
},
{
[]string{"foobar", "foo\\\nbar"},
litCmd("foobar"),
},
{
[]string{"foo'bar'"},
litCmd("foo'bar'"),
},
{
[]string{"(foo)", "(foo;)", "(\nfoo\n)"},
Subshell{Stmts: litStmts("foo")},
},
{
[]string{"(foo; bar)"},
Subshell{Stmts: litStmts("foo", "bar")},
},
{
[]string{"( )"},
Subshell{},
},
{
[]string{"{ foo; }", "{\nfoo\n}"},
block(litStmt("foo")),
},
{
[]string{
"if a; then b; fi",
"if a\nthen\nb\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
},
},
{
[]string{
"if a; then b; else c; fi",
"if a\nthen b\nelse\nc\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
ElseStmts: litStmts("c"),
},
},
{
[]string{
"if a; then a; elif b; then b; elif c; then c; else d; fi",
"if a\nthen a\nelif b\nthen b\nelif c\nthen c\nelse\nd\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("a"),
Elifs: []Elif{
{
Conds: litStmts("b"),
ThenStmts: litStmts("b"),
},
{
Conds: litStmts("c"),
ThenStmts: litStmts("c"),
},
},
ElseStmts: litStmts("d"),
},
},
{
[]string{"if a1; a2 foo; a3 bar; then b; fi"},
IfStmt{
Conds: []Stmt{
litStmt("a1"),
litStmt("a2", "foo"),
litStmt("a3", "bar"),
},
ThenStmts: litStmts("b"),
},
},
{
[]string{"while a; do b; done", "while a\ndo\nb\ndone"},
WhileStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
},
},
{
[]string{"until a; do b; done", "until a\ndo\nb\ndone"},
UntilStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
},
},
{
[]string{
"for i; do foo; done",
"for i in; do foo; done",
},
ForStmt{
Name: lit("i"),
DoStmts: litStmts("foo"),
},
},
{
[]string{
"for i in 1 2 3; do echo $i; done",
"for i in 1 2 3\ndo echo $i\ndone",
"for i in 1 2 3 #foo\ndo echo $i\ndone",
},
ForStmt{
Name: lit("i"),
WordList: litWords("1", "2", "3"),
DoStmts: stmts(cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "i"}),
)),
},
},
{
[]string{`echo ' ' "foo bar"`},
cmd(
litWord("echo"),
litWord("' '"),
word(dblQuoted(lits("foo bar")...)),
),
},
{
[]string{`"foo \" bar"`},
cmd(
word(dblQuoted(lits(`foo \" bar`)...)),
),
},
{
[]string{"\">foo\" \"\nbar\""},
cmd(
word(dblQuoted(lit(">foo"))),
word(dblQuoted(lit("\nbar"))),
),
},
{
[]string{`foo \" bar`},
litCmd(`foo`, `\"`, `bar`),
},
{
[]string{`'"'`},
litCmd(`'"'`),
},
{
[]string{"'`'"},
litCmd("'`'"),
},
{
[]string{`"'"`},
cmd(
word(dblQuoted(lit("'"))),
),
},
{
[]string{"s{s s=s"},
litCmd("s{s", "s=s"),
},
{
[]string{"foo && bar", "foo&&bar", "foo &&\nbar"},
BinaryExpr{
Op: LAND,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"foo || bar", "foo||bar", "foo ||\nbar"},
BinaryExpr{
Op: LOR,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"if a; then b; fi || while a; do b; done"},
BinaryExpr{
Op: LOR,
X: stmt(IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
}),
Y: stmt(WhileStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
}),
},
},
{
[]string{"foo && bar1 || bar2"},
BinaryExpr{
Op: LAND,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: LOR,
X: litStmt("bar1"),
Y: litStmt("bar2"),
}),
},
},
{
[]string{"foo | bar", "foo|bar", "foo |\n#etc\nbar"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"foo | bar | extra"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: OR,
X: litStmt("bar"),
Y: litStmt("extra"),
}),
},
},
{
[]string{
"foo() { a; b; }",
"foo() {\na\nb\n}",
"foo ( ) {\na\nb\n}",
},
FuncDecl{
Name: lit("foo"),
Body: stmt(block(litStmts("a", "b")...)),
},
},
{
[]string{"foo() { a; }; bar", "foo() {\na\n}\nbar"},
[]Node{
FuncDecl{
Name: lit("foo"),
Body: stmt(block(litStmts("a")...)),
},
litCmd("bar"),
},
},
{
[]string{
"foo >a >>b <c",
"foo > a >> b < c",
">a >>b foo <c",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
{Op: APPEND, Word: litWord("b")},
{Op: RDRIN, Word: litWord("c")},
},
},
},
{
[]string{
"foo bar >a",
"foo >a bar",
},
Stmt{
Node: litCmd("foo", "bar"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
},
},
},
{
[]string{`>a >\b`},
Stmt{
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
{Op: RDROUT, Word: litWord(`\b`)},
},
},
},
{
[]string{
"foo <<EOF\nbar\nEOF",
"foo <<EOF\nbar",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
},
{
[]string{"foo <<EOF\nl1\nl2\nl3\nEOF"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nl1\nl2\nl3\nEOF")},
},
},
},
{
[]string{"{ foo <<EOF\nbar\nEOF\n}"},
block(Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
}),
},
{
[]string{"foo <<EOF\nbar\nEOF\nfoo2"},
[]Stmt{
{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
litStmt("foo2"),
},
},
{
[]string{"foo <<FOOBAR\nbar\nFOOBAR"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("FOOBAR\nbar\nFOOBAR")},
},
},
},
{
[]string{"foo <<\"EOF\"\nbar\nEOF"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("\"EOF\"\nbar\nEOF")},
},
},
},
{
[]string{
"foo <<-EOF\nbar\nEOF",
"foo <<- EOF\nbar\nEOF",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: DHEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
},
{
[]string{"foo >&2 <&0 2>file <>f2"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: DPLOUT, Word: litWord("2")},
{Op: DPLIN, Word: litWord("0")},
{Op: RDROUT, N: lit("2"), Word: litWord("file")},
{Op: OPRDWR, Word: litWord("f2")},
},
},
},
{
[]string{"a >f1; b >f2"},
[]Stmt{
{
Node: litCmd("a"),
Redirs: []Redirect{{Op: RDROUT, Word: litWord("f1")}},
},
{
Node: litCmd("b"),
Redirs: []Redirect{{Op: RDROUT, Word: litWord("f2")}},
},
},
},
{
[]string{"!"},
Stmt{Negated: true},
},
{
[]string{"! foo"},
Stmt{
Negated: true,
Node: litCmd("foo"),
},
},
{
[]string{"foo &; bar", "foo & bar", "foo&bar"},
[]Stmt{
{
Node: litCmd("foo"),
Background: true,
},
litStmt("bar"),
},
},
{
[]string{"! if foo; then bar; fi >/dev/null &"},
Stmt{
Negated: true,
Node: IfStmt{
Conds: litStmts("foo"),
ThenStmts: litStmts("bar"),
},
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("/dev/null")},
},
Background: true,
},
},
{
[]string{"echo foo#bar"},
litCmd("echo", "foo#bar"),
},
{
[]string{"{ echo } }; }"},
block(litStmt("echo", "}", "}")),
},
{
[]string{`{foo}`},
litCmd(`{foo}`),
},
{
[]string{`{"foo"`},
cmd(
word(
lit("{"),
dblQuoted(lit("foo")),
),
),
},
{
[]string{`!foo`},
litCmd(`!foo`),
},
{
[]string{"$(foo bar)"},
cmd(
word(cmdSubst(litStmt("foo", "bar"))),
),
},
{
[]string{"$(foo | bar)"},
cmd(
word(cmdSubst(
stmt(BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
)),
),
},
{
[]string{"`foo`"},
cmd(
word(bckQuoted(litStmt("foo"))),
),
},
{
[]string{"`foo | bar`"},
cmd(
word(bckQuoted(
stmt(BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
)),
),
},
{
[]string{"`foo 'bar'`"},
cmd(
word(bckQuoted(litStmt("foo", "'bar'"))),
),
},
{
[]string{"`foo \"bar\"`"},
cmd(
word(bckQuoted(
stmt(Command{Args: []Word{
litWord("foo"),
word(dblQuoted(lit("bar"))),
}}),
)),
),
},
{
[]string{`echo "$foo"`},
cmd(
litWord("echo"),
word(dblQuoted(ParamExp{Short: true, Text: "foo"})),
),
},
{
[]string{`$@ $# $$`},
cmd(
word(ParamExp{Short: true, Text: "@"}),
word(ParamExp{Short: true, Text: "#"}),
word(ParamExp{Short: true, Text: "$"}),
),
},
{
[]string{`echo $'foo'`},
cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "'foo'"}),
),
},
{
[]string{`echo "${foo}"`},
cmd(
litWord("echo"),
word(dblQuoted(ParamExp{Text: "foo"})),
),
},
{
[]string{`echo "(foo)"`},
cmd(
litWord("echo"),
word(dblQuoted(lit("(foo)"))),
),
},
{
[]string{`echo "${foo}>"`},
cmd(
litWord("echo"),
word(dblQuoted(
ParamExp{Text: "foo"},
lit(">"),
)),
),
},
{
[]string{`echo "$(foo)"`},
cmd(
litWord("echo"),
word(dblQuoted(cmdSubst(litStmt("foo")))),
),
},
{
[]string{`echo "$(foo bar)"`, `echo "$(foo bar)"`},
cmd(
litWord("echo"),
word(dblQuoted(cmdSubst(litStmt("foo", "bar")))),
),
},
{
[]string{"echo \"`foo`\""},
cmd(
litWord("echo"),
word(dblQuoted(bckQuoted(litStmt("foo")))),
),
},
{
[]string{"echo \"`foo bar`\"", "echo \"`foo bar`\""},
cmd(
litWord("echo"),
word(dblQuoted(bckQuoted(litStmt("foo", "bar")))),
),
},
{
[]string{`echo '${foo}'`},
litCmd("echo", "'${foo}'"),
},
{
[]string{"echo ${foo bar}"},
cmd(
litWord("echo"),
word(ParamExp{Text: "foo bar"}),
),
},
{
[]string{"$(($x-1))"},
cmd(word(ArithmExp{Text: "$x-1"})),
},
{
[]string{"echo foo$bar"},
cmd(
litWord("echo"),
word(lit("foo"), ParamExp{Short: true, Text: "bar"}),
),
},
{
[]string{"echo foo$(bar)"},
cmd(
litWord("echo"),
word(lit("foo"), cmdSubst(litStmt("bar"))),
),
},
{
[]string{"echo foo${bar bar}"},
cmd(
litWord("echo"),
word(lit("foo"), ParamExp{Text: "bar bar"}),
),
},
{
[]string{"echo 'foo${bar'"},
litCmd("echo", "'foo${bar'"),
},
{
[]string{"(foo); bar"},
[]Node{
Subshell{Stmts: litStmts("foo")},
litCmd("bar"),
},
},
{
[]string{"foo; (bar)", "foo\n(bar)"},
[]Node{
litCmd("foo"),
Subshell{Stmts: litStmts("bar")},
},
},
{
[]string{"foo; (bar)", "foo\n(bar)"},
[]Node{
litCmd("foo"),
Subshell{Stmts: litStmts("bar")},
},
},
{
[]string{"a=\"\nbar\""},
cmd(
word(lit("a="), dblQuoted(lit("\nbar"))),
),
},
{
[]string{
"case $i in 1) foo;; 2 | 3*) bar; esac",
"case $i in 1) foo;; 2 | 3*) bar;; esac",
"case $i in (1) foo;; 2 | 3*) bar;; esac",
"case $i\nin\n1)\nfoo\n;;\n2 | 3*)\nbar\n;;\nesac",
},
CaseStmt{
Word: word(ParamExp{Short: true, Text: "i"}),
List: []PatternList{
{
Patterns: litWords("1"),
Stmts: litStmts("foo"),
},
{
Patterns: litWords("2", "3*"),
Stmts: litStmts("bar"),
},
},
},
},
{
[]string{"foo | while read a; do b; done"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(WhileStmt{
Conds: []Stmt{litStmt("read", "a")},
DoStmts: litStmts("b"),
}),
},
},
{
[]string{"while read l; do foo || bar; done"},
WhileStmt{
Conds: []Stmt{litStmt("read", "l")},
DoStmts: stmts(BinaryExpr{
Op: LOR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
},
},
{
[]string{"echo if while"},
litCmd("echo", "if", "while"),
},
{
[]string{"echo ${foo}if"},
cmd(
litWord("echo"),
word(ParamExp{Text: "foo"}, lit("if")),
),
},
{
[]string{"echo $if"},
cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "if"}),
),
},
{
[]string{"if; then; fi", "if\nthen\nfi"},
IfStmt{},
},
{
[]string{"while; do; done", "while\ndo\ndone"},
WhileStmt{},
},
{
[]string{"while; do; done", "while\ndo\n#foo\ndone"},
WhileStmt{},
},
{
[]string{"until; do; done", "until\ndo\ndone"},
UntilStmt{},
},
{
[]string{"for i; do; done", "for i\ndo\ndone"},
ForStmt{Name: lit("i")},
},
{
[]string{"case i in; esac"},
CaseStmt{Word: litWord("i")},
},
{
[]string{"f1 && f2 | bar"},
BinaryExpr{
Op: OR,
X: stmt(BinaryExpr{
Op: LAND,
X: litStmt("f1"),
Y: litStmt("f2"),
}),
Y: litStmt("bar"),
},
},
{
[]string{"foo | b1 && b2"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: LAND,
X: litStmt("b1"),
Y: litStmt("b2"),
}),
},
},
{
[]string{"foo >f | bar"},
BinaryExpr{
Op: OR,
X: Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("f")},
},
},
Y: litStmt("bar"),
},
},
{
[]string{"foo >f || bar"},
BinaryExpr{
Op: LOR,
X: Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("f")},
},
},
Y: litStmt("bar"),
},
},
}
// fullProg lifts a test-case AST value — a single Node/Stmt or a slice of
// either — into a File so it can be compared against a parse result.
func fullProg(v interface{}) (f File) {
	switch x := v.(type) {
	case []Stmt:
		f.Stmts = x
	case Stmt:
		f.Stmts = []Stmt{x}
	case []Node:
		for _, n := range x {
			f.Stmts = append(f.Stmts, stmt(n))
		}
	case Node:
		f.Stmts = []Stmt{stmt(x)}
	}
	return f
}
// setPos recursively overwrites every position field in the given AST
// value with to. When diff is true it first asserts the field does NOT
// already equal to (used to prove the parser set real positions).
// Value-typed nodes are returned mutated (the caller must reassign);
// pointer/slice cases mutate in place and return nil.
func setPos(t *testing.T, v interface{}, to Pos, diff bool) Node {
set := func(p *Pos) {
if diff && *p == to {
t.Fatalf("Pos in %v (%T) is already %v", v, v, to)
}
*p = to
}
switch x := v.(type) {
case []Stmt:
for i := range x {
setPos(t, &x[i], to, diff)
}
case *Stmt:
set(&x.Position)
// x.Node is an interface value, so the mutated copy must be stored back.
x.Node = setPos(t, x.Node, to, diff)
for i := range x.Redirs {
set(&x.Redirs[i].OpPos)
setPos(t, &x.Redirs[i].N, to, diff)
setPos(t, &x.Redirs[i].Word, to, diff)
}
case Command:
setPos(t, x.Args, to, diff)
return x
case []Word:
for i := range x {
setPos(t, &x[i], to, diff)
}
case *Word:
setPos(t, x.Parts, to, diff)
case []Node:
for i := range x {
x[i] = setPos(t, x[i], to, diff)
}
case *Lit:
set(&x.ValuePos)
case Lit:
set(&x.ValuePos)
return x
case Subshell:
set(&x.Lparen)
set(&x.Rparen)
setPos(t, x.Stmts, to, diff)
return x
case Block:
set(&x.Lbrace)
set(&x.Rbrace)
setPos(t, x.Stmts, to, diff)
return x
case IfStmt:
set(&x.If)
set(&x.Fi)
setPos(t, x.Conds, to, diff)
setPos(t, x.ThenStmts, to, diff)
for i := range x.Elifs {
set(&x.Elifs[i].Elif)
setPos(t, x.Elifs[i].Conds, to, diff)
setPos(t, x.Elifs[i].ThenStmts, to, diff)
}
setPos(t, x.ElseStmts, to, diff)
return x
case WhileStmt:
set(&x.While)
set(&x.Done)
setPos(t, x.Conds, to, diff)
setPos(t, x.DoStmts, to, diff)
return x
case UntilStmt:
set(&x.Until)
set(&x.Done)
setPos(t, x.Conds, to, diff)
setPos(t, x.DoStmts, to, diff)
return x
case ForStmt:
set(&x.For)
set(&x.Done)
setPos(t, &x.Name, to, diff)
setPos(t, x.WordList, to, diff)
setPos(t, x.DoStmts, to, diff)
return x
case DblQuoted:
set(&x.Quote)
setPos(t, x.Parts, to, diff)
return x
case BckQuoted:
set(&x.Quote)
setPos(t, x.Stmts, to, diff)
return x
case BinaryExpr:
set(&x.OpPos)
setPos(t, &x.X, to, diff)
setPos(t, &x.Y, to, diff)
return x
case FuncDecl:
setPos(t, &x.Name, to, diff)
setPos(t, &x.Body, to, diff)
return x
case ParamExp:
set(&x.Exp)
return x
case ArithmExp:
set(&x.Exp)
return x
case CmdSubst:
set(&x.Exp)
set(&x.Rparen)
setPos(t, x.Stmts, to, diff)
return x
case CaseStmt:
set(&x.Case)
set(&x.Esac)
setPos(t, &x.Word, to, diff)
for _, pl := range x.List {
setPos(t, pl.Patterns, to, diff)
setPos(t, pl.Stmts, to, diff)
}
return x
case nil:
default:
// Any unhandled node type is a test bug — fail loudly.
panic(v)
}
return nil
}
// TestNodePos sets a non-default position on every node of every test AST
// (diff=true verifies no node already carried it) and then checks that
// Pos() reports that position for each statement and its node.
func TestNodePos(t *testing.T) {
p := Pos{
Line: 12,
Column: 34,
}
// defaultPos appears to be consumed by the AST constructors; set it so
// freshly built nodes carry p. TODO confirm against the sh package.
defaultPos = p
allTests := astTests
for _, v := range []interface{}{
Command{},
Command{Args: []Word{
{},
}},
} {
allTests = append(allTests, testCase{nil, v})
}
for _, c := range allTests {
want := fullProg(c.ast)
setPos(t, want.Stmts, p, true)
for _, s := range want.Stmts {
if s.Pos() != p {
t.Fatalf("Found unexpected Pos in %v", s)
}
n := s.Node
if n != nil && n.Pos() != p {
t.Fatalf("Found unexpected Pos in %v", n)
}
}
}
}
// TestParseAST parses every input variant of every test case and compares
// the result against the expected AST, with all positions zeroed on both
// sides so only structure is compared (diff=true additionally proves the
// parser produced non-zero positions).
func TestParseAST(t *testing.T) {
for _, c := range astTests {
want := fullProg(c.ast)
setPos(t, want.Stmts, Pos{}, false)
for _, in := range c.strs {
r := strings.NewReader(in)
got, err := Parse(r, "")
if err != nil {
t.Fatalf("Unexpected error in %q: %v", in, err)
}
setPos(t, got.Stmts, Pos{}, true)
if !reflect.DeepEqual(got, want) {
t.Fatalf("AST mismatch in %q\nwant: %s\ngot: %s\ndiff:\n%s",
in, want, got,
strings.Join(pretty.Diff(want, got), "\n"),
)
}
}
}
}
// TestPrintAST checks that printing each expected AST reproduces the
// canonical source form, which by convention is strs[0] of each case.
func TestPrintAST(t *testing.T) {
for _, c := range astTests {
in := fullProg(c.ast)
want := c.strs[0]
got := in.String()
if got != want {
t.Fatalf("AST print mismatch\nwant: %s\ngot: %s",
want, got)
}
}
}
ast_test: rename pos funcs to be clearer
// Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>
// See LICENSE for licensing information
package sh
import (
"reflect"
"strings"
"testing"
"github.com/kr/pretty"
)
func lit(s string) Lit { return Lit{Value: s} }
func lits(strs ...string) []Node {
l := make([]Node, len(strs))
for i, s := range strs {
l[i] = lit(s)
}
return l
}
func word(ns ...Node) Word { return Word{Parts: ns} }
func litWord(s string) Word { return word(lits(s)...) }
func litWords(strs ...string) []Word {
l := make([]Word, 0, len(strs))
for _, s := range strs {
l = append(l, litWord(s))
}
return l
}
func cmd(words ...Word) Command { return Command{Args: words} }
func litCmd(strs ...string) Command { return cmd(litWords(strs...)...) }
func stmt(n Node) Stmt { return Stmt{Node: n} }
func stmts(ns ...Node) []Stmt {
l := make([]Stmt, len(ns))
for i, n := range ns {
l[i] = stmt(n)
}
return l
}
func litStmt(strs ...string) Stmt { return stmt(litCmd(strs...)) }
func litStmts(strs ...string) []Stmt {
l := make([]Stmt, len(strs))
for i, s := range strs {
l[i] = litStmt(s)
}
return l
}
func dblQuoted(ns ...Node) DblQuoted { return DblQuoted{Parts: ns} }
func bckQuoted(sts ...Stmt) BckQuoted { return BckQuoted{Stmts: sts} }
func block(sts ...Stmt) Block { return Block{Stmts: sts} }
func cmdSubst(sts ...Stmt) CmdSubst { return CmdSubst{Stmts: sts} }
type testCase struct {
strs []string
ast interface{}
}
var astTests = []testCase{
{
[]string{"", " ", "\t", "\n \n"},
nil,
},
{
[]string{"", "# foo", "# foo ( bar", "# foo'bar"},
nil,
},
{
[]string{"foo", "foo ", " foo", "foo # bar"},
litCmd("foo"),
},
{
[]string{"foo; bar", "foo; bar;", "foo;bar;", "\nfoo\nbar\n"},
litStmts("foo", "bar"),
},
{
[]string{"foo a b", " foo a b ", "foo \\\n a b"},
litCmd("foo", "a", "b"),
},
{
[]string{"foobar", "foo\\\nbar"},
litCmd("foobar"),
},
{
[]string{"foo'bar'"},
litCmd("foo'bar'"),
},
{
[]string{"(foo)", "(foo;)", "(\nfoo\n)"},
Subshell{Stmts: litStmts("foo")},
},
{
[]string{"(foo; bar)"},
Subshell{Stmts: litStmts("foo", "bar")},
},
{
[]string{"( )"},
Subshell{},
},
{
[]string{"{ foo; }", "{\nfoo\n}"},
block(litStmt("foo")),
},
{
[]string{
"if a; then b; fi",
"if a\nthen\nb\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
},
},
{
[]string{
"if a; then b; else c; fi",
"if a\nthen b\nelse\nc\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
ElseStmts: litStmts("c"),
},
},
{
[]string{
"if a; then a; elif b; then b; elif c; then c; else d; fi",
"if a\nthen a\nelif b\nthen b\nelif c\nthen c\nelse\nd\nfi",
},
IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("a"),
Elifs: []Elif{
{
Conds: litStmts("b"),
ThenStmts: litStmts("b"),
},
{
Conds: litStmts("c"),
ThenStmts: litStmts("c"),
},
},
ElseStmts: litStmts("d"),
},
},
{
[]string{"if a1; a2 foo; a3 bar; then b; fi"},
IfStmt{
Conds: []Stmt{
litStmt("a1"),
litStmt("a2", "foo"),
litStmt("a3", "bar"),
},
ThenStmts: litStmts("b"),
},
},
{
[]string{"while a; do b; done", "while a\ndo\nb\ndone"},
WhileStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
},
},
{
[]string{"until a; do b; done", "until a\ndo\nb\ndone"},
UntilStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
},
},
{
[]string{
"for i; do foo; done",
"for i in; do foo; done",
},
ForStmt{
Name: lit("i"),
DoStmts: litStmts("foo"),
},
},
{
[]string{
"for i in 1 2 3; do echo $i; done",
"for i in 1 2 3\ndo echo $i\ndone",
"for i in 1 2 3 #foo\ndo echo $i\ndone",
},
ForStmt{
Name: lit("i"),
WordList: litWords("1", "2", "3"),
DoStmts: stmts(cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "i"}),
)),
},
},
{
[]string{`echo ' ' "foo bar"`},
cmd(
litWord("echo"),
litWord("' '"),
word(dblQuoted(lits("foo bar")...)),
),
},
{
[]string{`"foo \" bar"`},
cmd(
word(dblQuoted(lits(`foo \" bar`)...)),
),
},
{
[]string{"\">foo\" \"\nbar\""},
cmd(
word(dblQuoted(lit(">foo"))),
word(dblQuoted(lit("\nbar"))),
),
},
{
[]string{`foo \" bar`},
litCmd(`foo`, `\"`, `bar`),
},
{
[]string{`'"'`},
litCmd(`'"'`),
},
{
[]string{"'`'"},
litCmd("'`'"),
},
{
[]string{`"'"`},
cmd(
word(dblQuoted(lit("'"))),
),
},
{
[]string{"s{s s=s"},
litCmd("s{s", "s=s"),
},
{
[]string{"foo && bar", "foo&&bar", "foo &&\nbar"},
BinaryExpr{
Op: LAND,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"foo || bar", "foo||bar", "foo ||\nbar"},
BinaryExpr{
Op: LOR,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"if a; then b; fi || while a; do b; done"},
BinaryExpr{
Op: LOR,
X: stmt(IfStmt{
Conds: litStmts("a"),
ThenStmts: litStmts("b"),
}),
Y: stmt(WhileStmt{
Conds: litStmts("a"),
DoStmts: litStmts("b"),
}),
},
},
{
[]string{"foo && bar1 || bar2"},
BinaryExpr{
Op: LAND,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: LOR,
X: litStmt("bar1"),
Y: litStmt("bar2"),
}),
},
},
{
[]string{"foo | bar", "foo|bar", "foo |\n#etc\nbar"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
},
},
{
[]string{"foo | bar | extra"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: OR,
X: litStmt("bar"),
Y: litStmt("extra"),
}),
},
},
{
[]string{
"foo() { a; b; }",
"foo() {\na\nb\n}",
"foo ( ) {\na\nb\n}",
},
FuncDecl{
Name: lit("foo"),
Body: stmt(block(litStmts("a", "b")...)),
},
},
{
[]string{"foo() { a; }; bar", "foo() {\na\n}\nbar"},
[]Node{
FuncDecl{
Name: lit("foo"),
Body: stmt(block(litStmts("a")...)),
},
litCmd("bar"),
},
},
{
[]string{
"foo >a >>b <c",
"foo > a >> b < c",
">a >>b foo <c",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
{Op: APPEND, Word: litWord("b")},
{Op: RDRIN, Word: litWord("c")},
},
},
},
{
[]string{
"foo bar >a",
"foo >a bar",
},
Stmt{
Node: litCmd("foo", "bar"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
},
},
},
{
[]string{`>a >\b`},
Stmt{
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("a")},
{Op: RDROUT, Word: litWord(`\b`)},
},
},
},
{
[]string{
"foo <<EOF\nbar\nEOF",
"foo <<EOF\nbar",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
},
{
[]string{"foo <<EOF\nl1\nl2\nl3\nEOF"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nl1\nl2\nl3\nEOF")},
},
},
},
{
[]string{"{ foo <<EOF\nbar\nEOF\n}"},
block(Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
}),
},
{
[]string{"foo <<EOF\nbar\nEOF\nfoo2"},
[]Stmt{
{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
litStmt("foo2"),
},
},
{
[]string{"foo <<FOOBAR\nbar\nFOOBAR"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("FOOBAR\nbar\nFOOBAR")},
},
},
},
{
[]string{"foo <<\"EOF\"\nbar\nEOF"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: HEREDOC, Word: litWord("\"EOF\"\nbar\nEOF")},
},
},
},
{
[]string{
"foo <<-EOF\nbar\nEOF",
"foo <<- EOF\nbar\nEOF",
},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: DHEREDOC, Word: litWord("EOF\nbar\nEOF")},
},
},
},
{
[]string{"foo >&2 <&0 2>file <>f2"},
Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: DPLOUT, Word: litWord("2")},
{Op: DPLIN, Word: litWord("0")},
{Op: RDROUT, N: lit("2"), Word: litWord("file")},
{Op: OPRDWR, Word: litWord("f2")},
},
},
},
{
[]string{"a >f1; b >f2"},
[]Stmt{
{
Node: litCmd("a"),
Redirs: []Redirect{{Op: RDROUT, Word: litWord("f1")}},
},
{
Node: litCmd("b"),
Redirs: []Redirect{{Op: RDROUT, Word: litWord("f2")}},
},
},
},
{
[]string{"!"},
Stmt{Negated: true},
},
{
[]string{"! foo"},
Stmt{
Negated: true,
Node: litCmd("foo"),
},
},
{
[]string{"foo &; bar", "foo & bar", "foo&bar"},
[]Stmt{
{
Node: litCmd("foo"),
Background: true,
},
litStmt("bar"),
},
},
{
[]string{"! if foo; then bar; fi >/dev/null &"},
Stmt{
Negated: true,
Node: IfStmt{
Conds: litStmts("foo"),
ThenStmts: litStmts("bar"),
},
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("/dev/null")},
},
Background: true,
},
},
{
[]string{"echo foo#bar"},
litCmd("echo", "foo#bar"),
},
{
[]string{"{ echo } }; }"},
block(litStmt("echo", "}", "}")),
},
{
[]string{`{foo}`},
litCmd(`{foo}`),
},
{
[]string{`{"foo"`},
cmd(
word(
lit("{"),
dblQuoted(lit("foo")),
),
),
},
{
[]string{`!foo`},
litCmd(`!foo`),
},
{
[]string{"$(foo bar)"},
cmd(
word(cmdSubst(litStmt("foo", "bar"))),
),
},
{
[]string{"$(foo | bar)"},
cmd(
word(cmdSubst(
stmt(BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
)),
),
},
{
[]string{"`foo`"},
cmd(
word(bckQuoted(litStmt("foo"))),
),
},
{
[]string{"`foo | bar`"},
cmd(
word(bckQuoted(
stmt(BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
)),
),
},
{
[]string{"`foo 'bar'`"},
cmd(
word(bckQuoted(litStmt("foo", "'bar'"))),
),
},
{
[]string{"`foo \"bar\"`"},
cmd(
word(bckQuoted(
stmt(Command{Args: []Word{
litWord("foo"),
word(dblQuoted(lit("bar"))),
}}),
)),
),
},
{
[]string{`echo "$foo"`},
cmd(
litWord("echo"),
word(dblQuoted(ParamExp{Short: true, Text: "foo"})),
),
},
{
[]string{`$@ $# $$`},
cmd(
word(ParamExp{Short: true, Text: "@"}),
word(ParamExp{Short: true, Text: "#"}),
word(ParamExp{Short: true, Text: "$"}),
),
},
{
[]string{`echo $'foo'`},
cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "'foo'"}),
),
},
{
[]string{`echo "${foo}"`},
cmd(
litWord("echo"),
word(dblQuoted(ParamExp{Text: "foo"})),
),
},
{
[]string{`echo "(foo)"`},
cmd(
litWord("echo"),
word(dblQuoted(lit("(foo)"))),
),
},
{
[]string{`echo "${foo}>"`},
cmd(
litWord("echo"),
word(dblQuoted(
ParamExp{Text: "foo"},
lit(">"),
)),
),
},
{
[]string{`echo "$(foo)"`},
cmd(
litWord("echo"),
word(dblQuoted(cmdSubst(litStmt("foo")))),
),
},
{
[]string{`echo "$(foo bar)"`, `echo "$(foo bar)"`},
cmd(
litWord("echo"),
word(dblQuoted(cmdSubst(litStmt("foo", "bar")))),
),
},
{
[]string{"echo \"`foo`\""},
cmd(
litWord("echo"),
word(dblQuoted(bckQuoted(litStmt("foo")))),
),
},
{
[]string{"echo \"`foo bar`\"", "echo \"`foo bar`\""},
cmd(
litWord("echo"),
word(dblQuoted(bckQuoted(litStmt("foo", "bar")))),
),
},
{
[]string{`echo '${foo}'`},
litCmd("echo", "'${foo}'"),
},
{
[]string{"echo ${foo bar}"},
cmd(
litWord("echo"),
word(ParamExp{Text: "foo bar"}),
),
},
{
[]string{"$(($x-1))"},
cmd(word(ArithmExp{Text: "$x-1"})),
},
{
[]string{"echo foo$bar"},
cmd(
litWord("echo"),
word(lit("foo"), ParamExp{Short: true, Text: "bar"}),
),
},
{
[]string{"echo foo$(bar)"},
cmd(
litWord("echo"),
word(lit("foo"), cmdSubst(litStmt("bar"))),
),
},
{
[]string{"echo foo${bar bar}"},
cmd(
litWord("echo"),
word(lit("foo"), ParamExp{Text: "bar bar"}),
),
},
{
[]string{"echo 'foo${bar'"},
litCmd("echo", "'foo${bar'"),
},
{
[]string{"(foo); bar"},
[]Node{
Subshell{Stmts: litStmts("foo")},
litCmd("bar"),
},
},
{
[]string{"foo; (bar)", "foo\n(bar)"},
[]Node{
litCmd("foo"),
Subshell{Stmts: litStmts("bar")},
},
},
{
[]string{"foo; (bar)", "foo\n(bar)"},
[]Node{
litCmd("foo"),
Subshell{Stmts: litStmts("bar")},
},
},
{
[]string{"a=\"\nbar\""},
cmd(
word(lit("a="), dblQuoted(lit("\nbar"))),
),
},
{
[]string{
"case $i in 1) foo;; 2 | 3*) bar; esac",
"case $i in 1) foo;; 2 | 3*) bar;; esac",
"case $i in (1) foo;; 2 | 3*) bar;; esac",
"case $i\nin\n1)\nfoo\n;;\n2 | 3*)\nbar\n;;\nesac",
},
CaseStmt{
Word: word(ParamExp{Short: true, Text: "i"}),
List: []PatternList{
{
Patterns: litWords("1"),
Stmts: litStmts("foo"),
},
{
Patterns: litWords("2", "3*"),
Stmts: litStmts("bar"),
},
},
},
},
{
[]string{"foo | while read a; do b; done"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(WhileStmt{
Conds: []Stmt{litStmt("read", "a")},
DoStmts: litStmts("b"),
}),
},
},
{
[]string{"while read l; do foo || bar; done"},
WhileStmt{
Conds: []Stmt{litStmt("read", "l")},
DoStmts: stmts(BinaryExpr{
Op: LOR,
X: litStmt("foo"),
Y: litStmt("bar"),
}),
},
},
{
[]string{"echo if while"},
litCmd("echo", "if", "while"),
},
{
[]string{"echo ${foo}if"},
cmd(
litWord("echo"),
word(ParamExp{Text: "foo"}, lit("if")),
),
},
{
[]string{"echo $if"},
cmd(
litWord("echo"),
word(ParamExp{Short: true, Text: "if"}),
),
},
{
[]string{"if; then; fi", "if\nthen\nfi"},
IfStmt{},
},
{
[]string{"while; do; done", "while\ndo\ndone"},
WhileStmt{},
},
{
[]string{"while; do; done", "while\ndo\n#foo\ndone"},
WhileStmt{},
},
{
[]string{"until; do; done", "until\ndo\ndone"},
UntilStmt{},
},
{
[]string{"for i; do; done", "for i\ndo\ndone"},
ForStmt{Name: lit("i")},
},
{
[]string{"case i in; esac"},
CaseStmt{Word: litWord("i")},
},
{
[]string{"f1 && f2 | bar"},
BinaryExpr{
Op: OR,
X: stmt(BinaryExpr{
Op: LAND,
X: litStmt("f1"),
Y: litStmt("f2"),
}),
Y: litStmt("bar"),
},
},
{
[]string{"foo | b1 && b2"},
BinaryExpr{
Op: OR,
X: litStmt("foo"),
Y: stmt(BinaryExpr{
Op: LAND,
X: litStmt("b1"),
Y: litStmt("b2"),
}),
},
},
{
[]string{"foo >f | bar"},
BinaryExpr{
Op: OR,
X: Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("f")},
},
},
Y: litStmt("bar"),
},
},
{
[]string{"foo >f || bar"},
BinaryExpr{
Op: LOR,
X: Stmt{
Node: litCmd("foo"),
Redirs: []Redirect{
{Op: RDROUT, Word: litWord("f")},
},
},
Y: litStmt("bar"),
},
},
}
// fullProg wraps an AST test fixture in a File. The fixture may be a
// single Stmt or Node, or a slice of either; any other value produces
// an empty File.
func fullProg(v interface{}) (f File) {
	switch x := v.(type) {
	case []Stmt:
		f.Stmts = x
	case Stmt:
		f.Stmts = []Stmt{x}
	case []Node:
		for _, n := range x {
			f.Stmts = append(f.Stmts, stmt(n))
		}
	case Node:
		f.Stmts = []Stmt{stmt(x)}
	}
	return f
}
// setPosRecurse walks the AST value v and overwrites every position field
// it contains with to. When diff is true, the walk fails the test if a
// position already equals to before being set — this is used to assert
// that the parser actually filled the field with a real (different) value.
//
// Cases that receive the node by value return the updated copy (the caller
// must use the return value); pointer and slice cases mutate in place and
// the function returns nil for them.
func setPosRecurse(t *testing.T, v interface{}, to Pos, diff bool) Node {
	// setPos assigns to a single position, checking the diff invariant first.
	setPos := func(p *Pos) {
		if diff && *p == to {
			t.Fatalf("Pos in %v (%T) is already %v", v, v, to)
		}
		*p = to
	}
	switch x := v.(type) {
	case []Stmt:
		for i := range x {
			setPosRecurse(t, &x[i], to, diff)
		}
	case *Stmt:
		setPos(&x.Position)
		// The inner node is passed by value, so store back the updated copy.
		x.Node = setPosRecurse(t, x.Node, to, diff)
		for i := range x.Redirs {
			setPos(&x.Redirs[i].OpPos)
			setPosRecurse(t, &x.Redirs[i].N, to, diff)
			setPosRecurse(t, &x.Redirs[i].Word, to, diff)
		}
	case Command:
		setPosRecurse(t, x.Args, to, diff)
		return x
	case []Word:
		for i := range x {
			setPosRecurse(t, &x[i], to, diff)
		}
	case *Word:
		setPosRecurse(t, x.Parts, to, diff)
	case []Node:
		for i := range x {
			x[i] = setPosRecurse(t, x[i], to, diff)
		}
	case *Lit:
		setPos(&x.ValuePos)
	case Lit:
		setPos(&x.ValuePos)
		return x
	case Subshell:
		setPos(&x.Lparen)
		setPos(&x.Rparen)
		setPosRecurse(t, x.Stmts, to, diff)
		return x
	case Block:
		setPos(&x.Lbrace)
		setPos(&x.Rbrace)
		setPosRecurse(t, x.Stmts, to, diff)
		return x
	case IfStmt:
		setPos(&x.If)
		setPos(&x.Fi)
		setPosRecurse(t, x.Conds, to, diff)
		setPosRecurse(t, x.ThenStmts, to, diff)
		for i := range x.Elifs {
			setPos(&x.Elifs[i].Elif)
			setPosRecurse(t, x.Elifs[i].Conds, to, diff)
			setPosRecurse(t, x.Elifs[i].ThenStmts, to, diff)
		}
		setPosRecurse(t, x.ElseStmts, to, diff)
		return x
	case WhileStmt:
		setPos(&x.While)
		setPos(&x.Done)
		setPosRecurse(t, x.Conds, to, diff)
		setPosRecurse(t, x.DoStmts, to, diff)
		return x
	case UntilStmt:
		setPos(&x.Until)
		setPos(&x.Done)
		setPosRecurse(t, x.Conds, to, diff)
		setPosRecurse(t, x.DoStmts, to, diff)
		return x
	case ForStmt:
		setPos(&x.For)
		setPos(&x.Done)
		setPosRecurse(t, &x.Name, to, diff)
		setPosRecurse(t, x.WordList, to, diff)
		setPosRecurse(t, x.DoStmts, to, diff)
		return x
	case DblQuoted:
		setPos(&x.Quote)
		setPosRecurse(t, x.Parts, to, diff)
		return x
	case BckQuoted:
		setPos(&x.Quote)
		setPosRecurse(t, x.Stmts, to, diff)
		return x
	case BinaryExpr:
		setPos(&x.OpPos)
		setPosRecurse(t, &x.X, to, diff)
		setPosRecurse(t, &x.Y, to, diff)
		return x
	case FuncDecl:
		setPosRecurse(t, &x.Name, to, diff)
		setPosRecurse(t, &x.Body, to, diff)
		return x
	case ParamExp:
		setPos(&x.Exp)
		return x
	case ArithmExp:
		setPos(&x.Exp)
		return x
	case CmdSubst:
		setPos(&x.Exp)
		setPos(&x.Rparen)
		setPosRecurse(t, x.Stmts, to, diff)
		return x
	case CaseStmt:
		setPos(&x.Case)
		setPos(&x.Esac)
		setPosRecurse(t, &x.Word, to, diff)
		for _, pl := range x.List {
			setPosRecurse(t, pl.Patterns, to, diff)
			setPosRecurse(t, pl.Stmts, to, diff)
		}
		return x
	case nil:
	default:
		// Any node type not handled above is a bug in this walker.
		panic(v)
	}
	return nil
}
// TestNodePos checks that the position walk reaches every node: after
// setPosRecurse runs with diff enabled, each statement and its inner
// node must report the sentinel position.
func TestNodePos(t *testing.T) {
	sentinel := Pos{
		Line:   12,
		Column: 34,
	}
	defaultPos = sentinel
	extra := []interface{}{
		Command{},
		Command{Args: []Word{
			{},
		}},
	}
	cases := astTests
	for _, ast := range extra {
		cases = append(cases, testCase{nil, ast})
	}
	for _, tc := range cases {
		prog := fullProg(tc.ast)
		setPosRecurse(t, prog.Stmts, sentinel, true)
		for _, s := range prog.Stmts {
			if s.Pos() != sentinel {
				t.Fatalf("Found unexpected Pos in %v", s)
			}
			if n := s.Node; n != nil && n.Pos() != sentinel {
				t.Fatalf("Found unexpected Pos in %v", n)
			}
		}
	}
}
// TestParseAST parses every input string of each AST test case and
// verifies that the resulting tree matches the expected one, with all
// positions zeroed out so only structure is compared.
func TestParseAST(t *testing.T) {
	for _, tc := range astTests {
		expected := fullProg(tc.ast)
		setPosRecurse(t, expected.Stmts, Pos{}, false)
		for _, src := range tc.strs {
			parsed, err := Parse(strings.NewReader(src), "")
			if err != nil {
				t.Fatalf("Unexpected error in %q: %v", src, err)
			}
			setPosRecurse(t, parsed.Stmts, Pos{}, true)
			if reflect.DeepEqual(parsed, expected) {
				continue
			}
			t.Fatalf("AST mismatch in %q\nwant: %s\ngot: %s\ndiff:\n%s",
				src, expected, parsed,
				strings.Join(pretty.Diff(expected, parsed), "\n"),
			)
		}
	}
}
// TestPrintAST verifies that printing an AST yields its canonical source
// form, which by convention is the first string of each test case.
func TestPrintAST(t *testing.T) {
	for _, tc := range astTests {
		prog := fullProg(tc.ast)
		if want, got := tc.strs[0], prog.String(); got != want {
			t.Fatalf("AST print mismatch\nwant: %s\ngot: %s",
				want, got)
		}
	}
}
|
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package planner
import (
"bytes"
"context"
"fmt"
"strings"
"testing"
"github.com/google/badwolf/bql/grammar"
"github.com/google/badwolf/bql/semantic"
"github.com/google/badwolf/io"
"github.com/google/badwolf/storage"
"github.com/google/badwolf/storage/memory"
"github.com/google/badwolf/triple"
"github.com/google/badwolf/triple/literal"
)
const (
	// originalTriples is the base data set: a small family tree, car
	// purchases anchored at different times, a time-bound "turned"
	// predicate, and typed literals (int64 heights and a text tag).
	originalTriples = `/u<joe> "parent_of"@[] /u<mary>
/u<joe> "parent_of"@[] /u<peter>
/u<peter> "parent_of"@[] /u<john>
/u<peter> "parent_of"@[] /u<eve>
/u<peter> "bought"@[2016-01-01T00:00:00-08:00] /c<mini>
/u<peter> "bought"@[2016-02-01T00:00:00-08:00] /c<model s>
/u<peter> "bought"@[2016-03-01T00:00:00-08:00] /c<model x>
/u<peter> "bought"@[2016-04-01T00:00:00-08:00] /c<model y>
/c<mini> "is_a"@[] /t<car>
/c<model s> "is_a"@[] /t<car>
/c<model x> "is_a"@[] /t<car>
/c<model y> "is_a"@[] /t<car>
/l<barcelona> "predicate"@[] "turned"@[2016-01-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-02-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-03-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-04-01T00:00:00-08:00]
/u<alice> "height_cm"@[] "174"^^type:int64
/u<alice> "tag"@[] "abc"^^type:text
/u<bob> "height_cm"@[] "151"^^type:int64
/u<charlie> "height_cm"@[] "174"^^type:int64
/u<delta> "height_cm"@[] "174"^^type:int64
`
	// tripleFromIssue40 reproduces the graph from issue 40
	// (https://github.com/google/badwolf/issues/40): rooms connected to
	// each other and a book moving between rooms over time.
	tripleFromIssue40 = `/room<Hallway> "connects_to"@[] /room<Kitchen>
/room<Kitchen> "connects_to"@[] /room<Hallway>
/room<Kitchen> "connects_to"@[] /room<Bathroom>
/room<Kitchen> "connects_to"@[] /room<Bedroom>
/room<Bathroom> "connects_to"@[] /room<Kitchen>
/room<Bedroom> "connects_to"@[] /room<Kitchen>
/room<Bedroom> "connects_to"@[] /room<Fire Escape>
/room<Fire Escape> "connects_to"@[] /room<Kitchen>
/item/book<000> "in"@[2016-04-10T4:21:00.000000000Z] /room<Hallway>
/item/book<000> "in"@[2016-04-10T4:23:00.000000000Z] /room<Kitchen>
/item/book<000> "in"@[2016-04-10T4:25:00.000000000Z] /room<Bedroom>
`
	// constructTestSrcTriples is the source graph for CONSTRUCT tests.
	constructTestSrcTriples = `/person<A> "met"@[] /person<B>
/person<B> "met"@[] /person<C>
/person<C> "met"@[] /person<D>
/person<A> "met_at"@[2016-04-10T4:25:00.000000000Z] /person<B>
/person<B> "met_at"@[2016-04-10T4:25:00.000000000Z] /person<C>
/city<A> "is_connected_to"@[] /city<B>
/city<A> "is_connected_to"@[] /city<C>
/city<B> "is_connected_to"@[] /city<D>
/city<B> "is_connected_to"@[] /city<E>
/city<C> "is_connected_to"@[] /city<D>
`
	// constructTestDestTriples is the destination graph for CONSTRUCT tests.
	constructTestDestTriples = `/person<D> "met"@[] /person<E>
`
	// deconstructTestSrcTriples is the source graph for DECONSTRUCT tests.
	deconstructTestSrcTriples = `/person<A> "lives_in"@[] /city<A>
/person<B> "lives_in"@[] /city<B>
/person<C> "lives_in"@[] /city<A>
/person<D> "lives_in"@[] /city<B>
`
	// deconstructTestDestTriples is the destination graph for DECONSTRUCT tests.
	deconstructTestDestTriples = `/person<A> "met"@[] /person<B>
/person<B> "met"@[] /person<C>
/person<C> "met"@[] /person<D>
/person<D> "met"@[] /person<A>
/person<A> "met"@[] /person<C>
/person<B> "met"@[] /person<D>
`
	// testTriples is the combined data set loaded by the query tests.
	testTriples = originalTriples + tripleFromIssue40
)
// insertAndDeleteTest exercises a full insert/delete round trip against
// graph "?a" of the default memory store: it inserts three triples,
// verifies all three are retrievable, deletes them again, and verifies the
// graph is empty. The caller is responsible for creating and removing the
// "?a" graph.
func insertAndDeleteTest(t *testing.T) {
	ctx := context.Background()
	// Testing insertion of triples.
	bql := `insert data into ?a {/_<foo> "bar"@[] /_<foo> .
/_<foo> "bar"@[] "bar"@[1975-01-01T00:01:01.999999999Z] .
/_<foo> "bar"@[] "yeah"^^type:text};`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		// Fatal rather than Error: p is nil on failure and is used below.
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Fatalf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err := New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		// Fatal rather than Error: pln is nil on failure and is used below.
		t.Fatalf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err = pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	g, err := memory.DefaultStore.Graph(ctx, "?a")
	if err != nil {
		t.Fatalf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?a", err)
	}
	i := 0
	ts := make(chan *triple.Triple)
	// The feeder must run concurrently with the draining loop below since
	// the channel is unbuffered.
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	for range ts {
		i++
	}
	if i != 3 {
		t.Errorf("g.Triples should have returned 3 triples, returned %d instead", i)
	}
	// Testing deletion of triples.
	bql = `delete data from ?a {/_<foo> "bar"@[] /_<foo> .
/_<foo> "bar"@[] "bar"@[1975-01-01T00:01:01.999999999Z] .
/_<foo> "bar"@[] "yeah"^^type:text};`
	p, err = grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm = &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Fatalf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err = New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		t.Fatalf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err = pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	g, err = memory.DefaultStore.Graph(ctx, "?a")
	if err != nil {
		t.Fatalf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?a", err)
	}
	i = 0
	ts = make(chan *triple.Triple)
	// BUG FIX: this feeder previously ran on the test goroutine before the
	// draining loop started, which deadlocks on the unbuffered channel.
	// Run it concurrently, mirroring the insertion check above.
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	for range ts {
		i++
	}
	if i != 0 {
		t.Errorf("g.Triples should have returned 0 triples, returned %d instead", i)
	}
}
// TestPlannerInsertDeleteDoesNotFail creates graph "?a", runs the insert
// and delete round trip against it, and deletes the graph afterwards.
func TestPlannerInsertDeleteDoesNotFail(t *testing.T) {
	ctx := context.Background()
	const gn = "?a"
	if _, err := memory.DefaultStore.NewGraph(ctx, gn); err != nil {
		t.Errorf("memory.DefaultStore.NewGraph(%q) should have not failed with error %v", gn, err)
	}
	insertAndDeleteTest(t)
	if err := memory.DefaultStore.DeleteGraph(ctx, gn); err != nil {
		t.Errorf("memory.DefaultStore.DeleteGraph(%q) should have not failed with error %v", gn, err)
	}
}
// TestPlannerCreateGraph verifies that executing a CREATE GRAPH statement
// makes the named graphs available in the default memory store.
func TestPlannerCreateGraph(t *testing.T) {
	ctx := context.Background()
	// Start from a clean slate; errors are ignored on purpose since the
	// graphs may legitimately not exist yet.
	memory.DefaultStore.DeleteGraph(ctx, "?foo")
	memory.DefaultStore.DeleteGraph(ctx, "?bar")
	bql := `create graph ?foo, ?bar;`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		// Fatal rather than Error: p is nil on failure and would be
		// dereferenced below, panicking and masking the real error.
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Fatalf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err := New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		// Fatal rather than Error: pln is nil on failure and is used below.
		t.Fatalf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err := pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	if _, err := memory.DefaultStore.Graph(ctx, "?foo"); err != nil {
		t.Errorf("planner.Execute: failed to create graph %q with error %v", "?foo", err)
	}
	if _, err := memory.DefaultStore.Graph(ctx, "?bar"); err != nil {
		t.Errorf("planner.Execute: failed to create graph %q with error %v", "?bar", err)
	}
}
// TestPlannerDropGraph verifies that executing a DROP GRAPH statement
// removes the named graphs from the default memory store.
func TestPlannerDropGraph(t *testing.T) {
	ctx := context.Background()
	// Recreate the graphs from scratch; deletion errors are ignored on
	// purpose since the graphs may legitimately not exist yet.
	memory.DefaultStore.DeleteGraph(ctx, "?foo")
	memory.DefaultStore.DeleteGraph(ctx, "?bar")
	memory.DefaultStore.NewGraph(ctx, "?foo")
	memory.DefaultStore.NewGraph(ctx, "?bar")
	bql := `drop graph ?foo, ?bar;`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		// Fatal rather than Error: p is nil on failure and would be
		// dereferenced below, panicking and masking the real error.
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Fatalf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err := New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		// Fatal rather than Error: pln is nil on failure and is used below.
		t.Fatalf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err := pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	if g, err := memory.DefaultStore.Graph(ctx, "?foo"); err == nil {
		t.Errorf("planner.Execute: failed to drop graph %q; returned %v", "?foo", g)
	}
	if g, err := memory.DefaultStore.Graph(ctx, "?bar"); err == nil {
		t.Errorf("planner.Execute: failed to drop graph %q; returned %v", "?bar", g)
	}
}
// populateStoreWithTriples creates graph gn in store s, loads the given
// newline-separated triples into it, and fails the test if the number of
// stored triples does not match the number of input lines.
func populateStoreWithTriples(ctx context.Context, s storage.Store, gn string, triples string, tb testing.TB) {
	g, err := s.NewGraph(ctx, gn)
	if err != nil {
		tb.Fatalf("memory.NewGraph failed to create \"%v\" with error %v", gn, err)
	}
	b := bytes.NewBufferString(triples)
	if _, err := io.ReadIntoGraph(ctx, g, b, literal.DefaultBuilder()); err != nil {
		tb.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	trpls := make(chan *triple.Triple)
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, trpls); err != nil {
			// BUG FIX: Fatal/FailNow must be called from the goroutine
			// running the test (see testing package docs); calling it here
			// would only stop this helper goroutine. Report via Error
			// instead so the failure is still recorded.
			tb.Error(err)
		}
	}()
	// Drain the channel and count the triples the graph reports.
	cnt := 0
	for range trpls {
		cnt++
	}
	// The triples constant ends with a trailing newline, hence the -1.
	if got, want := cnt, len(strings.Split(triples, "\n"))-1; got != want {
		tb.Fatalf("Failed to import all test triples; got %v, want %v", got, want)
	}
}
// TestPlannerQuery runs a table of BQL SELECT queries against an in-memory
// graph "?test" populated with testTriples, and checks for each query the
// number of bindings and the number of rows in the resulting table.
func TestPlannerQuery(t *testing.T) {
	testTable := []struct {
		q         string // BQL query to execute.
		nBindings int    // expected number of bindings in the result table.
		nRows     int    // expected number of rows in the result table.
	}{
		{
			q:         `select ?s, ?p, ?o from ?test where {?s ?p ?o};`,
			nBindings: 3,
			// testTriples ends with a trailing newline, hence the -1.
			nRows: len(strings.Split(testTriples, "\n")) - 1,
		},
		{
			q:         `select ?s as ?s1, ?p as ?p1, ?o as ?o1 from ?test where {?s ?p ?o};`,
			nBindings: 3,
			nRows:     len(strings.Split(testTriples, "\n")) - 1,
		},
		{
			q:         `select ?p, ?o from ?test where {/u<joe> ?p ?o};`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q:         `select ?p as ?p1, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q:         `select ?s, ?p from ?test where {?s ?p /t<car>};`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?o from ?test where {?s "parent_of"@[] ?o};`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {/u<joe> as ?s "parent_of"@[] as ?p /u<mary> as ?o};`,
			nBindings: 3,
			nRows:     1,
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {/u<unknown> as ?s "parent_of"@[] as ?p /u<mary> as ?o};`,
			nBindings: 3,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<joe> "parent_of"@[] ?o};`,
			nBindings: 1,
			nRows:     2,
		},
		{
			q:         `select ?p from ?test where {/u<joe> ?p /u<mary>};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q:         `select ?s from ?test where {?s "is_a"@[] /t<car>};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?s as ?s1 from ?test where {?s "is_a"@[] /t<car>};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/u<joe> "parent_of"@[] ?o. ?o "parent_of"@[] /u<john>};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q:         `select ?s, ?o from ?test where {/u<joe> "parent_of"@[] ?o. ?o "parent_of"@[] ?s};`,
			nBindings: 2,
			nRows:     2,
		},
		// Cross-product clauses: the expected row count is the square of
		// the number of triples in the data set.
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 6,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k, ?l from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 5,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 4,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 3,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 2,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 1,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		// Temporal bounds on predicates.
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[,] ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[,2015-01-01T00:00:00-08:00] ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2017-01-01T00:00:00-08:00,] ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[,] as ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[,2015-01-01T00:00:00-08:00] as ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2017-01-01T00:00:00-08:00,] as ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] as ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		// Grouping and aggregation.
		{
			q:         `select ?grandparent, count(?name) as ?grandchildren from ?test where {/u<joe> as ?grandparent "parent_of"@[] ?offspring . ?offspring "parent_of"@[] ?name} group by ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?grandparent, count(distinct ?name) as ?grandchildren from ?test where {/u<joe> as ?grandparent "parent_of"@[] ?offspring . ?offspring "parent_of"@[] ?name} group by ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		// Ordering, HAVING, LIMIT, and global temporal clauses.
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m} order by ?s, ?p, ?o, ?k, ?l, ?m;`,
			nBindings: 6,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m} order by ?s, ?p, ?o, ?k, ?l, ?m having not(?s = ?s);`,
			nBindings: 6,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] as ?o} LIMIT "2"^^type:int64;`,
			nBindings: 1,
			nRows:     2,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} before 2016-03-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     3,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} after 2016-02-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     3,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} between 2014-01-01T00:00:00-08:00, 2017-01-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `SELECT ?grandparent, COUNT(?grandparent) AS ?number_of_grandchildren FROM ?test WHERE{ ?gp ID ?grandparent "parent_of"@[] ?c . ?c "parent_of"@[] ?gc ID ?gc } GROUP BY ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		{ // Issue 40 (https://github.com/google/badwolf/issues/40)
			q:         `SELECT ?item, ?t FROM ?test WHERE {?item "in"@[?t] /room<Bedroom>};`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `SHOW GRAPHS;`,
			nBindings: 1,
			nRows:     1,
		},
		// HAVING comparisons on literals and nodes.
		{
			q:         `select ?s, ?o from ?test where {?s "tag"@[] ?o} having ?o = "abc"^^type:text;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height > "0"^^type:int64;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height > "160"^^type:int64;`,
			nBindings: 2,
			nRows:     3,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height = "151"^^type:int64;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?s > /u<zzzzz>;`,
			nBindings: 2,
			nRows:     0,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?s > /u<alice>;`,
			nBindings: 2,
			nRows:     3,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?s > /u<bob>;`,
			nBindings: 2,
			nRows:     2,
		},
		/*
			/c<model s> "is_a"@[] /t<car>
			/c<model x> "is_a"@[] /t<car>
			/c<model y> "is_a"@[] /t<car>
		*/
		// OPTIONAL clauses.
		{
			q:         `SELECT ?car FROM ?test WHERE { ?car "is_a"@[] /t<car> };`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?car
			    FROM ?test
			    WHERE {
			      /c<model s> as ?car "is_a"@[] /t<car>
			    };`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?car
			    FROM ?test
			    WHERE {
			      ?car "is_a"@[] /t<car> .
			      /c<model z> as ?car "is_a"@[] /t<car>
			    };`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q: `SELECT ?car
			    FROM ?test
			    WHERE {
			      ?car "is_a"@[] /t<car> .
			      OPTIONAL { /c<model O> "is_a"@[] /t<car> }
			    };`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?car
			    FROM ?test
			    WHERE {
			      ?car "is_a"@[] /t<car> .
			      OPTIONAL { ?car "is_a"@[] /t<car> }
			    };`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?cars, ?type
			    FROM ?test
			    WHERE {
			      ?cars "is_a"@[] /t<car> .
			      OPTIONAL { ?cars "is_a"@[] ?type }
			    };`,
			nBindings: 2,
			nRows:     4,
		},
		// HAVING over mixed types.
		{
			q: `SELECT ?p, ?o
			    FROM ?test
			    WHERE {
			      /c<mini> ?p ?o
			    }
			    HAVING ?o > "37"^^type:int64;`,
			nBindings: 2,
			nRows:     0,
		},
		{
			q: `SELECT ?o
			    FROM ?test
			    WHERE {
			      /u<alice> "height_cm"@[] ?o
			    }
			    HAVING ?o = /u<peter>;`,
			nBindings: 1,
			nRows:     0,
		},
		// ID / TYPE / AT bindings combined with HAVING.
		{
			q: `SELECT ?s_id, ?height
			    FROM ?test
			    WHERE {
			      ?s ID ?s_id "height_cm"@[] ?height
			    }
			    HAVING ?s_id = "alice"^^type:text;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?p_id, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p ID ?p_id ?o
			    }
			    HAVING ?p_id < "parent_of"^^type:text;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_type
			    FROM ?test
			    WHERE {
			      ?s TYPE ?s_type ?p ?o
			    }
			    HAVING ?s_type = "/c"^^type:text;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_type
			    FROM ?test
			    WHERE {
			      ?s TYPE ?s_type ?p ?o
			    }
			    HAVING ?s_type < "/l"^^type:text;`,
			nBindings: 2,
			nRows:     7,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p ID ?p_id ?o
			    }
			    HAVING ?p_id = "bought"^^type:text
			    AFTER 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p ID ?p_id ?o
			    }
			    HAVING ?p_id < "parent_of"^^type:text
			    BEFORE 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     3,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p ID ?p_id ?o
			    }
			    HAVING ?p_id = "bought"^^type:text
			    BETWEEN 2016-02-01T00:00:00-08:00, 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p ID ?p_id ?o
			    }
			    HAVING ?p_id < "work_with"^^type:text
			    BEFORE 2016-02-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_type
			    FROM ?test
			    WHERE {
			      ?s ?p ?o TYPE ?o_type
			    }
			    HAVING (?s = /u<joe>) OR (?s = /l<barcelona>) OR (?s = /u<alice>);`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time, ?o
			    FROM ?test
			    WHERE {
			      /u<peter> ?p AT ?time ?o
			    };`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?time, ?o
			    FROM ?test
			    WHERE {
			      ?s "parent_of"@[?time] ?o
			    };`,
			nBindings: 3,
			nRows:     0,
		},
		// AS / ID / TYPE binding order permutations.
		{
			q: `SELECT ?s, ?s_alias, ?s_id, ?s_type
			    FROM ?test
			    WHERE {
			      ?s AS ?s_alias ID ?s_id TYPE ?s_type "parent_of"@[] ?o
			    };`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_alias, ?s_id, ?s_type
			    FROM ?test
			    WHERE {
			      ?s AS ?s_alias TYPE ?s_type ID ?s_id "parent_of"@[] ?o
			    };`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_alias, ?o_id, ?o_type
			    FROM ?test
			    WHERE {
			      ?s "parent_of"@[] ?o AS ?o_alias ID ?o_id TYPE ?o_type
			    };`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_alias, ?o_id, ?o_type
			    FROM ?test
			    WHERE {
			      ?s "parent_of"@[] ?o AS ?o_alias TYPE ?o_type ID ?o_id
			    };`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o
			    FROM ?test
			    WHERE {
			      /u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o
			    }
			    BEFORE 2016-03-01T00:00:00-08:00
			    LIMIT "1"^^type:int64;`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?o
			    FROM ?test
			    WHERE {
			      /u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o
			    }
			    AFTER 2016-02-01T00:00:00-08:00
			    LIMIT "1"^^type:int64;`,
			nBindings: 1,
			nRows:     1,
		},
		// HAVING over time bindings.
		{
			q: `SELECT ?p, ?time
			    FROM ?test
			    WHERE {
			      /u<peter> ?p AT ?time ?o
			    }
			    HAVING ?time < 2016-03-01T00:00:00-08:00;`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time
			    FROM ?test
			    WHERE {
			      /u<peter> ?p AT ?time ?o
			    }
			    HAVING ?time < 2016-03-01T00:00:00-08:00
			    LIMIT "1"^^type:int64;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?s, ?p, ?p_id, ?time
			    FROM ?test
			    WHERE {
			      ?s ?p ID ?p_id AT ?time ?o
			    }
			    HAVING (?p_id < "in"^^type:text) AND (?time > 2016-02-01T00:00:00-08:00);`,
			nBindings: 4,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time
			    FROM ?test
			    WHERE {
			      /u<peter> ?p AT ?time ?o
			    }
			    HAVING ?time = 2016-01-01T01:00:00-07:00;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?p, ?time
			    FROM ?test
			    WHERE {
			      /u<peter> ?p AT ?time ?o
			    }
			    HAVING ?time > 2016-02-01T00:00:00-07:00;`,
			nBindings: 2,
			nRows:     3,
		},
		// HAVING over full predicates.
		{
			q: `SELECT ?s, ?p, ?o
			    FROM ?test
			    WHERE {
			      ?s ?p ?o
			    }
			    HAVING ?p = "height_cm"@[];`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?p, ?o
			    FROM ?test
			    WHERE {
			      ?s ?p ?o
			    }
			    HAVING ?p = "bought"@[2016-03-01T00:00:00-08:00];`,
			nBindings: 3,
			nRows:     1,
		},
		{
			q: `SELECT ?s, ?p, ?o
			    FROM ?test
			    WHERE {
			      ?s ?p ?o
			    }
			    HAVING (?p = "tag"@[]) OR (?p = "bought"@[2016-02-01T00:00:00-08:00]);`,
			nBindings: 3,
			nRows:     2,
		},
		// Trailing dots after the last graph clause are accepted.
		{
			q: `SELECT ?p, ?o
			    FROM ?test
			    WHERE {
			      /u<joe> ?p ?o .
			    };`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?o
			    FROM ?test
			    WHERE {
			      /u<joe> "parent_of"@[] ?o .
			      ?o "parent_of"@[] /u<john> .
			    };`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?p1, ?p2
			    FROM ?test
			    WHERE {
			      /u<joe> ?p1 /u<mary> .
			      /u<joe> ?p2 /u<peter> .
			    };`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?car
			    FROM ?test
			    WHERE {
			      ?car "is_a"@[] /t<car> .
			      OPTIONAL { /c<model O> "is_a"@[] /t<car> } .
			    };`,
			nBindings: 1,
			nRows:     4,
		},
	}
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?test", testTriples, t)
	for _, entry := range testTable {
		// Setup for test:
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.q, 1), st); err != nil {
			t.Fatalf("parser.Parse failed for query \"%s\"\nwith error: %v", entry.q, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Fatalf("planner.New failed to create a valid query plan with error: %v", err)
		}
		// Actual test:
		tbl, err := plnr.Execute(ctx)
		if err != nil {
			t.Fatalf("planner.Execute(%s)\n= _, %v; want _, nil", entry.q, err)
		}
		if got, want := len(tbl.Bindings()), entry.nBindings; got != want {
			t.Errorf("planner.Execute(%s)\n= a Table with %d bindings; want %d", entry.q, got, want)
		}
		if got, want := len(tbl.Rows()), entry.nRows; got != want {
			t.Errorf("planner.Execute(%s)\n= a Table with %d rows; want %d\nTable:\n%v\n", entry.q, got, want, tbl)
		}
	}
}
// TestPlannerQueryError verifies that semantically invalid HAVING clauses
// (ordered comparisons involving nodes, or comparing an ID against a node)
// are rejected at execution time rather than returning results.
func TestPlannerQueryError(t *testing.T) {
	cases := []struct {
		q string
	}{
		{
			q: `SELECT ?s_id, ?height
			    FROM ?test
			    WHERE {
			      ?s ID ?s_id "height_cm"@[] ?height
			    }
			    HAVING ?s_id > "37"^^type:int64;`,
		},
		{
			q: `SELECT ?s_id, ?height
			    FROM ?test
			    WHERE {
			      ?s ID ?s_id "height_cm"@[] ?height
			    }
			    HAVING ?s_id = /u<alice>;`,
		},
		{
			q: `SELECT ?s, ?p, ?o
			    FROM ?test
			    WHERE {
			      ?s ?p ?o
			    }
			    HAVING ?p < "height_cm"@[];`,
		},
		{
			q: `SELECT ?s, ?p, ?o
			    FROM ?test
			    WHERE {
			      ?s ?p ?o
			    }
			    HAVING ?p > "bought"@[2016-01-01T00:00:00-08:00];`,
		},
	}
	store, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, store, "?test", testTriples, t)
	for _, tc := range cases {
		// Build a fresh parser for every case so state cannot leak between runs.
		parser, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		stmt := &semantic.Statement{}
		if err := parser.Parse(grammar.NewLLk(tc.q, 1), stmt); err != nil {
			t.Fatalf("parser.Parse failed for query \"%s\"\nwith error: %v", tc.q, err)
		}
		plan, err := New(ctx, store, stmt, 0, 10, nil)
		if err != nil {
			t.Fatalf("planner.New failed to create a valid query plan with error: %v", err)
		}
		// Every query in this table is expected to fail at execution time.
		if _, err := plan.Execute(ctx); err == nil {
			t.Errorf("planner.Execute(%s)\n= _, nil; want _, error", tc.q)
		}
	}
}
// TestPlannerConstructAddsCorrectNumberofTriples runs a set of CONSTRUCT
// statements against a freshly populated source graph and checks that the
// destination graph ends up with the expected total number of triples
// (including the extra triples produced by blank-node reification).
func TestPlannerConstructAddsCorrectNumberofTriples(t *testing.T) {
	// Triple counts of the source and destination fixtures (inputs end with a
	// trailing newline, hence the -1).
	sts, dts := len(strings.Split(constructTestSrcTriples, "\n"))-1, len(strings.Split(constructTestDestTriples, "\n"))-1
	testTable := []struct {
		s    string
		trps int
	}{
		{
			s: `construct {?s ?p ?o}
			    into ?dest
			    from ?src
			    where {?s ?p ?o};`,
			trps: sts + dts,
		},
		{
			s: `construct {?s "met"@[] ?o; "location"@[] /city<New York>}
			    into ?dest
			    from ?src
			    where {?s "met"@[] ?o};`,
			// 3 matching triples * 4 new triples per matched triple due to reification + 1 triple in dest graph.
			trps: 3*4 + dts,
		},
		{
			s: `construct {?s "met"@[] ?o; "location"@[] /city<New York>;
			               "outcome"@[] "good"^^type:text }
			    into ?dest
			    from ?src
			    where {?s "met"@[] ?o};`,
			// 3 matching triples * 5 new triples per matched triple due to reification + 1 triple in dest graph.
			trps: 3*5 + dts,
		},
		{
			s: `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
			               "outcome"@[] "good"^^type:text .
			               ?s "connected_to"@[] ?o}
			    into ?dest
			    from ?src
			    where {?s "met"@[] ?o.
			           ?s "met_at"@[?t] ?o};`,
			// 2 matching triples * (5 new triples due to reification + 1 explicitly constructed triple per matched triple) +
			// 1 triple in dest graph.
			trps: 2*6 + dts,
		},
		{
			s: `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
			               "outcome"@[] "good"^^type:text .
			               ?s "connected_to"@[] ?o; "at"@[?t] /city<New York> }
			    into ?dest
			    from ?src
			    where {?s "met"@[] ?o.
			           ?s "met_at"@[?t] ?o};`,
			// 2 matching triples * 9 new triples due to reification + 1 triple in dest graph.
			trps: 2*9 + dts,
		},
		{
			s: `construct {?d2 "is_2_hops_from"@[] ?s1 }
			    into ?dest
			    from ?src
			    where {?s1 "is_connected_to"@[] ?d1.
			           ?d1 "is_connected_to"@[] ?d2};`,
			// 2 new triples (/city<A> "is_2_hops_from"@[] /city<D>, /city<A> "is_2_hops_from"@[] /city<E>) + 1 triple in dest graph.
			trps: 3,
		},
	}
	for _, entry := range testTable {
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
			continue
		}
		// Fresh store per case so previously constructed triples do not skew counts.
		s, ctx := memory.NewStore(), context.Background()
		populateStoreWithTriples(ctx, s, "?src", constructTestSrcTriples, t)
		populateStoreWithTriples(ctx, s, "?dest", constructTestDestTriples, t)
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.s, 1), st); err != nil {
			t.Errorf("Parser.consume: failed to parse query %q with error %v", entry.s, err)
			continue
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Errorf("planner.New failed to create a valid query plan with error %v", err)
			continue
		}
		_, err = plnr.Execute(ctx)
		if err != nil {
			t.Errorf("planner.Execute failed for query %q with error %v", entry.s, err)
			continue
		}
		g, err := s.Graph(ctx, "?dest")
		if err != nil {
			// Bug fix: the error message previously reported the wrong graph
			// name ("?test"); also skip the count to avoid a nil dereference.
			t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
			continue
		}
		i := 0
		ts := make(chan *triple.Triple)
		go func() {
			// t.Error (not t.Fatal) is the only safe failure call here: this
			// runs on a goroutine other than the test goroutine.
			if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
				t.Error(err)
			}
		}()
		for range ts {
			i++
		}
		if i != entry.trps {
			t.Errorf("g.Triples should have returned %v triples, returned %v instead", entry.trps, i)
		}
	}
}
// TestPlannerConstructAddsCorrectTriples executes a CONSTRUCT statement that
// relies on predicate reification and verifies that exactly the expected
// triples land in the destination graph. Reification mints fresh blank nodes
// with unpredictable IDs, so the expected triples use placeholder blank nodes
// (/_<b1>, /_<b2>); the test first computes a consistent mapping from the
// generated blank nodes to those placeholders before comparing triples.
func TestPlannerConstructAddsCorrectTriples(t *testing.T) {
	bql := `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
	                   "outcome"@[] "good"^^type:text.
	                   ?s "connected_to"@[] ?o }
	        into ?dest
	        from ?src
	        where {?s "met"@[] ?o.
	               ?s "met_at"@[?t] ?o};`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		// Fatal instead of Error: the rest of the test would dereference nil.
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?src", constructTestSrcTriples, t)
	populateStoreWithTriples(ctx, s, "?dest", "", t)
	st := &semantic.Statement{}
	if err := p.Parse(grammar.NewLLk(bql, 1), st); err != nil {
		t.Fatalf("Parser.consume: failed to parse query %q with error %v", bql, err)
	}
	plnr, err := New(ctx, s, st, 0, 10, nil)
	if err != nil {
		t.Fatalf("planner.New failed to create a valid query plan with error %v", err)
	}
	_, err = plnr.Execute(ctx)
	if err != nil {
		t.Fatalf("planner.Execute failed for query %q with error %v", bql, err)
	}
	g, err := s.Graph(ctx, "?dest")
	if err != nil {
		// Bug fix: the message previously reported the wrong graph name ("?test").
		t.Fatalf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
	}
	ts := make(chan *triple.Triple)
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	// bnm maps each generated blank node to its candidate placeholder nodes.
	bnm := make(map[string]map[string]bool)
	// bns holds the final generated-blank-node -> placeholder assignment.
	bns := make(map[string]string)
	// bna tracks which placeholders are still available for assignment.
	bna := map[string]bool{
		"/_<b1>": true,
		"/_<b2>": true,
	}
	// dtm is the set of expected triples; each value flips to true once the
	// corresponding triple has been observed in the destination graph.
	dtm := map[string]bool{
		fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"connected_to"@[]`, `/person<B>`):                                   false,
		fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"connected_to"@[]`, `/person<C>`):                                   false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_subject"@[2016-04-10T04:25:00Z]`, `/person<A>`):                       false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_predicate"@[2016-04-10T04:25:00Z]`, `"met"@[2016-04-10T04:25:00Z]`):   false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_object"@[2016-04-10T04:25:00Z]`, `/person<B>`):                        false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"location"@[]`, `/city<New York>`):                                      false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"outcome"@[]`, `"good"^^type:text`):                                     false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_subject"@[2016-04-10T04:25:00Z]`, `/person<B>`):                       false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_predicate"@[2016-04-10T04:25:00Z]`, `"met"@[2016-04-10T04:25:00Z]`):   false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_object"@[2016-04-10T04:25:00Z]`, `/person<C>`):                        false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"location"@[]`, `/city<New York>`):                                      false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"outcome"@[]`, `"good"^^type:text`):                                     false,
	}
	// First, we map each blank node generated to a potential blank node placeholder (such as b1 or b2.)
	sts := []*triple.Triple{}
	for elem := range ts {
		sts = append(sts, elem)
		if elem.Subject().Type().String() == "/_" {
			for k := range dtm {
				trp, err := triple.Parse(k, literal.DefaultBuilder())
				if err != nil {
					t.Errorf("Unable to parse triple: %v with error %v", k, err)
				}
				if trp.Subject().Type().String() == "/_" &&
					trp.Predicate().String() == elem.Predicate().String() &&
					trp.Object().String() == elem.Object().String() {
					if mp, ok := bnm[elem.Subject().String()]; !ok {
						bnm[elem.Subject().String()] = map[string]bool{
							trp.Subject().String(): true,
						}
					} else {
						mp[trp.Subject().String()] = true
					}
				}
			}
		}
	}
	// Then, we decide which place holder blank nodes can be used to substitute for a given blank node
	// by substituting the place holder in every triple where the given blank node is the subject and
	// checking if the triple exists in the map of expected triples.
	// Note: the loop variable is named trp (not t) to avoid shadowing *testing.T.
	for _, trp := range sts {
		if trp.Subject().Type().String() == "/_" {
			for bn := range bnm[trp.Subject().String()] {
				rep := fmt.Sprintf("%s\t%s\t%s", bn, trp.Predicate().String(), trp.Object().String())
				if _, ok := dtm[rep]; !ok {
					bnm[trp.Subject().String()][bn] = false
				}
			}
		}
	}
	// Finally, we assign a blank node to a place-holder blank node, if the place-holder blank node is
	// not used to substitute any other blank node.
	for k, v := range bnm {
		for bn, p := range v {
			if p && bna[bn] {
				bns[k] = bn
				bna[bn] = false
				break
			}
		}
	}
	if len(sts) != len(dtm) {
		t.Errorf("g.Triples should have returned %v triples, returned %v instead", len(dtm), len(sts))
	}
	for _, elem := range sts {
		if elem.Subject().Type().String() == "/_" {
			if val, ok := bns[elem.Subject().String()]; ok {
				// Substitute the blank node with the mapped place holder blank node id.
				rep := fmt.Sprintf("%s\t%s\t%s", val, elem.Predicate().String(), elem.Object().String())
				if _, ok := dtm[rep]; !ok {
					t.Errorf("unexpected triple: %v added to graph", elem)
				}
				dtm[rep] = true
			} else {
				t.Errorf("unexpected triple: %v added to graph", elem)
			}
		} else {
			sr := elem.String()
			if _, ok := dtm[sr]; !ok {
				t.Errorf("unexpected triple: %v added to graph", elem)
			}
			dtm[sr] = true
		}
	}
	// Any expected triple never flipped to true was missing from the graph.
	for k, v := range dtm {
		if !v {
			t.Errorf("g.Triples did not return triple: %v", k)
		}
	}
}
// TestPlannerDeconstructRemovesCorrectTriples runs DECONSTRUCT statements
// against a populated destination graph and checks that exactly the expected
// triples remain afterwards.
func TestPlannerDeconstructRemovesCorrectTriples(t *testing.T) {
	testTable := []struct {
		s    string
		trps []string // triples expected to remain in ?dest after execution
	}{
		{
			s: `deconstruct {?p1 "met"@[] ?p2}
			    in ?dest
			    from ?src
			    where {?p1 "lives_in"@[] /city<A>.
			           ?p2 "lives_in"@[] /city<B>};`,
			trps: []string{fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<D>`, `"met"@[]`, `/person<A>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<D>`)},
		},
		{
			s: `deconstruct {?p1 "met"@[] ?p2.
			                 ?p2 "met"@[] ?p1}
			    in ?dest
			    from ?src
			    where {?p1 "lives_in"@[] /city<A>.
			           ?p2 "lives_in"@[] /city<B>};`,
			trps: []string{fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<D>`)},
		},
	}
	for _, entry := range testTable {
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
			continue
		}
		// Fresh store per case so earlier deletions do not leak into later cases.
		s, ctx := memory.NewStore(), context.Background()
		populateStoreWithTriples(ctx, s, "?src", deconstructTestSrcTriples, t)
		populateStoreWithTriples(ctx, s, "?dest", deconstructTestDestTriples, t)
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.s, 1), st); err != nil {
			t.Errorf("Parser.consume: failed to parse query %q with error %v", entry.s, err)
			continue
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Errorf("planner.New failed to create a valid query plan with error %v", err)
			continue
		}
		_, err = plnr.Execute(ctx)
		if err != nil {
			t.Errorf("planner.Execute failed for query %q with error %v", entry.s, err)
			continue
		}
		g, err := s.Graph(ctx, "?dest")
		if err != nil {
			// Bug fix: the message previously reported the wrong graph name
			// ("?test"); also skip the scan to avoid a nil dereference.
			t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
			continue
		}
		ts := make(chan *triple.Triple)
		go func() {
			if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
				t.Error(err)
			}
		}()
		// dt flips each expected remaining triple to true the first time it
		// is observed; i counts distinct expected triples actually seen.
		dt := make(map[string]bool)
		for _, trp := range entry.trps {
			dt[trp] = false
		}
		i := 0
		for trp := range ts {
			if val, ok := dt[trp.String()]; ok {
				if !val {
					i++
				}
				dt[trp.String()] = true
			} else {
				t.Errorf("unexpected triple: %v added to graph", trp)
			}
		}
		if i != len(entry.trps) {
			t.Errorf("g.Triples did not return some of the triples.")
		}
	}
}
// TestTreeTraversalToRoot checks that two chained graph clauses can walk from
// a grandchild up the family tree to resolve the ?grandparent binding.
func TestTreeTraversalToRoot(t *testing.T) {
	// Graph traversal data.
	triples := `/person<Gavin Belson> "born in"@[] /city<Springfield>
	/person<Gavin Belson> "parent of"@[] /person<Peter Belson>
	/person<Gavin Belson> "parent of"@[] /person<Mary Belson>
	/person<Mary Belson> "parent of"@[] /person<Amy Schumer>
	/person<Mary Belson> "parent of"@[] /person<Joe Schumer>`
	query := `SELECT ?grandparent
	          FROM ?test
	          WHERE {
	            ?s "parent of"@[] /person<Amy Schumer> .
	            ?grandparent "parent of"@[] ?s
	          };`
	// Load traversing data.
	ctx := context.Background()
	store := memory.NewStore()
	g, err := store.NewGraph(ctx, "?test")
	if err != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", err)
	}
	if _, err := io.ReadIntoGraph(ctx, g, bytes.NewBufferString(triples), literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	parser, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stmt := &semantic.Statement{}
	if err := parser.Parse(grammar.NewLLk(query, 1), stmt); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", query, err)
	}
	plan, err := New(ctx, store, stmt, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plan.Execute(ctx)
	if err != nil {
		t.Errorf("planner.Execute failed for query %q with error %v", query, err)
	}
	// Exactly one binding (?grandparent) and one row (Gavin Belson) expected.
	if got, want := len(tbl.Bindings()), 1; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", query, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", query, got, want, tbl)
	}
}
// TestChaining verifies that a binding produced by one graph clause can be
// consumed by the next one (/u<joe> -> ?o -> /u<john>).
func TestChaining(t *testing.T) {
	// Graph traversal data.
	triples := `/u<joe> "parent_of"@[] /u<mary>
	/u<joe> "parent_of"@[] /u<peter>
	/u<peter> "parent_of"@[] /u<john>
	/u<peter> "parent_of"@[] /u<eve>`
	query := `SELECT ?o FROM ?test
	          WHERE {
	            /u<joe> "parent_of"@[] ?o .
	            ?o "parent_of"@[] /u<john>
	          };`
	// Load traversing data.
	ctx := context.Background()
	store := memory.NewStore()
	g, err := store.NewGraph(ctx, "?test")
	if err != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", err)
	}
	if _, err := io.ReadIntoGraph(ctx, g, bytes.NewBufferString(triples), literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	parser, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stmt := &semantic.Statement{}
	if err := parser.Parse(grammar.NewLLk(query, 1), stmt); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", query, err)
	}
	plan, err := New(ctx, store, stmt, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plan.Execute(ctx)
	if err != nil {
		t.Errorf("planner.Execute failed for query %q with error %v", query, err)
	}
	// Only /u<peter> satisfies both clauses: one binding, one row.
	if got, want := len(tbl.Bindings()), 1; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", query, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", query, got, want, tbl)
	}
}
// BenchmarkChaining measures the full parse/plan/execute pipeline for a
// two-clause chained query. Parsing and planning are intentionally kept
// inside the timed loop; only the store setup is excluded.
func BenchmarkChaining(b *testing.B) {
	// Graph traversal data.
	traversalTriples := `/u<joe> "parent_of"@[] /u<mary>
	/u<joe> "parent_of"@[] /u<peter>
	/u<peter> "parent_of"@[] /u<john>
	/u<peter> "parent_of"@[] /u<eve>`
	traversalQuery := `SELECT ?o FROM ?test
	                   WHERE {
	                     /u<joe> "parent_of"@[] ?o .
	                     ?o "parent_of"@[] /u<john>
	                   };`
	// Load traversing data
	s, ctx := memory.NewStore(), context.Background()
	g, gErr := s.NewGraph(ctx, "?test")
	if gErr != nil {
		b.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", gErr)
	}
	buf := bytes.NewBufferString(traversalTriples)
	if _, err := io.ReadIntoGraph(ctx, g, buf, literal.DefaultBuilder()); err != nil {
		b.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	// Bug fix: exclude store/graph setup from the measurement, matching the
	// benchmarkQuery helper which also resets the timer after setup.
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		p, pErr := grammar.NewParser(grammar.SemanticBQL())
		if pErr != nil {
			b.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", pErr)
		}
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(traversalQuery, 1), st); err != nil {
			b.Errorf("Parser.consume: failed to parse query %q with error %v", traversalQuery, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			b.Errorf("planner.New failed to create a valid query plan with error %v", err)
		}
		tbl, err := plnr.Execute(ctx)
		if err != nil {
			b.Errorf("planner.Execute failed for query %q with error %v", traversalQuery, err)
		}
		if got, want := len(tbl.Bindings()), 1; got != want {
			b.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", traversalQuery, got, want)
		}
		if got, want := len(tbl.Rows()), 1; got != want {
			b.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", traversalQuery, got, want, tbl)
		}
	}
}
// Test to validate https://github.com/google/badwolf/issues/70
// TestReificationResolutionIssue70 binds the same blank node across three
// clauses (_subject/_predicate/_object) and checks that only one fully
// reified statement matches.
func TestReificationResolutionIssue70(t *testing.T) {
	// Graph traversal data.
	triples := `/_<c175b457-e6d6-4ce3-8312-674353815720> "_predicate"@[] "/some/immutable/id"@[]
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_owner"@[2017-05-23T16:41:12.187373-07:00] /gid<0x9>
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_subject"@[] /aid</some/subject/id>
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_object"@[] /aid</some/object/id>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_object"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_owner"@[2017-05-23T16:41:12.187373-07:00] /gid<0x6>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_predicate"@[2017-05-23T16:41:12.187373-07:00] "/some/temporal/id"@[2017-05-23T16:41:12.187373-07:00]
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_subject"@[2017-05-23T16:41:12.187373-07:00] /aid</some/subject/id>
	/aid</some/subject/id> "/some/temporal/id"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>
	/aid</some/subject/id> "/some/immutable/id"@[] /aid</some/object/id>
	/aid</some/subject/id> "/some/ownerless_temporal/id"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>`
	query := `
	SELECT ?bn, ?p
	FROM ?test
	WHERE {
	  ?bn "_subject"@[,] /aid</some/subject/id>.
	  ?bn "_predicate"@[,] ?p .
	  ?bn "_object"@[,] /aid</some/object/id>
	};`
	// Load traversing data.
	ctx := context.Background()
	store := memory.NewStore()
	g, err := store.NewGraph(ctx, "?test")
	if err != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", err)
	}
	if _, err := io.ReadIntoGraph(ctx, g, bytes.NewBufferString(triples), literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	parser, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stmt := &semantic.Statement{}
	if err := parser.Parse(grammar.NewLLk(query, 1), stmt); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", query, err)
	}
	plan, err := New(ctx, store, stmt, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plan.Execute(ctx)
	if err != nil {
		t.Fatalf("planner.Execute failed for query %q with error %v", query, err)
	}
	if got, want := len(tbl.Bindings()), 2; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", query, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", query, got, want, tbl)
	}
}
// benchmarkQuery is a helper that parses, plans, and executes the given query
// b.N times against a store preloaded with the shared testTriples fixture.
// Store setup happens before the timer is reset, so only the query pipeline
// is measured.
func benchmarkQuery(query string, b *testing.B) {
	ctx := context.Background()
	store := memory.NewStore()
	populateStoreWithTriples(ctx, store, "?test", testTriples, b)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		parser, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			b.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		stmt := &semantic.Statement{}
		if err := parser.Parse(grammar.NewLLk(query, 1), stmt); err != nil {
			b.Errorf("Parser.consume: failed to parse query %q with error %v", query, err)
		}
		plan, err := New(ctx, store, stmt, 0, 10, nil)
		if err != nil {
			b.Errorf("planner.New failed to create a valid query plan with error %v", err)
		}
		if _, err := plan.Execute(ctx); err != nil {
			b.Errorf("planner.Execute failed for query %q with error %v", query, err)
		}
	}
}
// These benchmark tests are used to observe the difference in speed between queries using the "as" keyword as opposed
// to queries that do not.

// BenchmarkReg1 benchmarks a two-binding query where only one binding is aliased.
func BenchmarkReg1(b *testing.B) {
	benchmarkQuery(`select ?p, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`, b)
}

// BenchmarkAs1 benchmarks the same two-binding query with every binding aliased.
func BenchmarkAs1(b *testing.B) {
	benchmarkQuery(`select ?p as ?p1, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`, b)
}

// BenchmarkReg2 benchmarks a full three-binding scan without aliases.
func BenchmarkReg2(b *testing.B) {
	benchmarkQuery(`select ?s, ?p, ?o from ?test where {?s ?p ?o};`, b)
}

// BenchmarkAs2 benchmarks the same full scan with every binding aliased.
func BenchmarkAs2(b *testing.B) {
	benchmarkQuery(`select ?s as ?s1, ?p as ?p1, ?o as ?o1 from ?test where {?s ?p ?o};`, b)
}
Add tests for node comparisons inside the HAVING clause (in planner_test.go).
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package planner
import (
"bytes"
"context"
"fmt"
"strings"
"testing"
"github.com/google/badwolf/bql/grammar"
"github.com/google/badwolf/bql/semantic"
"github.com/google/badwolf/io"
"github.com/google/badwolf/storage"
"github.com/google/badwolf/storage/memory"
"github.com/google/badwolf/triple"
"github.com/google/badwolf/triple/literal"
)
const (
	// originalTriples is the base fixture: a small family tree, temporal
	// "bought" predicates, car type assertions, and a few typed literals
	// (int64/text) used by the SELECT/HAVING tests.
	originalTriples = `/u<joe> "parent_of"@[] /u<mary>
/u<joe> "parent_of"@[] /u<peter>
/u<peter> "parent_of"@[] /u<john>
/u<peter> "parent_of"@[] /u<eve>
/u<peter> "bought"@[2016-01-01T00:00:00-08:00] /c<mini>
/u<peter> "bought"@[2016-02-01T00:00:00-08:00] /c<model s>
/u<peter> "bought"@[2016-03-01T00:00:00-08:00] /c<model x>
/u<peter> "bought"@[2016-04-01T00:00:00-08:00] /c<model y>
/c<mini> "is_a"@[] /t<car>
/c<model s> "is_a"@[] /t<car>
/c<model x> "is_a"@[] /t<car>
/c<model y> "is_a"@[] /t<car>
/l<barcelona> "predicate"@[] "turned"@[2016-01-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-02-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-03-01T00:00:00-08:00]
/l<barcelona> "predicate"@[] "turned"@[2016-04-01T00:00:00-08:00]
/u<alice> "height_cm"@[] "174"^^type:int64
/u<alice> "tag"@[] "abc"^^type:text
/u<bob> "height_cm"@[] "151"^^type:int64
/u<charlie> "height_cm"@[] "174"^^type:int64
/u<delta> "height_cm"@[] "174"^^type:int64
`
	// tripleFromIssue40 reproduces the room-connectivity data from
	// https://github.com/google/badwolf/issues/40.
	tripleFromIssue40 = `/room<Hallway> "connects_to"@[] /room<Kitchen>
/room<Kitchen> "connects_to"@[] /room<Hallway>
/room<Kitchen> "connects_to"@[] /room<Bathroom>
/room<Kitchen> "connects_to"@[] /room<Bedroom>
/room<Bathroom> "connects_to"@[] /room<Kitchen>
/room<Bedroom> "connects_to"@[] /room<Kitchen>
/room<Bedroom> "connects_to"@[] /room<Fire Escape>
/room<Fire Escape> "connects_to"@[] /room<Kitchen>
/item/book<000> "in"@[2016-04-10T4:21:00.000000000Z] /room<Hallway>
/item/book<000> "in"@[2016-04-10T4:23:00.000000000Z] /room<Kitchen>
/item/book<000> "in"@[2016-04-10T4:25:00.000000000Z] /room<Bedroom>
`
	// constructTestSrcTriples is the source graph for the CONSTRUCT tests:
	// who met whom (plus timestamps) and city connectivity.
	constructTestSrcTriples = `/person<A> "met"@[] /person<B>
/person<B> "met"@[] /person<C>
/person<C> "met"@[] /person<D>
/person<A> "met_at"@[2016-04-10T4:25:00.000000000Z] /person<B>
/person<B> "met_at"@[2016-04-10T4:25:00.000000000Z] /person<C>
/city<A> "is_connected_to"@[] /city<B>
/city<A> "is_connected_to"@[] /city<C>
/city<B> "is_connected_to"@[] /city<D>
/city<B> "is_connected_to"@[] /city<E>
/city<C> "is_connected_to"@[] /city<D>
`
	// constructTestDestTriples pre-populates the CONSTRUCT destination graph
	// with a single triple, which expected counts must include.
	constructTestDestTriples = `/person<D> "met"@[] /person<E>
`
	// deconstructTestSrcTriples maps people to cities; the DECONSTRUCT tests
	// match on these to decide which "met" triples to remove.
	deconstructTestSrcTriples = `/person<A> "lives_in"@[] /city<A>
/person<B> "lives_in"@[] /city<B>
/person<C> "lives_in"@[] /city<A>
/person<D> "lives_in"@[] /city<B>
`
	// deconstructTestDestTriples is the DECONSTRUCT destination graph from
	// which matching triples are deleted.
	deconstructTestDestTriples = `/person<A> "met"@[] /person<B>
/person<B> "met"@[] /person<C>
/person<C> "met"@[] /person<D>
/person<D> "met"@[] /person<A>
/person<A> "met"@[] /person<C>
/person<B> "met"@[] /person<D>
`
	// testTriples is the combined fixture used by the generic planner tests.
	testTriples = originalTriples + tripleFromIssue40
)
// insertAndDeleteTest inserts three triples into graph ?a, verifies they were
// stored, then deletes them and verifies the graph is empty again. It is
// driven by TestPlannerInsertDeleteDoesNotFail, which creates and destroys
// the ?a graph around this call.
func insertAndDeleteTest(t *testing.T) {
	ctx := context.Background()
	// Testing insertion of triples.
	bql := `insert data into ?a {/_<foo> "bar"@[] /_<foo> .
	                             /_<foo> "bar"@[] "bar"@[1975-01-01T00:01:01.999999999Z] .
	                             /_<foo> "bar"@[] "yeah"^^type:text};`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Errorf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err := New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err = pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	g, err := memory.DefaultStore.Graph(ctx, "?a")
	if err != nil {
		t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?a", err)
	}
	i := 0
	ts := make(chan *triple.Triple)
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	for range ts {
		i++
	}
	if i != 3 {
		t.Errorf("g.Triples should have returned 3 triples, returned %d instead", i)
	}
	// Testing deletion of triples.
	bql = `delete data from ?a {/_<foo> "bar"@[] /_<foo> .
	                            /_<foo> "bar"@[] "bar"@[1975-01-01T00:01:01.999999999Z] .
	                            /_<foo> "bar"@[] "yeah"^^type:text};`
	p, err = grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stm = &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {
		t.Errorf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	pln, err = New(ctx, memory.DefaultStore, stm, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stm, err)
	}
	if _, err = pln.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	g, err = memory.DefaultStore.Graph(ctx, "?a")
	if err != nil {
		t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?a", err)
	}
	i = 0
	ts = make(chan *triple.Triple)
	go func() {
		// Bug fix: run the producer concurrently. The previous synchronous
		// call would deadlock on the unbuffered channel if the deletion had
		// left any triple behind, hanging the test instead of failing it.
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	for range ts {
		i++
	}
	if i != 0 {
		t.Errorf("g.Triples should have returned 0 triples, returned %d instead", i)
	}
}
// TestPlannerInsertDeleteDoesNotFail creates the scratch graph ?a, runs the
// insert/delete round trip against it, and tears the graph down afterwards.
func TestPlannerInsertDeleteDoesNotFail(t *testing.T) {
	ctx := context.Background()
	_, err := memory.DefaultStore.NewGraph(ctx, "?a")
	if err != nil {
		t.Errorf("memory.DefaultStore.NewGraph(%q) should have not failed with error %v", "?a", err)
	}
	insertAndDeleteTest(t)
	err = memory.DefaultStore.DeleteGraph(ctx, "?a")
	if err != nil {
		t.Errorf("memory.DefaultStore.DeleteGraph(%q) should have not failed with error %v", "?a", err)
	}
}
// TestPlannerCreateGraph verifies that a single CREATE GRAPH statement
// provisions every graph named in it.
func TestPlannerCreateGraph(t *testing.T) {
	ctx := context.Background()
	// Start from a clean slate; leftovers from other tests are ignored.
	memory.DefaultStore.DeleteGraph(ctx, "?foo")
	memory.DefaultStore.DeleteGraph(ctx, "?bar")
	bql := `create graph ?foo, ?bar;`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stmt := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stmt); err != nil {
		t.Errorf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	plan, err := New(ctx, memory.DefaultStore, stmt, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stmt, err)
	}
	if _, err := plan.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	// Both graphs must now be retrievable.
	for _, name := range []string{"?foo", "?bar"} {
		if _, err := memory.DefaultStore.Graph(ctx, name); err != nil {
			t.Errorf("planner.Execute: failed to create graph %q with error %v", name, err)
		}
	}
}
// TestPlannerDropGraph verifies that a single DROP GRAPH statement removes
// every graph named in it.
func TestPlannerDropGraph(t *testing.T) {
	ctx := context.Background()
	// Recreate both graphs from scratch so the drop below has work to do.
	for _, name := range []string{"?foo", "?bar"} {
		memory.DefaultStore.DeleteGraph(ctx, name)
		memory.DefaultStore.NewGraph(ctx, name)
	}
	bql := `drop graph ?foo, ?bar;`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	stmt := &semantic.Statement{}
	if err = p.Parse(grammar.NewLLk(bql, 1), stmt); err != nil {
		t.Errorf("Parser.consume: failed to accept BQL %q with error %v", bql, err)
	}
	plan, err := New(ctx, memory.DefaultStore, stmt, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New: should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v", stmt, err)
	}
	if _, err := plan.Execute(ctx); err != nil {
		t.Errorf("planner.Execute: failed to execute insert plan with error %v", err)
	}
	// Neither graph should be retrievable any longer.
	for _, name := range []string{"?foo", "?bar"} {
		if g, err := memory.DefaultStore.Graph(ctx, name); err == nil {
			t.Errorf("planner.Execute: failed to drop graph %q; returned %v", name, g)
		}
	}
}
// populateStoreWithTriples creates graph gn in store s, loads the given
// newline-separated triples into it, and fails the test if the number of
// triples read back does not match the number of input lines.
func populateStoreWithTriples(ctx context.Context, s storage.Store, gn string, triples string, tb testing.TB) {
	g, err := s.NewGraph(ctx, gn)
	if err != nil {
		tb.Fatalf("memory.NewGraph failed to create \"%v\" with error %v", gn, err)
	}
	b := bytes.NewBufferString(triples)
	if _, err := io.ReadIntoGraph(ctx, g, b, literal.DefaultBuilder()); err != nil {
		tb.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	trpls := make(chan *triple.Triple)
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, trpls); err != nil {
			// Bug fix: Fatal must not be called from a goroutine other than
			// the one running the test (see testing.T.FailNow); it would also
			// Goexit this producer, potentially leaving trpls unclosed and
			// hanging the range loop below. Error records the failure safely.
			tb.Error(err)
		}
	}()
	cnt := 0
	for range trpls {
		cnt++
	}
	// Fixture strings end with a trailing newline, hence the -1.
	if got, want := cnt, len(strings.Split(triples, "\n"))-1; got != want {
		tb.Fatalf("Failed to import all test triples; got %v, want %v", got, want)
	}
}
// TestPlannerQuery runs a table of SELECT queries against a store populated
// with testTriples and checks only the shape of the result — the number of
// bindings (columns) and rows — not the cell contents themselves.
func TestPlannerQuery(t *testing.T) {
	testTable := []struct {
		q         string // BQL query to execute.
		nBindings int    // Expected number of bindings in the result table.
		nRows     int    // Expected number of rows in the result table.
	}{
		{
			q:         `select ?s, ?p, ?o from ?test where {?s ?p ?o};`,
			nBindings: 3,
			nRows:     len(strings.Split(testTriples, "\n")) - 1,
		},
		{
			q:         `select ?s as ?s1, ?p as ?p1, ?o as ?o1 from ?test where {?s ?p ?o};`,
			nBindings: 3,
			nRows:     len(strings.Split(testTriples, "\n")) - 1,
		},
		{
			q:         `select ?p, ?o from ?test where {/u<joe> ?p ?o};`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q:         `select ?p as ?p1, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q:         `select ?s, ?p from ?test where {?s ?p /t<car>};`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?o from ?test where {?s "parent_of"@[] ?o};`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {/u<joe> as ?s "parent_of"@[] as ?p /u<mary> as ?o};`,
			nBindings: 3,
			nRows:     1,
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {/u<unknown> as ?s "parent_of"@[] as ?p /u<mary> as ?o};`,
			nBindings: 3,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<joe> "parent_of"@[] ?o};`,
			nBindings: 1,
			nRows:     2,
		},
		{
			q:         `select ?p from ?test where {/u<joe> ?p /u<mary>};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q:         `select ?s from ?test where {?s "is_a"@[] /t<car>};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?s as ?s1 from ?test where {?s "is_a"@[] /t<car>};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/u<joe> "parent_of"@[] ?o. ?o "parent_of"@[] /u<john>};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q:         `select ?s, ?o from ?test where {/u<joe> "parent_of"@[] ?o. ?o "parent_of"@[] ?s};`,
			nBindings: 2,
			nRows:     2,
		},
		// Unconstrained cross products: row count is the square of the triple count.
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 6,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k, ?l from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 5,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 4,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 3,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 2,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s from ?test where {?s ?p ?o. ?k ?l ?m};`,
			nBindings: 1,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		// Temporal anchors on predicates: @[lo,hi] bounds filter matching triples.
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[,] ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[,2015-01-01T00:00:00-08:00] ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2017-01-01T00:00:00-08:00,] ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[,] as ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[,2015-01-01T00:00:00-08:00] as ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2017-01-01T00:00:00-08:00,] as ?o};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] as ?o};`,
			nBindings: 1,
			nRows:     4,
		},
		// GROUP BY with aggregation.
		{
			q:         `select ?grandparent, count(?name) as ?grandchildren from ?test where {/u<joe> as ?grandparent "parent_of"@[] ?offspring . ?offspring "parent_of"@[] ?name} group by ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?grandparent, count(distinct ?name) as ?grandchildren from ?test where {/u<joe> as ?grandparent "parent_of"@[] ?offspring . ?offspring "parent_of"@[] ?name} group by ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m} order by ?s, ?p, ?o, ?k, ?l, ?m;`,
			nBindings: 6,
			nRows:     (len(strings.Split(testTriples, "\n")) - 1) * (len(strings.Split(testTriples, "\n")) - 1),
		},
		{
			q:         `select ?s, ?p, ?o, ?k, ?l, ?m from ?test where {?s ?p ?o. ?k ?l ?m} order by ?s, ?p, ?o, ?k, ?l, ?m having not(?s = ?s);`,
			nBindings: 6,
			nRows:     0,
		},
		{
			q:         `select ?o from ?test where {/l<barcelona> "predicate"@[] "turned"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] as ?o} LIMIT "2"^^type:int64;`,
			nBindings: 1,
			nRows:     2,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} before 2016-03-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     3,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} after 2016-02-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     3,
		},
		{
			q:         `select ?o from ?test where {/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o} between 2014-01-01T00:00:00-08:00, 2017-01-01T00:00:00-08:00;`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q:         `SELECT ?grandparent, COUNT(?grandparent) AS ?number_of_grandchildren FROM ?test WHERE{ ?gp ID ?grandparent "parent_of"@[] ?c . ?c "parent_of"@[] ?gc ID ?gc } GROUP BY ?grandparent;`,
			nBindings: 2,
			nRows:     1,
		},
		{ // Issue 40 (https://github.com/google/badwolf/issues/40)
			q:         `SELECT ?item, ?t FROM ?test WHERE {?item "in"@[?t] /room<Bedroom>};`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `SHOW GRAPHS;`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q:         `select ?s, ?o from ?test where {?s "tag"@[] ?o} having ?o = "abc"^^type:text;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height > "0"^^type:int64;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height > "160"^^type:int64;`,
			nBindings: 2,
			nRows:     3,
		},
		{
			q:         `select ?s, ?height from ?test where {?s "height_cm"@[] ?height} having ?height = "151"^^type:int64;`,
			nBindings: 2,
			nRows:     1,
		},
		/*
			/c<model s>  "is_a"@[] /t<car>
			/c<model x>  "is_a"@[] /t<car>
			/c<model y>  "is_a"@[] /t<car>
		*/
		// OPTIONAL clauses.
		{
			q:         `SELECT ?car FROM ?test WHERE { ?car "is_a"@[] /t<car> };`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?car
			FROM ?test
			WHERE {
				/c<model s> as ?car "is_a"@[] /t<car>
			};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?car
			FROM ?test
			WHERE {
				?car "is_a"@[] /t<car> .
				/c<model z> as ?car "is_a"@[] /t<car>
			};`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q: `SELECT ?car
			FROM ?test
			WHERE {
				?car "is_a"@[] /t<car> .
				OPTIONAL { /c<model O> "is_a"@[] /t<car> }
			};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?car
			FROM ?test
			WHERE {
				?car "is_a"@[] /t<car> .
				OPTIONAL { ?car "is_a"@[] /t<car> }
			};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?cars, ?type
			FROM ?test
			WHERE {
				?cars "is_a"@[] /t<car> .
				OPTIONAL { ?cars "is_a"@[] ?type }
			};`,
			nBindings: 2,
			nRows:     4,
		},
		// HAVING over node/predicate components (ID, TYPE, AT).
		{
			q: `SELECT ?p, ?o
			FROM ?test
			WHERE {
				/c<mini> ?p ?o
			}
			HAVING ?o > "37"^^type:int64;`,
			nBindings: 2,
			nRows:     0,
		},
		{
			q: `SELECT ?o
			FROM ?test
			WHERE {
				/u<alice> "height_cm"@[] ?o
			}
			HAVING ?o = /u<peter>;`,
			nBindings: 1,
			nRows:     0,
		},
		{
			q: `SELECT ?s_id, ?height
			FROM ?test
			WHERE {
				?s ID ?s_id "height_cm"@[] ?height
			}
			HAVING ?s_id = "alice"^^type:text;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?p_id, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p ID ?p_id ?o
			}
			HAVING ?p_id < "parent_of"^^type:text;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_type
			FROM ?test
			WHERE {
				?s TYPE ?s_type ?p ?o
			}
			HAVING ?s_type = "/c"^^type:text;`,
			nBindings: 2,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_type
			FROM ?test
			WHERE {
				?s TYPE ?s_type ?p ?o
			}
			HAVING ?s_type < "/l"^^type:text;`,
			nBindings: 2,
			nRows:     7,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p ID ?p_id ?o
			}
			HAVING ?p_id = "bought"^^type:text
			AFTER 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p ID ?p_id ?o
			}
			HAVING ?p_id < "parent_of"^^type:text
			BEFORE 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     3,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p ID ?p_id ?o
			}
			HAVING ?p_id = "bought"^^type:text
			BETWEEN 2016-02-01T00:00:00-08:00, 2016-03-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?p_id, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p ID ?p_id ?o
			}
			HAVING ?p_id < "work_with"^^type:text
			BEFORE 2016-02-01T00:00:00-08:00;`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_type
			FROM ?test
			WHERE {
				?s ?p ?o TYPE ?o_type
			}
			HAVING (?s = /u<joe>) OR (?s = /l<barcelona>) OR (?s = /u<alice>);`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time, ?o
			FROM ?test
			WHERE {
				/u<peter> ?p AT ?time ?o
			};`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?time, ?o
			FROM ?test
			WHERE {
				?s "parent_of"@[?time] ?o
			};`,
			nBindings: 3,
			nRows:     0,
		},
		{
			q: `SELECT ?s, ?s_alias, ?s_id, ?s_type
			FROM ?test
			WHERE {
				?s AS ?s_alias ID ?s_id TYPE ?s_type "parent_of"@[] ?o
			};`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?s_alias, ?s_id, ?s_type
			FROM ?test
			WHERE {
				?s AS ?s_alias TYPE ?s_type ID ?s_id "parent_of"@[] ?o
			};`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_alias, ?o_id, ?o_type
			FROM ?test
			WHERE {
				?s "parent_of"@[] ?o AS ?o_alias ID ?o_id TYPE ?o_type
			};`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o, ?o_alias, ?o_id, ?o_type
			FROM ?test
			WHERE {
				?s "parent_of"@[] ?o AS ?o_alias TYPE ?o_type ID ?o_id
			};`,
			nBindings: 4,
			nRows:     4,
		},
		{
			q: `SELECT ?o
			FROM ?test
			WHERE {
				/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o
			}
			BEFORE 2016-03-01T00:00:00-08:00
			LIMIT "1"^^type:int64;`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?o
			FROM ?test
			WHERE {
				/u<peter> "bought"@[2015-01-01T00:00:00-08:00,2017-01-01T00:00:00-08:00] ?o
			}
			AFTER 2016-02-01T00:00:00-08:00
			LIMIT "1"^^type:int64;`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?p, ?time
			FROM ?test
			WHERE {
				/u<peter> ?p AT ?time ?o
			}
			HAVING ?time < 2016-03-01T00:00:00-08:00;`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time
			FROM ?test
			WHERE {
				/u<peter> ?p AT ?time ?o
			}
			HAVING ?time < 2016-03-01T00:00:00-08:00
			LIMIT "1"^^type:int64;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?s, ?p, ?p_id, ?time
			FROM ?test
			WHERE {
				?s ?p ID ?p_id AT ?time ?o
			}
			HAVING (?p_id < "in"^^type:text) AND (?time > 2016-02-01T00:00:00-08:00);`,
			nBindings: 4,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?time
			FROM ?test
			WHERE {
				/u<peter> ?p AT ?time ?o
			}
			HAVING ?time = 2016-01-01T01:00:00-07:00;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?p, ?time
			FROM ?test
			WHERE {
				/u<peter> ?p AT ?time ?o
			}
			HAVING ?time > 2016-02-01T00:00:00-07:00;`,
			nBindings: 2,
			nRows:     3,
		},
		{
			q: `SELECT ?s, ?p, ?o
			FROM ?test
			WHERE {
				?s ?p ?o
			}
			HAVING ?p = "height_cm"@[];`,
			nBindings: 3,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?p, ?o
			FROM ?test
			WHERE {
				?s ?p ?o
			}
			HAVING ?p = "bought"@[2016-03-01T00:00:00-08:00];`,
			nBindings: 3,
			nRows:     1,
		},
		{
			q: `SELECT ?s, ?p, ?o
			FROM ?test
			WHERE {
				?s ?p ?o
			}
			HAVING (?p = "tag"@[]) OR (?p = "bought"@[2016-02-01T00:00:00-08:00]);`,
			nBindings: 3,
			nRows:     2,
		},
		{
			q: `SELECT ?p, ?o
			FROM ?test
			WHERE {
				/u<joe> ?p ?o .
			};`,
			nBindings: 2,
			nRows:     2,
		},
		{
			q: `SELECT ?o
			FROM ?test
			WHERE {
				/u<joe> "parent_of"@[] ?o .
				?o "parent_of"@[] /u<john> .
			};`,
			nBindings: 1,
			nRows:     1,
		},
		{
			q: `SELECT ?p1, ?p2
			FROM ?test
			WHERE {
				/u<joe> ?p1 /u<mary> .
				/u<joe> ?p2 /u<peter> .
			};`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?car
			FROM ?test
			WHERE {
				?car "is_a"@[] /t<car> .
				OPTIONAL { /c<model O> "is_a"@[] /t<car> } .
			};`,
			nBindings: 1,
			nRows:     4,
		},
		{
			q: `SELECT ?s, ?height
			FROM ?test
			WHERE {
				?s "height_cm"@[] ?height
			}
			HAVING ?s = /u<bob>;`,
			nBindings: 2,
			nRows:     1,
		},
		{
			q: `SELECT ?s, ?p, ?o
			FROM ?test
			WHERE {
				?s ?p ?o
			}
			HAVING (?s = /u<bob>) OR (?o = /t<car>);`,
			nBindings: 3,
			nRows:     5,
		},
	}
	// All queries share one store populated once with testTriples.
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?test", testTriples, t)
	for _, entry := range testTable {
		// Setup for test:
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.q, 1), st); err != nil {
			t.Fatalf("parser.Parse failed for query \"%s\"\nwith error: %v", entry.q, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Fatalf("planner.New failed to create a valid query plan with error: %v", err)
		}
		// Actual test:
		tbl, err := plnr.Execute(ctx)
		if err != nil {
			t.Fatalf("planner.Execute(%s)\n= _, %v; want _, nil", entry.q, err)
		}
		if got, want := len(tbl.Bindings()), entry.nBindings; got != want {
			t.Errorf("planner.Execute(%s)\n= a Table with %d bindings; want %d", entry.q, got, want)
		}
		if got, want := len(tbl.Rows()), entry.nRows; got != want {
			t.Errorf("planner.Execute(%s)\n= a Table with %d rows; want %d\nTable:\n%v\n", entry.q, got, want, tbl)
		}
	}
}
// TestPlannerQueryError verifies that HAVING clauses with mismatched operand
// types (binding vs. literal/node) cause the planner to fail at execution time.
func TestPlannerQueryError(t *testing.T) {
	badQueries := []string{
		`SELECT ?s_id, ?height
		FROM ?test
		WHERE {
			?s ID ?s_id "height_cm"@[] ?height
		}
		HAVING ?s_id > "37"^^type:int64;`,
		`SELECT ?s_id, ?height
		FROM ?test
		WHERE {
			?s ID ?s_id "height_cm"@[] ?height
		}
		HAVING ?s_id = /u<alice>;`,
		`SELECT ?s, ?p, ?o
		FROM ?test
		WHERE {
			?s ?p ?o
		}
		HAVING ?p < "height_cm"@[];`,
		`SELECT ?s, ?p, ?o
		FROM ?test
		WHERE {
			?s ?p ?o
		}
		HAVING ?p > "bought"@[2016-01-01T00:00:00-08:00];`,
		`SELECT ?s, ?height
		FROM ?test
		WHERE {
			?s "height_cm"@[] ?height
		}
		HAVING ?s < /u<zzzzz>;`,
		`SELECT ?s, ?height
		FROM ?test
		WHERE {
			?s "height_cm"@[] ?height
		}
		HAVING ?s > /u<alice>;`,
	}
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?test", testTriples, t)
	for _, q := range badQueries {
		// Build a fresh parser, statement, and plan per query.
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		stm := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(q, 1), stm); err != nil {
			t.Fatalf("parser.Parse failed for query \"%s\"\nwith error: %v", q, err)
		}
		plnr, err := New(ctx, s, stm, 0, 10, nil)
		if err != nil {
			t.Fatalf("planner.New failed to create a valid query plan with error: %v", err)
		}
		// Execution must fail for each of these queries.
		if _, err := plnr.Execute(ctx); err == nil {
			t.Errorf("planner.Execute(%s)\n= _, nil; want _, error", q)
		}
	}
}
// TestPlannerConstructAddsCorrectNumberofTriples executes CONSTRUCT statements
// and verifies that the destination graph ends up with the expected total
// number of triples (reification expands each matched triple into several).
func TestPlannerConstructAddsCorrectNumberofTriples(t *testing.T) {
	// Triple counts in the source and destination fixtures (see the -1
	// convention used by populateStoreWithTriples).
	sts, dts := len(strings.Split(constructTestSrcTriples, "\n"))-1, len(strings.Split(constructTestDestTriples, "\n"))-1
	testTable := []struct {
		s    string // CONSTRUCT statement to execute.
		trps int    // Expected triple count in ?dest after execution.
	}{
		{
			s: `construct {?s ?p ?o}
			into ?dest
			from ?src
			where {?s ?p ?o};`,
			trps: sts + dts,
		},
		{
			s: `construct {?s "met"@[] ?o; "location"@[] /city<New York>}
			into ?dest
			from ?src
			where {?s "met"@[] ?o};`,
			// 3 matching triples * 4 new triples per matched triple due to reification + 1 triple in dest graph.
			trps: 3*4 + dts,
		},
		{
			s: `construct {?s "met"@[] ?o; "location"@[] /city<New York>;
			"outcome"@[] "good"^^type:text }
			into ?dest
			from ?src
			where {?s "met"@[] ?o};`,
			// 3 matching triples * 5 new triples per matched triple due to reification + 1 triple in dest graph.
			trps: 3*5 + dts,
		},
		{
			s: `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
			"outcome"@[] "good"^^type:text .
			?s "connected_to"@[] ?o}
			into ?dest
			from ?src
			where {?s "met"@[] ?o.
			?s "met_at"@[?t] ?o};`,
			// 2 matching triples * (5 new triples due to reification + 1 explicitly constructed triple per matched triple) +
			// 1 triple in dest graph.
			trps: 2*6 + dts,
		},
		{
			s: `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
			"outcome"@[] "good"^^type:text .
			?s "connected_to"@[] ?o; "at"@[?t] /city<New York> }
			into ?dest
			from ?src
			where {?s "met"@[] ?o.
			?s "met_at"@[?t] ?o};`,
			// 2 matching triples * 9 new triples due to reification + 1 triple in dest graph.
			trps: 2*9 + dts,
		},
		{
			s: `construct {?d2 "is_2_hops_from"@[] ?s1 }
			into ?dest
			from ?src
			where {?s1 "is_connected_to"@[] ?d1.
			?d1 "is_connected_to"@[] ?d2};`,
			// 2 new triples (/city<A> "is_2_hops_from"@[] /city<D>, /city<A> "is_2_hops_from"@[] /city<E>) + 1 triple in dest graph.
			trps: 3,
		},
	}
	for _, entry := range testTable {
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		// Fresh store per entry so counts do not accumulate across cases.
		s, ctx := memory.NewStore(), context.Background()
		populateStoreWithTriples(ctx, s, "?src", constructTestSrcTriples, t)
		populateStoreWithTriples(ctx, s, "?dest", constructTestDestTriples, t)
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.s, 1), st); err != nil {
			t.Errorf("Parser.consume: failed to parse query %q with error %v", entry.s, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Errorf("planner.New failed to create a valid query plan with error %v", err)
		}
		_, err = plnr.Execute(ctx)
		if err != nil {
			t.Errorf("planner.Execute failed for query %q with error %v", entry.s, err)
			continue
		}
		// Fix: the graph being fetched is "?dest"; the message used to say "?test".
		g, err := s.Graph(ctx, "?dest")
		if err != nil {
			t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
		}
		// Count every triple now present in the destination graph.
		i := 0
		ts := make(chan *triple.Triple)
		go func() {
			if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
				t.Error(err)
			}
		}()
		for range ts {
			i++
		}
		if i != entry.trps {
			t.Errorf("g.Triples should have returned %v triples, returned %v instead", entry.trps, i)
		}
	}
}
// TestPlannerConstructAddsCorrectTriples executes a reifying CONSTRUCT and
// checks that exactly the expected triples land in the destination graph.
// Because blank-node identifiers are generated at runtime, the test first maps
// each generated blank node to a compatible placeholder (b1/b2) and then
// compares the substituted triples against the expected set.
func TestPlannerConstructAddsCorrectTriples(t *testing.T) {
	bql := `construct {?s "met"@[?t] ?o; "location"@[] /city<New York>;
		"outcome"@[] "good"^^type:text.
		?s "connected_to"@[] ?o }
		into ?dest
		from ?src
		where {?s "met"@[] ?o.
		?s "met_at"@[?t] ?o};`
	p, err := grammar.NewParser(grammar.SemanticBQL())
	if err != nil {
		t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
	}
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?src", constructTestSrcTriples, t)
	populateStoreWithTriples(ctx, s, "?dest", "", t)
	st := &semantic.Statement{}
	if err := p.Parse(grammar.NewLLk(bql, 1), st); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", bql, err)
	}
	plnr, err := New(ctx, s, st, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	_, err = plnr.Execute(ctx)
	if err != nil {
		t.Errorf("planner.Execute failed for query %q with error %v", bql, err)
	}
	// Fix: the graph being fetched is "?dest"; the message used to say "?test".
	g, err := s.Graph(ctx, "?dest")
	if err != nil {
		t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
	}
	ts := make(chan *triple.Triple)
	go func() {
		if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
			t.Error(err)
		}
	}()
	// bnm: generated blank node -> set of candidate placeholder blank nodes.
	bnm := make(map[string]map[string]bool)
	// bns: generated blank node -> final assigned placeholder.
	bns := make(map[string]string)
	// bna: placeholders still available for assignment.
	bna := map[string]bool{
		"/_<b1>": true,
		"/_<b2>": true,
	}
	// dtm: expected triples (with placeholder blank nodes) -> seen flag.
	dtm := map[string]bool{
		fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"connected_to"@[]`, `/person<B>`):                                   false,
		fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"connected_to"@[]`, `/person<C>`):                                   false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_subject"@[2016-04-10T04:25:00Z]`, `/person<A>`):                       false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_predicate"@[2016-04-10T04:25:00Z]`, `"met"@[2016-04-10T04:25:00Z]`):  false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"_object"@[2016-04-10T04:25:00Z]`, `/person<B>`):                        false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"location"@[]`, `/city<New York>`):                                      false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b1>`, `"outcome"@[]`, `"good"^^type:text`):                                     false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_subject"@[2016-04-10T04:25:00Z]`, `/person<B>`):                       false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_predicate"@[2016-04-10T04:25:00Z]`, `"met"@[2016-04-10T04:25:00Z]`):  false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"_object"@[2016-04-10T04:25:00Z]`, `/person<C>`):                        false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"location"@[]`, `/city<New York>`):                                      false,
		fmt.Sprintf("%s\t%s\t%s", `/_<b2>`, `"outcome"@[]`, `"good"^^type:text`):                                     false,
	}
	// First, we map each blank node generated to a potential blank node placeholder (such as b1 or b2.)
	sts := []*triple.Triple{}
	for elem := range ts {
		sts = append(sts, elem)
		if elem.Subject().Type().String() == "/_" {
			for k := range dtm {
				trp, err := triple.Parse(k, literal.DefaultBuilder())
				if err != nil {
					t.Errorf("Unable to parse triple: %v with error %v", k, err)
				}
				if trp.Subject().Type().String() == "/_" &&
					trp.Predicate().String() == elem.Predicate().String() &&
					trp.Object().String() == elem.Object().String() {
					if mp, ok := bnm[elem.Subject().String()]; !ok {
						bnm[elem.Subject().String()] = map[string]bool{
							trp.Subject().String(): true,
						}
					} else {
						mp[trp.Subject().String()] = true
					}
				}
			}
		}
	}
	// Then, we decide which place holder blank nodes can be used to substiute for a given blank node
	// by substituting the place holder in every triple where the given blank node is the subject and
	// checking if the triple exists in the map of expected triples.
	// Fix: the loop variable was named t, shadowing *testing.T; renamed to trp.
	for _, trp := range sts {
		if trp.Subject().Type().String() == "/_" {
			for bn := range bnm[trp.Subject().String()] {
				rep := fmt.Sprintf("%s\t%s\t%s", bn, trp.Predicate().String(), trp.Object().String())
				if _, ok := dtm[rep]; !ok {
					bnm[trp.Subject().String()][bn] = false
				}
			}
		}
	}
	// Finally, we assign a blank node to a place-holder blank node, if the place-holder blank node is
	// not used to substitute any other blank node.
	for k, v := range bnm {
		for bn, p := range v {
			if p && bna[bn] {
				bns[k] = bn
				bna[bn] = false
				break
			}
		}
	}
	if len(sts) != len(dtm) {
		t.Errorf("g.Triples should have returned %v triples, returned %v instead", len(dtm), len(sts))
	}
	for _, elem := range sts {
		if elem.Subject().Type().String() == "/_" {
			if val, ok := bns[elem.Subject().String()]; ok {
				// Substitute the blank node with the mapped place holder blank node id.
				rep := fmt.Sprintf("%s\t%s\t%s", val, elem.Predicate().String(), elem.Object().String())
				if _, ok := dtm[rep]; !ok {
					t.Errorf("unexpected triple: %v added to graph", elem)
				}
				dtm[rep] = true
			} else {
				t.Errorf("unexpected triple: %v added to graph", elem)
			}
		} else {
			sr := elem.String()
			if _, ok := dtm[sr]; !ok {
				t.Errorf("unexpected triple: %v added to graph", elem)
			}
			dtm[sr] = true
		}
	}
	// Every expected triple must have been seen. (Fix: !v instead of v == false.)
	for k, v := range dtm {
		if !v {
			t.Errorf("g.Triples did not return triple: %v", k)
		}
	}
}
// TestPlannerDeconstructRemovesCorrectTriples executes DECONSTRUCT statements
// and verifies that exactly the expected triples remain in the destination
// graph afterwards.
func TestPlannerDeconstructRemovesCorrectTriples(t *testing.T) {
	testTable := []struct {
		s    string   // DECONSTRUCT statement to execute.
		trps []string // Triples expected to remain in ?dest.
	}{
		{
			s: `deconstruct {?p1 "met"@[] ?p2}
			in ?dest
			from ?src
			where {?p1 "lives_in"@[] /city<A>.
			?p2 "lives_in"@[] /city<B>};`,
			trps: []string{fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<D>`, `"met"@[]`, `/person<A>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<D>`)},
		},
		{
			s: `deconstruct {?p1 "met"@[] ?p2.
			?p2 "met"@[] ?p1}
			in ?dest
			from ?src
			where {?p1 "lives_in"@[] /city<A>.
			?p2 "lives_in"@[] /city<B>};`,
			trps: []string{fmt.Sprintf("%s\t%s\t%s", `/person<A>`, `"met"@[]`, `/person<C>`),
				fmt.Sprintf("%s\t%s\t%s", `/person<B>`, `"met"@[]`, `/person<D>`)},
		},
	}
	for _, entry := range testTable {
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			t.Errorf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		// Fresh store per entry so deletions do not leak across cases.
		s, ctx := memory.NewStore(), context.Background()
		populateStoreWithTriples(ctx, s, "?src", deconstructTestSrcTriples, t)
		populateStoreWithTriples(ctx, s, "?dest", deconstructTestDestTriples, t)
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(entry.s, 1), st); err != nil {
			t.Errorf("Parser.consume: failed to parse query %q with error %v", entry.s, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			t.Errorf("planner.New failed to create a valid query plan with error %v", err)
		}
		_, err = plnr.Execute(ctx)
		if err != nil {
			t.Errorf("planner.Execute failed for query %q with error %v", entry.s, err)
			continue
		}
		// Fix: the graph being fetched is "?dest"; the message used to say "?test".
		g, err := s.Graph(ctx, "?dest")
		if err != nil {
			t.Errorf("memory.DefaultStore.Graph(%q) should have not fail with error %v", "?dest", err)
		}
		ts := make(chan *triple.Triple)
		go func() {
			if err := g.Triples(ctx, storage.DefaultLookup, ts); err != nil {
				t.Error(err)
			}
		}()
		// Check that every remaining triple is expected, and count distinct hits.
		dt := make(map[string]bool)
		for _, trp := range entry.trps {
			dt[trp] = false
		}
		i := 0
		for trp := range ts {
			if val, ok := dt[trp.String()]; ok {
				if !val {
					i++
				}
				dt[trp.String()] = true
			} else {
				t.Errorf("unexpected triple: %v added to graph", trp)
			}
		}
		if i != len(entry.trps) {
			t.Errorf("g.Triples did not return some of the triples.")
		}
	}
}
// TestTreeTraversalToRoot chains two graph patterns to walk from a grandchild
// up to the grandparent and expects exactly one binding and one row.
func TestTreeTraversalToRoot(t *testing.T) {
	// Graph traversal data.
	traversalTriples := `/person<Gavin Belson>  "born in"@[] /city<Springfield>
		/person<Gavin Belson>  "parent of"@[] /person<Peter Belson>
		/person<Gavin Belson>  "parent of"@[] /person<Mary Belson>
		/person<Mary Belson>   "parent of"@[] /person<Amy Schumer>
		/person<Mary Belson>   "parent of"@[] /person<Joe Schumer>`
	traversalQuery := `SELECT ?grandparent
	                   FROM ?test
	                   WHERE {
	                     ?s "parent of"@[] /person<Amy Schumer> .
	                     ?grandparent "parent of"@[] ?s
	                   };`
	// Load traversing data
	ctx := context.Background()
	s := memory.NewStore()
	g, gErr := s.NewGraph(ctx, "?test")
	if gErr != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", gErr)
	}
	if _, err := io.ReadIntoGraph(ctx, g, bytes.NewBufferString(traversalTriples), literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	p, pErr := grammar.NewParser(grammar.SemanticBQL())
	if pErr != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", pErr)
	}
	stm := &semantic.Statement{}
	if err := p.Parse(grammar.NewLLk(traversalQuery, 1), stm); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", traversalQuery, err)
	}
	plnr, err := New(ctx, s, stm, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plnr.Execute(ctx)
	if err != nil {
		t.Errorf("planner.Execute failed for query %q with error %v", traversalQuery, err)
	}
	if got, want := len(tbl.Bindings()), 1; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", traversalQuery, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", traversalQuery, got, want, tbl)
	}
}
// TestChaining joins two patterns through the shared binding ?o and expects a
// single result row (the child of /u<joe> that is the parent of /u<john>).
func TestChaining(t *testing.T) {
	// Graph traversal data.
	traversalTriples := `/u<joe>   "parent_of"@[] /u<mary>
		/u<joe>   "parent_of"@[] /u<peter>
		/u<peter> "parent_of"@[] /u<john>
		/u<peter> "parent_of"@[] /u<eve>`
	traversalQuery := `SELECT ?o FROM ?test
	                   WHERE {
	                     /u<joe> "parent_of"@[] ?o .
	                     ?o "parent_of"@[] /u<john>
	                   };`
	// Load traversing data
	ctx := context.Background()
	s := memory.NewStore()
	g, gErr := s.NewGraph(ctx, "?test")
	if gErr != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", gErr)
	}
	if _, err := io.ReadIntoGraph(ctx, g, bytes.NewBufferString(traversalTriples), literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	p, pErr := grammar.NewParser(grammar.SemanticBQL())
	if pErr != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", pErr)
	}
	stm := &semantic.Statement{}
	if err := p.Parse(grammar.NewLLk(traversalQuery, 1), stm); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", traversalQuery, err)
	}
	plnr, err := New(ctx, s, stm, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plnr.Execute(ctx)
	if err != nil {
		t.Errorf("planner.Execute failed for query %q with error %v", traversalQuery, err)
	}
	if got, want := len(tbl.Bindings()), 1; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", traversalQuery, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", traversalQuery, got, want, tbl)
	}
}
// BenchmarkChaining measures parse + plan + execute of a chained query over a
// small pre-loaded graph.
func BenchmarkChaining(b *testing.B) {
	// Graph traversal data.
	traversalTriples := `/u<joe>   "parent_of"@[] /u<mary>
		/u<joe>   "parent_of"@[] /u<peter>
		/u<peter> "parent_of"@[] /u<john>
		/u<peter> "parent_of"@[] /u<eve>`
	traversalQuery := `SELECT ?o FROM ?test
	                   WHERE {
	                     /u<joe> "parent_of"@[] ?o .
	                     ?o "parent_of"@[] /u<john>
	                   };`
	// Load traversing data
	s, ctx := memory.NewStore(), context.Background()
	g, gErr := s.NewGraph(ctx, "?test")
	if gErr != nil {
		b.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", gErr)
	}
	buf := bytes.NewBufferString(traversalTriples)
	if _, err := io.ReadIntoGraph(ctx, g, buf, literal.DefaultBuilder()); err != nil {
		b.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	// Fix: exclude the graph-loading setup above from the measurement
	// (benchmarkQuery in this file does the same).
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		p, pErr := grammar.NewParser(grammar.SemanticBQL())
		if pErr != nil {
			b.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", pErr)
		}
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(traversalQuery, 1), st); err != nil {
			b.Errorf("Parser.consume: failed to parse query %q with error %v", traversalQuery, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			b.Errorf("planner.New failed to create a valid query plan with error %v", err)
		}
		tbl, err := plnr.Execute(ctx)
		if err != nil {
			b.Errorf("planner.Execute failed for query %q with error %v", traversalQuery, err)
		}
		if got, want := len(tbl.Bindings()), 1; got != want {
			b.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", traversalQuery, got, want)
		}
		if got, want := len(tbl.Rows()), 1; got != want {
			b.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", traversalQuery, got, want, tbl)
		}
	}
}
// Test to validate https://github.com/google/badwolf/issues/70
// TestReificationResolutionIssue70 loads two reified statements (one immutable,
// one temporal) plus their base triples, then queries for blank nodes whose
// _subject and _object match, expecting both reification blank nodes to
// resolve — i.e. 2 bindings and 1 row.
func TestReificationResolutionIssue70(t *testing.T) {
	// Graph traversal data: two blank-node reifications of statements between
	// /aid</some/subject/id> and /aid</some/object/id>.
	issue70Triples := `/_<c175b457-e6d6-4ce3-8312-674353815720> "_predicate"@[] "/some/immutable/id"@[]
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_owner"@[2017-05-23T16:41:12.187373-07:00] /gid<0x9>
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_subject"@[] /aid</some/subject/id>
	/_<c175b457-e6d6-4ce3-8312-674353815720> "_object"@[] /aid</some/object/id>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_object"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_owner"@[2017-05-23T16:41:12.187373-07:00] /gid<0x6>
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_predicate"@[2017-05-23T16:41:12.187373-07:00] "/some/temporal/id"@[2017-05-23T16:41:12.187373-07:00]
	/_<cd8bae87-be96-41af-b1a8-27df990c9825> "_subject"@[2017-05-23T16:41:12.187373-07:00] /aid</some/subject/id>
	/aid</some/subject/id> "/some/temporal/id"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>
	/aid</some/subject/id> "/some/immutable/id"@[] /aid</some/object/id>
	/aid</some/subject/id> "/some/ownerless_temporal/id"@[2017-05-23T16:41:12.187373-07:00] /aid</some/object/id>`
	// @[,] matches both immutable and temporal predicate anchors.
	query := `
	SELECT ?bn, ?p
	FROM ?test
	WHERE {
		?bn "_subject"@[,] /aid</some/subject/id>.
		?bn "_predicate"@[,] ?p .
		?bn "_object"@[,] /aid</some/object/id>
	};`
	// Load traversing data
	s, ctx := memory.NewStore(), context.Background()
	g, gErr := s.NewGraph(ctx, "?test")
	if gErr != nil {
		t.Fatalf("memory.NewGraph failed to create \"?test\" with error %v", gErr)
	}
	b := bytes.NewBufferString(issue70Triples)
	if _, err := io.ReadIntoGraph(ctx, g, b, literal.DefaultBuilder()); err != nil {
		t.Fatalf("io.ReadIntoGraph failed to read test graph with error %v", err)
	}
	p, pErr := grammar.NewParser(grammar.SemanticBQL())
	if pErr != nil {
		t.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", pErr)
	}
	st := &semantic.Statement{}
	if err := p.Parse(grammar.NewLLk(query, 1), st); err != nil {
		t.Errorf("Parser.consume: failed to parse query %q with error %v", query, err)
	}
	plnr, err := New(ctx, s, st, 0, 10, nil)
	if err != nil {
		t.Errorf("planner.New failed to create a valid query plan with error %v", err)
	}
	tbl, err := plnr.Execute(ctx)
	if err != nil {
		t.Fatalf("planner.Execute failed for query %q with error %v", query, err)
	}
	if got, want := len(tbl.Bindings()), 2; got != want {
		t.Errorf("tbl.Bindings returned the wrong number of bindings for %q; got %d, want %d", query, got, want)
	}
	if got, want := len(tbl.Rows()), 1; got != want {
		t.Errorf("planner.Execute failed to return the expected number of rows for query %q; got %d want %d\nGot:\n%v\n", query, got, want, tbl)
	}
}
// benchmarkQuery is a helper function that runs a specified query on the
// testing data set for benchmarking purposes. The store is populated once,
// outside the timed region, so each iteration measures parse + plan + execute.
func benchmarkQuery(query string, b *testing.B) {
	s, ctx := memory.NewStore(), context.Background()
	populateStoreWithTriples(ctx, s, "?test", testTriples, b)
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		p, err := grammar.NewParser(grammar.SemanticBQL())
		if err != nil {
			b.Fatalf("grammar.NewParser should have produced a valid BQL parser but got error: %v", err)
		}
		st := &semantic.Statement{}
		if err := p.Parse(grammar.NewLLk(query, 1), st); err != nil {
			// Fatalf (not Errorf): continuing with a half-parsed statement
			// would feed invalid state to the planner and skew the benchmark.
			b.Fatalf("Parser.consume: failed to parse query %q with error %v", query, err)
		}
		plnr, err := New(ctx, s, st, 0, 10, nil)
		if err != nil {
			b.Fatalf("planner.New failed to create a valid query plan with error %v", err)
		}
		_, err = plnr.Execute(ctx)
		if err != nil {
			b.Fatalf("planner.Execute failed for query %q with error %v", query, err)
		}
	}
}
// These benchmark tests are used to observe the difference in speed between queries using the "as" keyword as opposed
// to queries that do not.
// BenchmarkReg1 benchmarks a single-binding projection where only one of the
// two projected bindings uses the "as" alias.
func BenchmarkReg1(b *testing.B) {
	benchmarkQuery(`select ?p, ?o as ?o1 from ?test where {/u<joe> ?p ?o};`, b)
}
// BenchmarkAs1 benchmarks the same query as BenchmarkReg1 but with every
// projected binding aliased via "as".
func BenchmarkAs1(b *testing.B) {
	benchmarkQuery(`select ?p as ?p1, ?o as ?o1 from ?test where {?s ?p ?o};`, b)
}
// BenchmarkReg2 benchmarks a full triple scan projection without "as" aliases.
func BenchmarkReg2(b *testing.B) {
	benchmarkQuery(`select ?s, ?p, ?o from ?test where {?s ?p ?o};`, b)
}
// BenchmarkAs2 benchmarks the same full triple scan as BenchmarkReg2 but with
// every projected binding aliased via "as".
func BenchmarkAs2(b *testing.B) {
	benchmarkQuery(`select ?s as ?s1, ?p as ?p1, ?o as ?o1 from ?test where {?s ?p ?o};`, b)
}
|
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tracer contains the implementation of basic execution tracing tools.
package tracer
import (
"io"
"time"
)
// Trace attempts to write a trace if a valid writer is provided. The
// tracer is lazy on the string generation to avoid adding too much
// overhead when tracing ins not on.
func Trace(w io.Writer, msgs func() []string) {
if w == nil {
return
}
for _, msg := range msgs() {
w.Write([]byte("["))
w.Write([]byte(time.Now().Format("2006-01-02T15:04:05.999999-07:00")))
w.Write([]byte("] "))
w.Write([]byte(msg))
w.Write([]byte("\n"))
}
}
Extend the tracer to better support concurrent events.
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tracer contains the implementation of basic execution tracing tools.
package tracer
import (
"io"
"time"
)
// event pairs a destination writer with a lazy message generator; it is the
// unit of work Trace enqueues for the background writer goroutine.
type event struct {
	w    io.Writer       // destination for the rendered trace lines
	msgs func() []string // lazily produces the messages to emit
}

// c carries queued trace events to the single writer goroutine started in
// init, which serializes all writes.
var c chan *event
// init starts the single background goroutine that drains the event channel
// and performs all trace writes. Funneling every event through one goroutine
// serializes output across concurrent callers of Trace. The channel is never
// closed, so the goroutine runs for the lifetime of the process.
func init() {
	c = make(chan *event, 10000) // Large enough to avoid blocking as much as possible.
	go func() {
		for e := range c {
			// Render each message as "[<timestamp>] <msg>\n". The timestamp
			// reflects when the event is written, not when it was queued.
			for _, msg := range e.msgs() {
				e.w.Write([]byte("["))
				e.w.Write([]byte(time.Now().Format("2006-01-02T15:04:05.999999-07:00")))
				e.w.Write([]byte("] "))
				e.w.Write([]byte(msg))
				e.w.Write([]byte("\n"))
			}
		}
	}()
}
// Trace enqueues msgs for asynchronous emission when a valid writer is
// provided. Message rendering stays lazy (the msgs callback runs on the
// writer goroutine, not here) so a disabled tracer adds almost no overhead.
func Trace(w io.Writer, msgs func() []string) {
	if w == nil {
		return
	}
	c <- &event{w: w, msgs: msgs}
}
|
package server
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/pachyderm/pachyderm"
"github.com/pachyderm/pachyderm/src/client"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pkg/uuid"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
pfspretty "github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppsserver "github.com/pachyderm/pachyderm/src/server/pps"
ppspretty "github.com/pachyderm/pachyderm/src/server/pps/pretty"
pps_server "github.com/pachyderm/pachyderm/src/server/pps/server"
"go.pedge.io/proto/time"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api"
kube_client "k8s.io/kubernetes/pkg/client/restclient"
kube "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
kube_labels "k8s.io/kubernetes/pkg/labels"
)
const (
	// NUMFILES is a default file count for load-style tests.
	// NOTE(review): no usage is visible in this chunk — confirm before removing.
	NUMFILES = 25
	// KB is the number of bytes in a kibibyte.
	KB = 1024
)
// TestJob runs the shared job test with a parallelism of 4 shards.
func TestJob(t *testing.T) {
	t.Parallel()
	testJob(t, 4)
}
// TestJobNoShard runs the shared job test with zero explicit shards.
func TestJobNoShard(t *testing.T) {
	t.Parallel()
	testJob(t, 0)
}
// testJob creates a data repo with enough files to feed every worker, runs a
// job that copies them into its output commit, and verifies that the job
// succeeds and every file arrives intact. shards sets the constant
// parallelism of the job.
func testJob(t *testing.T, shards int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := uniqueString("TestJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	fileContent := "foo\n"
	// We want to create lots of files so that each parallel job will be
	// started with some files
	numFiles := shards*100 + 100
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("cp %s %s", path.Join("/pfs", dataRepo, "*"), "/pfs/out")},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(shards),
		},
		[]*ppsclient.JobInput{{
			Commit: commit,
			Method: client.ReduceMethod,
		}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	// Local renamed from the misspelled "parellelism" for consistency with
	// the identically-purposed local in TestPachCommitIdEnvVarInJob.
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.True(t, parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	require.NotNil(t, jobInfo.Started)
	require.NotNil(t, jobInfo.Finished)
	require.True(t, prototime.TimestampToTime(jobInfo.Finished).After(prototime.TimestampToTime(jobInfo.Started)))
	// Every input file must have been copied to the output commit verbatim.
	for i := 0; i < numFiles; i++ {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("file-%d", i), 0, 0, "", false, nil, &buffer))
		require.Equal(t, fileContent, buffer.String())
	}
}
// TestPachCommitIdEnvVarInJob verifies that a job's containers receive one
// PACH_<REPO>_COMMIT_ID environment variable per input, by echoing those
// variables into output files and comparing them with the input commit IDs.
func TestPachCommitIdEnvVarInJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	// Two input repos, each with a single finished commit.
	repos := []string{
		uniqueString("TestJob_FriarTuck"),
		uniqueString("TestJob_RobinHood"),
	}
	var commits []*pfsclient.Commit
	for _, repo := range repos {
		require.NoError(t, c.CreateRepo(repo))
		commit, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		fileContent := "foo\n"
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
		commits = append(commits, commit)
	}
	// The job writes each input's commit-ID env var into its own output file.
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[0]), repos[0]),
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[1]), repos[1]),
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(shards),
		},
		[]*ppsclient.JobInput{
			{
				Commit: commits[0],
				Method: client.ReduceMethod,
			},
			{
				Commit: commits[1],
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.True(t, parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	// Each output file must contain exactly the corresponding input commit ID.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[0]), 0, 0, "", false, nil, &buffer))
	require.Equal(t, jobInfo.Inputs[0].Commit.ID, strings.TrimSpace(buffer.String()))
	buffer.Reset()
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[1]), 0, 0, "", false, nil, &buffer))
	require.Equal(t, jobInfo.Inputs[1].Commit.ID, strings.TrimSpace(buffer.String()))
}
// TestDuplicatedJob verifies job deduplication: submitting an identical
// CreateJobRequest twice returns the same job, while setting Force creates a
// distinct new job. It also checks the deduplicated job's output.
func TestDuplicatedJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("TestDuplicatedJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipelineName := uniqueString("TestDuplicatedJob_pipeline")
	// Create the pipeline's output repo by hand (with provenance on the data
	// repo) instead of creating a pipeline.
	_, err = c.PfsAPIClient.CreateRepo(
		context.Background(),
		&pfsclient.CreateRepoRequest{
			Repo:       client.NewRepo(pipelineName),
			Provenance: []*pfsclient.Repo{client.NewRepo(dataRepo)},
		},
	)
	require.NoError(t, err)
	cmd := []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}
	// Now we manually create the same job
	req := &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: cmd,
		},
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		Inputs: []*ppsclient.JobInput{{
			Commit: commit,
		}},
	}
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	// Identical requests are deduplicated into the same job.
	require.Equal(t, job1, job2)
	// Force bypasses deduplication and yields a new job.
	req.Force = true
	job3, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	require.NotEqual(t, job1, job3)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, fileContent, buffer.String())
}
// TestLogs runs a 4-way parallel job that echoes "foo" and verifies GetLogs
// returns one shard-prefixed line per worker ("<shard> | foo"), and that
// requesting logs for a nonexistent job fails.
func TestLogs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"echo", "foo"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// TODO we Sleep here because even though the job has completed kubernetes
	// might not have even noticed the container was created yet
	time.Sleep(10 * time.Second)
	var buffer bytes.Buffer
	require.NoError(t, c.GetLogs(job.ID, &buffer))
	require.Equal(t, "0 | foo\n1 | foo\n2 | foo\n3 | foo\n", buffer.String())
	// Should get an error if the job does not exist
	require.YesError(t, c.GetLogs("nonexistent", &buffer))
}
// TestGrep runs the same grep job once with parallelism 1 and once with
// parallelism 4, and verifies both output repos end up the same size —
// i.e. parallelism does not change the aggregate result.
func TestGrep(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	dataRepo := uniqueString("TestGrep_data")
	c := getPachClient(t)
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	// 100 identical multi-line files to be grepped.
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	job1, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	job2, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	job1Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// Reuse the request (and the same 30s deadline) for the second job.
	inspectJobRequest.Job = job2
	job2Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	repo1Info, err := c.InspectRepo(job1Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	repo2Info, err := c.InspectRepo(job2Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
// TestJobLongOutputLine runs a job whose output file is a single line of one
// million characters, verifying that very long lines do not break job
// execution or output handling.
func TestJobLongOutputLine(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"sh"},
		[]string{"yes | tr -d '\\n' | head -c 1000000 > /pfs/out/file"},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	// Bound the blocking InspectJob call like every other test in this file,
	// so a hung job fails the test instead of hanging the suite forever.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestPipeline creates a simple copy pipeline and verifies that: each input
// commit produces one output commit with the copied content, output commits
// chain via ParentCommit, and deleting the pipeline stops further output
// commits from being produced.
func TestPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:   &pfsclient.Repo{Name: dataRepo},
			Method: client.MapMethod,
		}},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block until the pipeline produces its first output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{&pfsclient.Commit{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Block for the next output commit and verify it chains off the first.
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	require.NotNil(t, listCommitResponse.CommitInfo[0].ParentCommit)
	require.Equal(t, outCommits[0].Commit.ID, listCommitResponse.CommitInfo[0].ParentCommit.ID)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "bar\n", buffer.String())
	// Delete the pipeline and confirm it no longer appears in listings.
	require.NoError(t, c.DeletePipeline(pipelineName))
	pipelineInfos, err := c.PpsAPIClient.ListPipeline(context.Background(), &ppsclient.ListPipelineRequest{})
	require.NoError(t, err)
	for _, pipelineInfo := range pipelineInfos.PipelineInfo {
		require.True(t, pipelineInfo.Pipeline.Name != pipelineName)
	}
	// Do third commit to repo; this time pipeline should not run since it's been deleted
	commit3, err := c.StartCommit(dataRepo, commit2.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "file", strings.NewReader("buzz\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	// We will sleep a while to wait for the pipeline to actually get cancelled
	// Also if the pipeline didn't get cancelled (due to a bug), we sleep a while
	// to let the pipeline commit
	time.Sleep(5 * time.Second)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{&pfsclient.Commit{
			Repo: outRepo,
		}},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// there should only be two commits in the pipeline
	require.Equal(t, 2, len(listCommitResponse.CommitInfo))
}
// TestPipelineWithEmptyInputs verifies the RunEmpty input flag: when false,
// empty input commits produce empty output commits and JOB_EMPTY jobs (still
// chained via ParentCommit); when true, empty input commits run the transform
// and produce real output.
func TestPipelineWithEmptyInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repo
	dataRepo := uniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create a pipeline that doesn't run with empty commits
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:     &pfsclient.Repo{Name: dataRepo},
			RunEmpty: false,
		}},
		false,
	))
	// Add first empty commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// RunEmpty is false, so the transform did not run: output is empty.
	require.Equal(t, 0, int(outCommits[0].SizeBytes))
	// An empty job should've been created
	jobInfos, err := c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.Equal(t, ppsclient.JobState_JOB_EMPTY, jobInfos[0].State)
	// Make another empty commit in the input repo
	// The output commit should have the previous output commit as its parent
	parentOutputCommit := outCommits[0].Commit
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{parentOutputCommit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	require.Equal(t, 0, int(outCommits[0].SizeBytes))
	require.Equal(t, parentOutputCommit.ID, outCommits[0].ParentCommit.ID)
	jobInfos, err = c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
	require.Equal(t, ppsclient.JobState_JOB_EMPTY, jobInfos[1].State)
	// create a pipeline that runs with empty commits
	dataRepo = uniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName = uniqueString("pipeline")
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	outRepo = ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:     &pfsclient.Repo{Name: dataRepo},
			RunEmpty: true,
		}},
		false,
	))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// RunEmpty is true, so the transform ran and wrote "foo\n".
	require.Equal(t, len("foo\n"), int(outCommits[0].SizeBytes))
	jobInfos, err = c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
}
// TestPipelineWithTooMuchParallelism verifies that a pipeline whose declared
// parallelism exceeds the useful amount of work still succeeds: with a reduce
// input only one pod receives the single file, and no pod fails on empty
// input.
func TestPipelineWithTooMuchParallelism(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipelineWithTooMuchParallelism_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	// This pipeline will fail if any pod sees empty input, since cp won't
	// be able to find the file.
	// We have parallelism set to 3 so that if we actually start 3 pods,
	// which would be a buggy behavior, some jobs don't see any files
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		[]*ppsclient.PipelineInput{{
			Repo: &pfsclient.Repo{Name: dataRepo},
			// Use reduce method so only one pod gets the file
			Method: client.ReduceMethod,
		}},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block (with a 30s deadline) for the pipeline's output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	require.Equal(t, false, outCommits[0].Cancelled)
}
// TestPipelineWithNoInputs verifies that an input-less pipeline can be
// triggered manually via CreateJob, runs with the expected number of workers
// (each writing a uniquely named file), and that repeated manual triggers
// yield distinct job IDs.
func TestPipelineWithNoInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// Each worker writes to a random file name so the 3 outputs don't collide.
			"NEW_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)",
			"echo foo > /pfs/out/$NEW_UUID",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.Equal(t, 3, int(parallelism))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// One file per worker.
	fileInfos, err := c.ListFile(outRepo.Name, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 3, len(fileInfos))
	// Make sure that each job gets a different ID
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	require.True(t, job.ID != job2.ID)
}
// TestPipelineThatWritesToOneFile verifies that when all 3 workers write 10
// bytes to the same output path, the contributions are combined: the
// resulting file is 30 bytes.
func TestPipelineThatWritesToOneFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// Each of the 3 workers writes exactly 10 zero bytes to the same file.
			"dd if=/dev/zero of=/pfs/out/file bs=10 count=1",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	_, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// 3 workers x 10 bytes each.
	require.Equal(t, 30, buffer.Len())
}
// TestPipelineThatOverwritesFile verifies shell-redirect (">") semantics
// across chained jobs: a second job that overwrites the file leaves the
// content unchanged (3 "foo" lines, one per worker), rather than doubling it.
func TestPipelineThatOverwritesFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// One "foo" line per worker after the first job.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	// we expect only 3 foos here because > _overwrites_ rather than appending.
	// Appending is done with >>.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer2.String())
}
// TestPipelineThatAppendsToFile is the ">>" counterpart of
// TestPipelineThatOverwritesFile: a second chained job that appends doubles
// the content, yielding 6 "foo" lines (3 workers x 2 jobs).
func TestPipelineThatAppendsToFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo >> /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// One "foo" line per worker after the first job.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	// The second job appended, doubling the content.
	require.Equal(t, "foo\nfoo\nfoo\nfoo\nfoo\nfoo\n", buffer2.String())
}
// TestRemoveAndAppend exercises the remove-and-append scenario with a single
// worker; the parallel variant lives in TestParellelRemoveAndAppend.
func TestRemoveAndAppend(t *testing.T) {
	const parallelism = 1
	testParellelRemoveAndAppend(t, parallelism)
}
// TestParellelRemoveAndAppend runs the remove-and-append scenario with three
// parallel workers.
// NOTE(review): the original comment said this test "is skipped right now"
// because it does not pass on Travis, but there is no t.Skip here, so it
// currently runs. Confirm whether the Travis failure is resolved or whether
// the skip should be reinstated.
func TestParellelRemoveAndAppend(t *testing.T) {
	testParellelRemoveAndAppend(t, 3)
}
// testParellelRemoveAndAppend runs two chained jobs at the given parallelism:
// the first writes "foo" to /pfs/out/file from every worker, the second
// (child of the first) unlinks the file and writes "bar". It verifies that
// the second job's output fully replaces the first's rather than appending.
func testParellelRemoveAndAppend(t *testing.T, parallelism int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"echo foo > /pfs/out/file",
			},
		},
		ParallelismSpec: &ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(parallelism),
		},
	})
	require.NoError(t, err)
	// Block until job1 reaches a terminal state.
	inspectJobRequest1 := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
	// Each of the parallelism workers contributed one "foo" line.
	require.Equal(t, strings.Repeat("foo\n", parallelism), buffer.String())
	// job2 removes the inherited file and rewrites it from scratch.
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"unlink /pfs/out/file && echo bar > /pfs/out/file",
			},
		},
		ParallelismSpec: &ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(parallelism),
		},
		ParentJob: job1,
	})
	require.NoError(t, err)
	inspectJobRequest2 := &ppsclient.InspectJobRequest{
		Job:        job2,
		BlockState: true,
	}
	jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
	require.NoError(t, err)
	// Best-effort: dump the job's logs for debugging; the error is
	// intentionally ignored.
	c.GetLogs(jobInfo2.Job.ID, os.Stdout)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer2))
	// The unlink means only "bar" lines survive — no "foo" remnants.
	require.Equal(t, strings.Repeat("bar\n", parallelism), buffer2.String())
}
// TestWorkload runs the generic randomized workload (100 operations) against
// the cluster as a smoke test.
func TestWorkload(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	pc := getPachClient(t)
	// Seed from the clock so each run exercises a different random sequence.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	require.NoError(t, workload.RunWorkload(pc, rng, 100))
}
// TestSharding concurrently writes NUMFILES random files of KB bytes into one
// commit, then verifies that reading each file sharded four ways by block
// yields the same total byte count as reading it unsharded.
func TestSharding(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestSharding")
	c := getPachClient(t)
	err := c.CreateRepo(repo)
	require.NoError(t, err)
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	var wg sync.WaitGroup
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			rng := rand.New(rand.NewSource(int64(i)))
			// Fix: declare err locally with := — the original assigned to
			// the enclosing function's err from many goroutines at once,
			// which is a data race (caught by -race).
			_, err := c.PutFile(repo, commit.ID, fmt.Sprintf("file%d", i), workload.NewReader(rng, KB))
			require.NoError(t, err)
		}()
	}
	wg.Wait()
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			var buffer1Shard bytes.Buffer
			var buffer4Shard bytes.Buffer
			shard := &pfsclient.Shard{FileModulus: 1, BlockModulus: 1}
			err := c.GetFile(repo, commit.ID,
				fmt.Sprintf("file%d", i), 0, 0, "", false, shard, &buffer1Shard)
			require.NoError(t, err)
			shard.BlockModulus = 4
			for blockNumber := uint64(0); blockNumber < 4; blockNumber++ {
				shard.BlockNumber = blockNumber
				// Fix: check the error; the original silently discarded it,
				// which could mask a short read and a bogus length match.
				require.NoError(t, c.GetFile(repo, commit.ID, fmt.Sprintf("file%d", i), 0, 0, "", false, shard, &buffer4Shard))
			}
			// The four block shards together must cover exactly the file.
			require.Equal(t, buffer1Shard.Len(), buffer4Shard.Len())
		}()
	}
	wg.Wait()
}
// TestFromCommit writes KB random bytes to "file" in each of two chained
// commits, then checks that a diff read (FromCommit = commit1) returns only
// the second write while a full read returns both.
func TestFromCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestFromCommit")
	c := getPachClient(t)
	seed := time.Now().UnixNano()
	// Named rng (rather than rand) so the package identifier stays visible.
	rng := rand.New(rand.NewSource(seed))
	err := c.CreateRepo(repo)
	require.NoError(t, err)
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit1.ID)
	require.NoError(t, err)
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit2.ID)
	require.NoError(t, err)
	// Reading relative to commit1 yields only commit2's delta…
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, commit1.ID, false, nil, &buffer))
	require.Equal(t, buffer.Len(), KB)
	// …while an unqualified read yields both writes.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, buffer.Len(), 2*KB)
}
// TestSimple is a basic end-to-end PFS check: write a file in one commit,
// read it back, then write the same file again in a child commit and confirm
// the parent's view is unchanged while the child's view shows both writes.
func TestSimple(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("TestSimple")
	require.NoError(t, c.CreateRepo(repo))
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commitInfos, err := c.ListCommit([]*pfsclient.Commit{{
		// Fix: keyed field (was the unkeyed literal &pfsclient.Repo{repo},
		// flagged by go vet's composites check).
		Repo: &pfsclient.Repo{Name: repo},
	}}, nil, client.CommitTypeNone, pfsclient.CommitStatus_NORMAL, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit2.ID)
	require.NoError(t, err)
	// The parent commit is immutable: it still sees only the first write.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// The child commit sees the first write plus its own append.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit2.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\nfoo\n", buffer.String())
}
// TestPipelineWithMultipleInputs runs a pipeline that cross-joins the files
// of two input repos (numfiles x numfiles output lines), then commits more
// files to each input in turn and checks the line count doubles each time
// under the incremental-reduce method.
func TestPipelineWithMultipleInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	inputRepo1 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo1))
	inputRepo2 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo2))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
repo1=%s
repo2=%s
echo $repo1
ls -1 /pfs/$repo1
echo $repo2
ls -1 /pfs/$repo2
for f1 in /pfs/$repo1/*
do
for f2 in /pfs/$repo2/*
do
v1=$(<$f1)
v2=$(<$f2)
echo $v1$v2 >> /pfs/out/file
done
done
`, inputRepo1, inputRepo2)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: inputRepo1},
				Method: client.IncrementalReduceMethod,
			},
			{
				Repo:   &pfsclient.Repo{Name: inputRepo2},
				Method: client.IncrementalReduceMethod,
			},
		},
		false,
	))
	content := "foo"
	numfiles := 10
	commit1, err := c.StartCommit(inputRepo1, "master")
	// Fix: the StartCommit error was never checked (err was silently
	// overwritten inside the loop below). Same fix applied to the three
	// later StartCommit calls.
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit1.ID))
	commit2, err := c.StartCommit(inputRepo2, "master")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit2.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit2.ID))
	// Block until the pipeline's first output commit is readable.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// Fix: keyed field (go vet composites).
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	// Full cross product of the two inputs.
	require.Equal(t, numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Add numfiles more files to input 1: output should double.
	commit3, err := c.StartCommit(inputRepo1, commit1.ID)
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit3.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit3.ID))
	listCommitRequest.FromCommits = append(listCommitRequest.FromCommits, outCommits[0].Commit)
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 2*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Add numfiles more files to input 2: output should double again.
	commit4, err := c.StartCommit(inputRepo2, commit2.ID)
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit4.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit4.ID))
	listCommitRequest.FromCommits[0] = outCommits[0].Commit
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 4*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
}
// TestPipelineWithGlobalMethod verifies the Global input method: every worker
// sees the entire input repo, so each of the `parallelism` workers writes the
// full file count (numfiles) as its output line.
func TestPipelineWithGlobalMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	globalRepo := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(globalRepo))
	numfiles := 20
	pipelineName := uniqueString("pipeline")
	parallelism := 2
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		// this script simply outputs the number of files under the global repo
		[]string{fmt.Sprintf(`
numfiles=(/pfs/%s/*)
numfiles=${#numfiles[@]}
echo $numfiles > /pfs/out/file
`, globalRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(parallelism),
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: globalRepo},
				Method: client.GlobalMethod,
			},
		},
		false,
	))
	content := "foo"
	commit, err := c.StartCommit(globalRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(globalRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(globalRepo, commit.ID))
	// Block until the pipeline produces its output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// Fix: keyed field (was the unkeyed literal
			// &pfsclient.Repo{pipelineName}, flagged by go vet composites).
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, parallelism, len(lines)) // each job outputs one line
	for _, line := range lines {
		// Every worker saw the complete input, hence the full file count.
		require.Equal(t, fmt.Sprintf("%d", numfiles), line)
	}
}
// TestPipelineWithPrevRepoAndIncrementalReduceMethod checks that with the
// incremental-reduce method a pipeline can read its previous output via
// /pfs/prev: the second run sees the new input ("bar") plus the prior
// output ("foo"), appended after its own fresh output.
func TestPipelineWithPrevRepoAndIncrementalReduceMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
cat /pfs/%s/file >>/pfs/out/file
if [ -d "/pfs/prev" ]; then
cat /pfs/prev/file >>/pfs/out/file
fi
`, repo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: repo},
				Method: client.IncrementalReduceMethod,
			},
		},
		false,
	))
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", strings.NewReader("foo\n"))
	// Fix: the PutFile error was never checked (the original jumped straight
	// to checking FinishCommit). Same fix for the second PutFile below.
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// Fix: keyed field (go vet composites).
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	// First run: no /pfs/prev yet, so just the input line.
	require.Equal(t, 1, len(lines))
	require.Equal(t, "foo", lines[0])
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	lines = strings.Split(strings.TrimSpace(buffer2.String()), "\n")
	// Second run: previous output "foo" carried over, new input "bar"
	// written, then /pfs/prev's "foo" appended.
	require.Equal(t, 3, len(lines))
	require.Equal(t, "foo", lines[0])
	require.Equal(t, "bar", lines[1])
	require.Equal(t, "foo", lines[2])
}
// TestPipelineThatUseNonexistentInputs asserts that creating a pipeline whose
// input repo does not exist is rejected.
func TestPipelineThatUseNonexistentInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	pc := getPachClient(t)
	name := uniqueString("pipeline")
	inputs := []*ppsclient.PipelineInput{
		{
			Repo: &pfsclient.Repo{Name: "nonexistent"},
		},
	}
	parSpec := &ppsclient.ParallelismSpec{
		Strategy: ppsclient.ParallelismSpec_CONSTANT,
		Constant: 1,
	}
	// The referenced repo was never created, so this must fail.
	require.YesError(t, pc.CreatePipeline(
		name,
		"",
		[]string{"bash"},
		[]string{""},
		parSpec,
		inputs,
		false,
	))
}
// TestPipelineWhoseInputsGetDeleted verifies deletion ordering: an input repo
// cannot be removed while a pipeline's output repo lists it as provenance;
// deleting pipeline, then output repo, then input repo succeeds.
func TestPipelineWhoseInputsGetDeleted(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	pc := getPachClient(t)
	input := uniqueString("repo")
	require.NoError(t, pc.CreateRepo(input))
	name := uniqueString("pipeline")
	require.NoError(t, pc.CreatePipeline(
		name,
		"",
		[]string{"bash"},
		[]string{"true"},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: input},
			},
		},
		false,
	))
	// Deleting the input repo directly must fail: it is in the provenance of
	// the pipeline's output repo.
	require.YesError(t, pc.DeleteRepo(input, false))
	// The supported order: pipeline first, then its output repo, and only
	// then the input repo.
	require.NoError(t, pc.DeletePipeline(name))
	require.NoError(t, pc.DeleteRepo(name, false))
	require.NoError(t, pc.DeleteRepo(input, false))
}
// TestAssets fails if a static asset used in code (such as
// doc/pipeline_spec.md) was updated without running:
//
//	$ make assets
//
// to regenerate the bundled copy. It compares each file on disk against the
// version compiled into the binary.
func TestAssets(t *testing.T) {
	assetPaths := []string{"doc/pipeline_spec.md"}
	// Fix: loop variable renamed from "path" to assetPath so it no longer
	// shadows the imported path package (used elsewhere in this file).
	for _, assetPath := range assetPaths {
		doc, err := ioutil.ReadFile(filepath.Join(os.Getenv("GOPATH"), "src/github.com/pachyderm/pachyderm/", assetPath))
		if err != nil {
			t.Fatal(err)
		}
		asset, err := pachyderm.Asset(assetPath)
		if err != nil {
			t.Fatal(err)
		}
		require.Equal(t, doc, asset)
	}
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction
// It looks like this:
// A
// | \
// v  v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
// C diffs A's file against B's copy of it, so its output must always be
// empty if provenance is handled correctly.
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B simply copies A's file.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
		false,
	))
	// C takes both A and B as inputs and diffs the two copies of the file.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{Repo: client.NewRepo(aRepo)},
			{Repo: client.NewRepo(bPipeline)},
		},
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// There should only be 2 commits on cRepo
	commitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		// Fix: keyed field (was the unkeyed literal &pfsclient.Repo{cPipeline},
		// flagged by go vet's composites check).
		Repo: &pfsclient.Repo{Name: cPipeline},
	}}, nil, pfsclient.CommitType_COMMIT_TYPE_READ, pfsclient.CommitStatus_NORMAL, false)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		// C takes the diff of 2 files that should always be the same, so we
		// expect an empty file
		fileInfo, err := c.InspectFile(cPipeline, commitInfo.Commit.ID, "file", "", false, nil)
		require.NoError(t, err)
		require.Equal(t, uint64(0), fileInfo.SizeBytes)
	}
}
// TestDirectory verifies that jobs can create and append to files inside a
// subdirectory of /pfs/out, and that a child job appends to the directory
// contents inherited from its parent.
func TestDirectory(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	// job1: three workers each append "foo" to dir/file.
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"mkdir /pfs/out/dir",
				"echo foo >> /pfs/out/dir/file",
			},
		},
		ParallelismSpec: &ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
	})
	require.NoError(t, err)
	inspectJobRequest1 := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "dir/file", 0, 0, "", false, nil, &buffer))
	// One "foo" per worker.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// job2 (child of job1): appends "bar" to the same file; the directory
	// already exists in the inherited output, so no mkdir here.
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"echo bar >> /pfs/out/dir/file",
			},
		},
		ParallelismSpec: &ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		ParentJob: job1,
	})
	require.NoError(t, err)
	inspectJobRequest2 := &ppsclient.InspectJobRequest{
		Job:        job2,
		BlockState: true,
	}
	jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "dir/file", 0, 0, "", false, nil, &buffer))
	// Parent's three "foo" lines followed by the child's three "bar" lines.
	require.Equal(t, "foo\nfoo\nfoo\nbar\nbar\nbar\n", buffer.String())
}
// TestFailedJobReadData checks that output written by a job that later fails
// (exit 1 after writing) remains readable from its cancelled output commit.
func TestFailedJobReadData(t *testing.T) {
	// We want to enable users to be able to read data from cancelled commits for debugging purposes
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	repo := uniqueString("TestJob_Foo")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	// The job writes output first, then deliberately fails with exit 1.
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			"echo fubar > /pfs/out/file",
			"exit 1",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(shards),
		},
		[]*ppsclient.JobInput{
			{
				Commit: commit,
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_FAILURE.String(), jobInfo.State.String())
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.True(t, parallelism > 0)
	// Best-effort: dump job logs for debugging; the error is intentionally
	// ignored.
	c.GetLogs(jobInfo.Job.ID, os.Stdout)
	// The output commit should exist but be marked cancelled.
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	require.Equal(t, true, commitInfo.Cancelled)
	// Despite the failure, the data written before exit 1 is readable.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "fubar", strings.TrimSpace(buffer.String()))
}
// TestFlushCommit builds a five-stage chain of copy pipelines and checks that
// FlushCommit on the source repo blocks until all downstream commits exist
// (numStages pipeline commits plus the source commit itself). The scenario is
// run twice: once from an orphan commit and once from a commit with a parent.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline
	numStages := 5
	for i := 0; i < numStages; i++ {
		repo := makeRepoName(i)
		// Stage i+1 copies "file" from stage i's repo.
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
			false,
		))
	}
	// test commits to the source repo (with the given parent) and waits for
	// the whole chain to flush; returns the new commit's ID.
	test := func(parent string) string {
		commit, err := c.StartCommit(sourceRepo, parent)
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
		commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		// Source commit + one output commit per stage.
		require.Equal(t, numStages+1, len(commitInfos))
		return commit.ID
	}
	// Run the test twice, once on a orphan commit and another on
	// a commit with a parent
	commit := test(uuid.New())
	test(commit)
}
// TestFlushCommitAfterCreatePipeline lays down ten commits on master before
// the pipeline exists, then checks that FlushCommit on master still resolves
// once the pipeline has been created.
func TestFlushCommitAfterCreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	pc := getPachClient(t)
	repo := uniqueString("TestFlushCommitAfterCreatePipeline")
	require.NoError(t, pc.CreateRepo(repo))
	// Ten finished commits, each appending one numbered line to "file".
	for i := 0; i < 10; i++ {
		_, err := pc.StartCommit(repo, "master")
		require.NoError(t, err)
		_, err = pc.PutFile(repo, "master", "file", strings.NewReader(fmt.Sprintf("foo%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, pc.FinishCommit(repo, "master"))
	}
	pipeline := uniqueString("TestFlushCommitAfterCreatePipelinePipeline")
	require.NoError(t, pc.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		false,
	))
	// Flushing master must succeed even though the commits predate the
	// pipeline.
	_, err := pc.FlushCommit([]*pfsclient.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
}
// TestFlushCommitWithFailure is similar to TestFlushCommit except that
// the pipeline is designed to fail
// FlushCommit must surface the downstream failure as an error instead of
// blocking forever or succeeding.
func TestFlushCommitWithFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline; the third stage is designed to fail
	numStages := 5
	for i := 0; i < numStages; i++ {
		fileName := "file"
		if i == 3 {
			// This stage copies a file that does not exist, so its job fails.
			fileName = "nonexistent"
		}
		repo := makeRepoName(i)
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, fileName), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
			false,
		))
	}
	commit, err := c.StartCommit(sourceRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
	// The broken stage must make the flush fail.
	_, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
	require.YesError(t, err)
}
// TestRecreatePipeline tracks #432: a pipeline that is deleted and then
// recreated with the same name must still trigger jobs for existing input
// commits on the second creation.
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := uniqueString("pipeline")
	// createPipeline creates the pipeline and blocks (30s budget) until its
	// first output commit appears.
	createPipeline := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
			false,
		))
		listCommitRequest := &pfsclient.ListCommitRequest{
			FromCommits: []*pfsclient.Commit{{
				// Fix: keyed field (was the unkeyed literal
				// &pfsclient.Repo{pipeline}, flagged by go vet composites).
				Repo: &pfsclient.Repo{Name: pipeline},
			}},
			CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
			Block:      true,
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel()
		listCommitResponse, err := c.PfsAPIClient.ListCommit(
			ctx,
			listCommitRequest,
		)
		require.NoError(t, err)
		outCommits := listCommitResponse.CommitInfo
		require.Equal(t, 1, len(outCommits))
	}
	// Do it twice. We expect jobs to be created on both runs.
	createPipeline()
	require.NoError(t, c.DeleteRepo(pipeline, false))
	require.NoError(t, c.DeletePipeline(pipeline))
	createPipeline()
}
// TestPipelineState checks that a healthy pipeline reports RUNNING and that,
// after its output repo is deleted out from under it, it is observed in the
// RESTARTING state at least once.
func TestPipelineState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Skip("after the refactor, it's a little unclear how you'd introduce an error into a pipeline; see #762")
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		false,
	))
	time.Sleep(5 * time.Second) // wait for this pipeline to get picked up
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	// Now we introduce an error to the pipeline by removing its output repo
	// and starting a job
	require.NoError(t, c.DeleteRepo(pipeline, false))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	// So the state of the pipeline will alternate between running and
	// restarting. We just want to make sure that it has definitely restarted.
	// Sample the state 20 times over ~10 seconds.
	var states []interface{}
	for i := 0; i < 20; i++ {
		time.Sleep(500 * time.Millisecond)
		pipelineInfo, err = c.InspectPipeline(pipeline)
		require.NoError(t, err)
		states = append(states, pipelineInfo.State)
	}
	require.EqualOneOf(t, states, ppsclient.PipelineState_PIPELINE_RESTARTING)
}
// TestPipelineJobCounts triggers exactly one job for a pipeline and verifies
// that the pipeline's JobCounts map records one successful job.
func TestPipelineJobCounts(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		false,
	))
	// Trigger a job by creating a commit
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	jobInfos, err := c.ListJob(pipeline, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	// Block (30s budget) until the job reaches a terminal state.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// check that the job has been accounted for
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, int32(1), pipelineInfo.JobCounts[int32(ppsclient.JobState_JOB_SUCCESS)])
}
// TestJobState observes the three job lifecycle states: a job with an
// unpullable image stays in PULLING, a long-sleeping job is seen RUNNING,
// and the same job reaches SUCCESS once it finishes.
func TestJobState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	pc := getPachClient(t)
	// This job uses a nonexistent image; it's supposed to stay in the
	// "pulling" state.
	pullingJob, err := pc.CreateJob(
		"nonexistent",
		[]string{"bash"},
		nil,
		&ppsclient.ParallelismSpec{},
		nil,
		"",
	)
	require.NoError(t, err)
	time.Sleep(10 * time.Second)
	jobInfo, err := pc.InspectJob(pullingJob.ID, false)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_PULLING, jobInfo.State)
	// This job sleeps for 20 seconds; after 10 it should be running.
	sleepingJob, err := pc.CreateJob(
		"",
		[]string{"bash"},
		[]string{"sleep 20"},
		&ppsclient.ParallelismSpec{},
		nil,
		"",
	)
	require.NoError(t, err)
	time.Sleep(10 * time.Second)
	jobInfo, err = pc.InspectJob(sleepingJob.ID, false)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_RUNNING, jobInfo.State)
	// Block until the job completes and confirm it succeeded.
	jobInfo, err = pc.InspectJob(sleepingJob.ID, true)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestClusterFunctioningAfterMembershipChange runs a job after scaling
// pachd up, and again after scaling it back down, to check that the
// cluster keeps working across membership changes.
func TestClusterFunctioningAfterMembershipChange(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// First grow the cluster, then shrink it, running a job each time.
	for _, scaleUp := range []bool{true, false} {
		scalePachd(t, scaleUp)
		testJob(t, 4)
	}
}
// TestDeleteAfterMembershipChange checks that a repo created before a
// cluster resize can still be deleted afterwards.
func TestDeleteAfterMembershipChange(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// Create a repo with one finished commit, resize the cluster, then
	// delete the repo through a fresh, known-good connection.
	runCase := func(scaleUp bool) {
		repo := uniqueString("TestDeleteAfterMembershipChange")
		c := getPachClient(t)
		require.NoError(t, c.CreateRepo(repo))
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
		scalePachd(t, scaleUp)
		c = getUsablePachClient(t)
		require.NoError(t, c.DeleteRepo(repo, false))
	}
	for _, scaleUp := range []bool{true, false} {
		runCase(scaleUp)
	}
}
// TestScrubbedErrors verifies that API errors returned to clients carry
// clean, user-facing messages (no internal detail leakage) for a range of
// failure modes: missing pipelines, bad pipeline specs, missing repos,
// missing jobs, and missing logs.
func TestScrubbedErrors(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	_, err := c.InspectPipeline("blah")
	require.Equal(t, "PipelineInfos blah not found", err.Error())
	err = c.CreatePipeline(
		"lskdjf$#%^ERTYC",
		"",
		[]string{},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: "test"}}},
		false,
	)
	require.Equal(t, "repo test not found", err.Error())
	_, err = c.CreateJob("askjdfhgsdflkjh", []string{}, []string{}, &ppsclient.ParallelismSpec{}, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "could not create repo job_.*, not all provenance repos exist", err.Error())
	_, err = c.InspectJob("blah", true)
	require.Equal(t, "JobInfos blah not found", err.Error())
	// GetLogs needs an io.Writer; use a scratch file in $HOME.
	home := os.Getenv("HOME")
	f, err := os.Create(filepath.Join(home, "/tmpfile"))
	// Check the Create error BEFORE registering cleanup, and close the
	// file as well as removing it — the original leaked the descriptor.
	require.NoError(t, err)
	defer func() {
		f.Close()
		os.Remove(filepath.Join(home, "/tmpfile"))
	}()
	err = c.GetLogs("bogusJobId", f)
	require.Equal(t, "job bogusJobId not found", err.Error())
}
// TestLeakingRepo checks that a failed CreateJob does not leave behind the
// output repo it created, using the total repo count before/after as the
// control.
func TestLeakingRepo(t *testing.T) {
	// If CreateJob fails, it should also destroy the output repo it creates
	// If it doesn't, it can cause flush commit to fail, as a bogus repo will
	// be listed in the output repo's provenance
	// This test can't be run in parallel, since it requires using the repo counts as controls
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	repoInfos, err := c.ListRepo(nil)
	require.NoError(t, err)
	initialCount := len(repoInfos)
	// This CreateJob is expected to fail: its input repo does not exist.
	_, err = c.CreateJob("bogusImage", []string{}, []string{}, &ppsclient.ParallelismSpec{}, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "could not create repo job_.*, not all provenance repos exist", err.Error())
	// The repo count must be unchanged after the failed CreateJob.
	repoInfos, err = c.ListRepo(nil)
	require.NoError(t, err)
	require.Equal(t, initialCount, len(repoInfos))
}
// TestAcceptReturnCode verifies that a job whose command exits with a code
// listed in Transform.AcceptReturnCode is still reported as JOB_SUCCESS.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.PpsAPIClient.CreateJob(
		context.Background(),
		&ppsclient.CreateJobRequest{
			Transform: &ppsclient.Transform{
				Cmd:   []string{"sh"},
				Stdin: []string{"exit 1"},
				// exit code 1 is explicitly whitelisted as success
				AcceptReturnCode: []int64{1},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{},
		},
	)
	require.NoError(t, err)
	// Block until the job reaches a terminal state, capped at 30s.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestRestartAll restarts every pachd pod and checks that pipelines, repos,
// and commits all survive the restart.
func TestRestartAll(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRestartAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Wait a little for pipelines to restart
	time.Sleep(10 * time.Second)
	// Pipeline, repo, and commit must all still exist after the restart.
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestRestartOne restarts a single pachd pod and checks that pipelines,
// repos, and commits remain inspectable afterwards.
func TestRestartOne(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRestartOne_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Pipeline, repo, and commit must all still be inspectable.
	_, err = c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestPrettyPrinting smoke-tests the pretty-printers for repos, commits,
// files, pipelines, and jobs: it only asserts they run without error, not
// what they print.
func TestPrettyPrinting(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPrettyPrinting_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do a commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	// Exercise each pretty-printer against real objects.
	repoInfo, err := c.InspectRepo(dataRepo)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedRepoInfo(repoInfo))
	for _, commitInfo := range commitInfos {
		require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	}
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "file", "", false, nil)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedFileInfo(fileInfo))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NoError(t, ppspretty.PrintDetailedPipelineInfo(pipelineInfo))
	jobInfos, err := c.ListJob("", nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) > 0)
	require.NoError(t, ppspretty.PrintDetailedJobInfo(jobInfos[0]))
}
// TestDeleteAll populates the cluster with a repo, a pipeline, and a job,
// then checks that DeleteAll removes every repo, pipeline, and job.
func TestDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it deletes everything
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestDeleteAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.NoError(t, c.DeleteAll())
	// Everything must be gone: repos, pipelines, and jobs.
	repoInfos, err := c.ListRepo(nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(repoInfos))
	pipelineInfos, err := c.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 0, len(pipelineInfos))
	jobInfos, err := c.ListJob("", nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(jobInfos))
}
// TestRecursiveCp runs a pipeline whose transform recursively copies its
// whole input repo (100 files) and checks the resulting job flushes cleanly.
func TestRecursiveCp(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRecursiveCp_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("TestRecursiveCp")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// "mkdir /inputs" is constant — no need for Sprintf
			// (go vet flags Sprintf calls without format arguments).
			"mkdir /inputs",
			fmt.Sprintf("cp -r /pfs/%s /inputs", dataRepo),
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:   client.NewRepo(dataRepo),
			Method: client.IncrementalReduceMethod,
		}},
		false,
	))
	// Do commit to repo: 100 files of 10000 lines each.
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(
			dataRepo,
			commit.ID,
			fmt.Sprintf("file%d", i),
			strings.NewReader(strings.Repeat("foo\n", 10000)),
		)
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
}
// TestPipelineUniqueness checks that creating a pipeline with a name that
// already exists fails with an "already exists" error.
func TestPipelineUniqueness(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	// Both attempts use an identical spec; only the second must fail.
	createPipeline := func() error {
		return c.CreatePipeline(
			pipelineName,
			"",
			[]string{"bash"},
			[]string{""},
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{
				{
					Repo: &pfsclient.Repo{Name: repo},
				},
			},
			false,
		)
	}
	require.NoError(t, createPipeline())
	err := createPipeline()
	require.YesError(t, err)
	require.Matches(t, "pipeline .*? already exists", err.Error())
}
// TestPipelineInfoDestroyedIfRepoCreationFails checks that when
// CreatePipeline fails because its output repo name is already taken,
// no PipelineInfo is left behind (InspectPipeline reports "not found").
func TestPipelineInfoDestroyedIfRepoCreationFails(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	// Pre-create a repo with the pipeline's name so that the pipeline's
	// output-repo creation is guaranteed to collide and fail.
	require.NoError(t, c.CreateRepo(pipelineName))
	err := c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
		false,
	)
	require.YesError(t, err)
	require.Matches(t, "repo .* exists", err.Error())
	// The failed creation must not leave a dangling PipelineInfo.
	_, err = c.InspectPipeline(pipelineName)
	require.YesError(t, err)
	require.Matches(t, "not found", err.Error())
}
// TestUpdatePipeline checks pipeline updates end to end: updating a
// pipeline's transform (update=true) changes its output after a flush,
// each update archives the superseded output commits (so "all" commit
// counts grow while "normal" counts stay at 1), and an update with
// NoArchive set leaves the commit counts untouched.
//
// Fixes over the original: the errors from the first two PutFile calls
// were silently overwritten before being checked, one ListCommit error was
// discarded with `_`, and several composite literals used unkeyed fields
// (flagged by `go vet -composites`).
func TestUpdatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create 2 pipelines: the first copies file1, the second consumes the
	// first's output (to ensure updates propagate through a chain).
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file1"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(dataRepo)}},
		false,
	))
	pipeline2Name := uniqueString("pipeline2")
	require.NoError(t, c.CreatePipeline(
		pipeline2Name,
		"",
		[]string{"cp", path.Join("/pfs", pipelineName, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(pipelineName)}},
		false,
	))
	// Do first commit to repo; check every PutFile error (the original
	// only checked the last one).
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file1", strings.NewReader("file1\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file2", strings.NewReader("file2\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file3", strings.NewReader("file3\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file1\n", buffer.String())
	}
	// We archive the temporary commits created per job/pod
	// So the total we see here is 2, but 'real' commits is just 1
	outputRepoCommitInfos, err := c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 2, len(outputRepoCommitInfos))
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	// Update the pipeline to look at file2
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file2"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		true,
	))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NotNil(t, pipelineInfo.CreatedAt)
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file2\n", buffer.String())
	}
	// The update archived the previous output: total grows to 4.
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 4, len(outputRepoCommitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	// Update the pipeline to look at file3
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file3"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		true,
	))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file3\n", buffer.String())
	}
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 6, len(outputRepoCommitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	commitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	// Do an update that shouldn't cause archiving
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&ppsclient.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &ppsclient.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file3"), "/pfs/out/file"},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 2,
			},
			Inputs:    []*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
			Update:    true,
			NoArchive: true,
		})
	require.NoError(t, err)
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file3\n", buffer.String())
	}
	// With NoArchive, the commit counts are unchanged: still 6 total.
	commitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 6, len(commitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
}
// TestStopPipeline verifies that a stopped pipeline processes nothing
// (FlushCommit times out) and that restarting it makes the pending commit
// flow through and produce output.
func TestStopPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	require.NoError(t, c.StopPipeline(pipelineName))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// timeout because the Flush should never return since the pipeline is
	// stopped
	// (keep and defer the CancelFunc: discarding it leaks the context's
	// timer and is flagged by `go vet -lostcancel`)
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	_, err = c.PfsAPIClient.FlushCommit(
		ctx,
		&pfsclient.FlushCommitRequest{
			Commit: []*pfsclient.Commit{commit1},
		})
	require.YesError(t, err)
	// Restart the pipeline; the queued commit should now be processed.
	require.NoError(t, c.StartPipeline(pipelineName))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, commitInfos[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestPipelineEnv checks that a pipeline's container sees both mounted
// Kubernetes secrets (via Transform.Secrets) and environment variables
// (via Transform.Env), by writing each to an output file and reading them
// back.
func TestPipelineEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	// make a secret to reference
	k := getKubeClient(t)
	secretName := uniqueString("test-secret")
	_, err := k.Secrets(api.NamespaceDefault).Create(
		&api.Secret{
			ObjectMeta: api.ObjectMeta{
				Name: secretName,
			},
			Data: map[string][]byte{
				"foo": []byte("foo\n"),
			},
		},
	)
	require.NoError(t, err)
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipelineEnv_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&ppsclient.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &ppsclient.Transform{
				Cmd: []string{"sh"},
				Stdin: []string{
					"ls /var/secret",
					"cat /var/secret/foo > /pfs/out/foo",
					"echo $bar> /pfs/out/bar",
				},
				Env: map[string]string{"bar": "bar"},
				Secrets: []*ppsclient.Secret{
					{
						Name:      secretName,
						MountPath: "/var/secret",
					},
				},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			Inputs: []*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	// BUG FIX: the FlushCommit error was never checked before using its
	// result, masking flush failures as confusing length mismatches.
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// The secret's content must appear in /pfs/out/foo...
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// ...and the env var's value in /pfs/out/bar.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "bar", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "bar\n", buffer.String())
}
// TestFlushNonExistantCommit checks that FlushCommit fails both for a
// commit in a repo that doesn't exist and for a nonexistent commit in a
// repo that does.
func TestFlushNonExistantCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// Repo doesn't exist at all.
	bogus := client.NewCommit("fake-repo", "fake-commit")
	_, err := c.FlushCommit([]*pfsclient.Commit{bogus}, nil)
	require.YesError(t, err)
	// Repo exists, but the commit doesn't.
	repo := uniqueString("FlushNonExistantCommit")
	require.NoError(t, c.CreateRepo(repo))
	missing := client.NewCommit(repo, "fake-commit")
	_, err = c.FlushCommit([]*pfsclient.Commit{missing}, nil)
	require.YesError(t, err)
}
// TestPipelineWithFullObjects runs a pipeline whose input uses
// Partition_BLOCK with Incremental_FULL, and checks that a second commit's
// output accumulates on top of the first's ("foo\n" then "foo\nbar\n").
func TestPipelineWithFullObjects(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: client.NewRepo(dataRepo),
				// BLOCK partitioning with FULL incrementality: each job
				// sees the full input, not just the new data.
				Method: &ppsclient.Method{
					Partition:   ppsclient.Partition_BLOCK,
					Incremental: ppsclient.Incremental_FULL,
				},
			},
		},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo, chained off the first.
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(dataRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
	// With FULL incrementality the output contains both commits' data.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
}
// TestArchiveAllWithPipelines creates ten pipelines over one input repo,
// flushes a commit through all of them, then checks that ArchiveAll leaves
// no "normal" (unarchived) commits visible in the input repo.
func TestArchiveAllWithPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// This test cannot be run in parallel, since it archives all repos
	c := getUsablePachClient(t)
	dataRepo := uniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numPipelines := 10
	var outputRepos []*pfsclient.Repo
	for i := 0; i < numPipelines; i++ {
		pipelineName := uniqueString("pipeline")
		outputRepos = append(outputRepos, &pfsclient.Repo{Name: pipelineName})
		require.NoError(t, c.CreatePipeline(
			pipelineName,
			"",
			[]string{"cp", path.Join("/pfs", dataRepo, "file1"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(dataRepo)}},
			false,
		))
	}
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	// BUG FIX: the original overwrote (and never checked) the errors from
	// the first two PutFile calls.
	_, err = c.PutFile(dataRepo, commit.ID, "file1", strings.NewReader("file1\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file2", strings.NewReader("file2\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file3", strings.NewReader("file3\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// One output commit per pipeline, plus the input commit itself.
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, numPipelines+1, len(commitInfos))
	require.NoError(t, c.ArchiveAll())
	// After ArchiveAll, no normal-status commits should remain.
	commitInfos, err = c.ListCommit(
		[]*pfsclient.Commit{{
			Repo: &pfsclient.Repo{Name: dataRepo},
		}},
		nil,
		client.CommitTypeNone,
		client.CommitStatusNormal,
		false,
	)
	require.NoError(t, err)
	require.Equal(t, 0, len(commitInfos))
}
// This test / failure pattern shouldn't be possible after
// the pfs-refactor branch lands
//
// TestListCommitReturnsBlankCommit guards against a regression where,
// after a full restart, ListCommit also returned the 'blank' commit that
// is created alongside a new repo.
func TestListCommitReturnsBlankCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Skip("This test does a restart which seems to break other tests.")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestListCommitReturnsBlankCommit")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Use a keyed composite literal (go vet -composites flagged the
	// original unkeyed &pfsclient.Repo{dataRepo}).
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: &pfsclient.Repo{Name: dataRepo},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	commitInfos, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos.CommitInfo))
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	commitInfos, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// In the buggy behaviour, after restarting we'd see 2 commits, one of
	// which is the 'blank' commit that's created when creating a repo
	require.Equal(t, 1, len(commitInfos.CommitInfo))
}
// TestChainedPipelines tracks https://github.com/pachyderm/pachyderm/issues/797
//
// Topology: repo A -> pipeline B; pipeline C reads both B's output and
// repo D. Flushing A's and D's commits must produce results from the
// whole chain (4 commit infos).
func TestChainedPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	dRepo := uniqueString("D")
	require.NoError(t, c.CreateRepo(dRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	dCommit, err := c.StartCommit(dRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dRepo, "master"))
	// B copies A's file through.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
		false,
	))
	// C consumes both B's output and the independent repo D.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/dFile", dRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(bPipeline)},
			{Repo: client.NewRepo(dRepo)}},
		false,
	))
	results, err := c.FlushCommit([]*pfsclient.Commit{aCommit, dCommit}, nil)
	require.NoError(t, err)
	require.Equal(t, 4, len(results))
}
// TestParallelismSpec checks GetExpectedNumWorkers across every spec
// shape: a constant count, coefficients (==1, >1, <1 floors to one
// worker), the zero-value spec, and a nil spec.
func TestParallelismSpec(t *testing.T) {
	cases := []struct {
		spec     *ppsclient.ParallelismSpec
		expected uint64
	}{
		// Constant strategy returns the constant verbatim.
		{&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 7,
		}, 7},
		// Coefficient == 1 (basic test)
		{&ppsclient.ParallelismSpec{
			Strategy:    ppsclient.ParallelismSpec_COEFFICIENT,
			Coefficient: 1,
		}, 1},
		// Coefficient > 1
		{&ppsclient.ParallelismSpec{
			Strategy:    ppsclient.ParallelismSpec_COEFFICIENT,
			Coefficient: 2,
		}, 2},
		// A tiny coefficient must still yield at least one worker.
		{&ppsclient.ParallelismSpec{
			Strategy:    ppsclient.ParallelismSpec_COEFFICIENT,
			Coefficient: 0.1,
		}, 1},
		// Zero-initialized spec defaults to one worker.
		{&ppsclient.ParallelismSpec{}, 1},
		// A nil spec also defaults to one worker.
		{nil, 1},
	}
	for _, tc := range cases {
		parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), tc.spec)
		require.NoError(t, err)
		require.Equal(t, tc.expected, parallelism)
	}
}
// getPachClient returns an API client connected to the local pachd
// address, failing the test if the connection can't be set up.
func getPachClient(t testing.TB) *client.APIClient {
	// Name the local `c` so it doesn't shadow the `client` package.
	c, err := client.NewFromAddress("0.0.0.0:30650")
	require.NoError(t, err)
	return c
}
const (
	// retries bounds the number of connection attempts made by
	// getUsablePachClient before giving up.
	retries = 10
)
// getUsablePachClient is like getPachClient except it blocks until it gets a
// connection that actually works (a ListRepo round-trip succeeds), retrying
// up to `retries` times with a 30s cap per attempt.
func getUsablePachClient(t *testing.T) *client.APIClient {
	for i := 0; i < retries; i++ {
		c := getPachClient(t)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		_, err := c.PfsAPIClient.ListRepo(ctx, &pfsclient.ListRepoRequest{})
		// Cancel immediately rather than defer: a defer inside the loop
		// would only run at function return, accumulating live contexts
		// (and their timers) across all iterations.
		cancel()
		if err == nil {
			return c
		}
	}
	t.Fatalf("failed to connect after %d tries", retries)
	return nil
}
// getKubeClient builds a Kubernetes client pointed at the local apiserver
// and fails the test if construction errors.
func getKubeClient(t *testing.T) *kube.Client {
	cfg := &kube_client.Config{
		Host:     "0.0.0.0:8080",
		Insecure: false,
	}
	kubeClient, err := kube.New(cfg)
	require.NoError(t, err)
	return kubeClient
}
// uniqueString appends a short random suffix to prefix so that repeated test
// runs do not collide on repo/pipeline names.
func uniqueString(prefix string) string {
	suffix := uuid.NewWithoutDashes()
	return prefix + suffix[:12]
}
// pachdRc fetches the replication controller that manages the pachd pods in
// the default namespace.
func pachdRc(t *testing.T) *api.ReplicationController {
	rcClient := getKubeClient(t).ReplicationControllers(api.NamespaceDefault)
	controller, err := rcClient.Get("pachd")
	require.NoError(t, err)
	return controller
}
// scalePachd scales the number of pachd nodes up or down.
// If up is true, then the number of nodes will be within (n, 2n]
// If up is false, then the number of nodes will be within [1, n)
//
// Fix: rand.Intn panics on a non-positive argument, so the original panicked
// when scaling down from a single replica (Intn(0)) and would otherwise spin
// forever since no value in [1, 1) differs from the original. Fail fast with
// a clear message instead.
func scalePachd(t *testing.T, up bool) {
	k := getKubeClient(t)
	pachdRc := pachdRc(t)
	originalReplicas := pachdRc.Spec.Replicas
	if originalReplicas < 1 {
		t.Fatalf("pachd has %d replicas; cannot scale", originalReplicas)
	}
	if !up && originalReplicas == 1 {
		t.Fatalf("cannot scale pachd down below 1 replica")
	}
	// Re-roll until the target replica count actually differs from the
	// current one.
	for {
		if up {
			pachdRc.Spec.Replicas = originalReplicas + int32(rand.Intn(int(originalReplicas))+1)
		} else {
			pachdRc.Spec.Replicas = int32(rand.Intn(int(originalReplicas)-1) + 1)
		}
		if pachdRc.Spec.Replicas != originalReplicas {
			break
		}
	}
	fmt.Printf("scaling pachd to %d replicas\n", pachdRc.Spec.Replicas)
	rc := k.ReplicationControllers(api.NamespaceDefault)
	_, err := rc.Update(pachdRc)
	require.NoError(t, err)
	waitForReadiness(t)
	// Unfortunately, even when all pods are ready, the cluster membership
	// protocol might still be running, thus PFS API calls might fail. So
	// we wait a little bit for membership to stablize.
	time.Sleep(15 * time.Second)
}
// scalePachdUp increases the pachd replica count (to within (n, 2n]).
func scalePachdUp(t *testing.T) {
	scalePachd(t, true)
}
// scalePachdDown decreases the pachd replica count (to within [1, n)).
func scalePachdDown(t *testing.T) {
	scalePachd(t, false)
}
// waitForReadiness blocks until the pachd replication controller reports its
// desired replica count and every pachd pod observed on a watch is running
// and ready.
//
// Fix: the original deferred watch.Stop() BEFORE checking the Watch error;
// if Watch failed, the deferred call would dereference a nil watch. Check
// the error first, then register the deferred Stop.
func waitForReadiness(t *testing.T) {
	k := getKubeClient(t)
	rc := pachdRc(t)
	// Poll until the controller has spun up the desired number of replicas.
	for {
		has, err := kube.ControllerHasDesiredReplicas(k, rc)()
		require.NoError(t, err)
		if has {
			break
		}
		time.Sleep(time.Second * 5)
	}
	watch, err := k.Pods(api.NamespaceDefault).Watch(api.ListOptions{
		LabelSelector: kube_labels.SelectorFromSet(map[string]string{"app": "pachd"}),
	})
	require.NoError(t, err)
	defer watch.Stop()
	// Track which pods we've seen ready; break once all replicas report in.
	readyPods := make(map[string]bool)
	for event := range watch.ResultChan() {
		ready, err := kube.PodRunningAndReady(event)
		require.NoError(t, err)
		if ready {
			pod, ok := event.Object.(*api.Pod)
			if !ok {
				t.Fatal("event.Object should be an object")
			}
			readyPods[pod.Name] = true
			if len(readyPods) == int(rc.Spec.Replicas) {
				break
			}
		}
	}
}
// restartAll deletes every pod in the pachyderm suite and waits for the
// cluster to report ready again.
func restartAll(t *testing.T) {
	pods := getKubeClient(t).Pods(api.NamespaceDefault)
	selector, err := labels.Parse("suite=pachyderm")
	require.NoError(t, err)
	podList, err := pods.List(api.ListOptions{
		LabelSelector: selector,
	})
	require.NoError(t, err)
	for _, pod := range podList.Items {
		require.NoError(t, pods.Delete(pod.Name, api.NewDeleteOptions(0)))
	}
	waitForReadiness(t)
}
// restartOne kills one randomly chosen pachd pod and waits for the cluster
// to become ready again.
func restartOne(t *testing.T) {
	pods := getKubeClient(t).Pods(api.NamespaceDefault)
	selector, err := labels.Parse("app=pachd")
	require.NoError(t, err)
	podList, err := pods.List(api.ListOptions{
		LabelSelector: selector,
	})
	require.NoError(t, err)
	victim := podList.Items[rand.Intn(len(podList.Items))]
	require.NoError(t, pods.Delete(victim.Name, api.NewDeleteOptions(0)))
	waitForReadiness(t)
}
Fix a bunch of tests that failed due to change in FlushCommit
package server
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/pachyderm/pachyderm"
"github.com/pachyderm/pachyderm/src/client"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pkg/uuid"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
pfspretty "github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppsserver "github.com/pachyderm/pachyderm/src/server/pps"
ppspretty "github.com/pachyderm/pachyderm/src/server/pps/pretty"
pps_server "github.com/pachyderm/pachyderm/src/server/pps/server"
"go.pedge.io/proto/time"
"golang.org/x/net/context"
"k8s.io/kubernetes/pkg/api"
kube_client "k8s.io/kubernetes/pkg/client/restclient"
kube "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/labels"
kube_labels "k8s.io/kubernetes/pkg/labels"
)
const (
	// NUMFILES is a per-test file count — presumably used by workload tests
	// later in this file; confirm against the remaining tests.
	NUMFILES = 25
	// KB is one kilobyte in bytes.
	KB = 1024
)
// TestJob runs the basic job test with 4 parallel shards.
func TestJob(t *testing.T) {
	t.Parallel()
	testJob(t, 4)
}
// TestJobNoShard runs the basic job test with zero shards (unsharded).
func TestJobNoShard(t *testing.T) {
	t.Parallel()
	testJob(t, 0)
}
// testJob commits many small files to a data repo, runs a job that copies
// them to its output, and verifies the job succeeds and every file arrives
// intact in the output commit.
func testJob(t *testing.T, shards int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := uniqueString("TestJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	fileContent := "foo\n"
	// We want to create lots of files so that each parallel job will be
	// started with some files
	numFiles := shards*100 + 100
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// The job copies every input file into /pfs/out.
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("cp %s %s", path.Join("/pfs", dataRepo, "*"), "/pfs/out")},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(shards),
		},
		[]*ppsclient.JobInput{{
			Commit: commit,
			Method: client.ReduceMethod,
		}},
		"",
	)
	require.NoError(t, err)
	// BlockState makes InspectJob wait until the job reaches a terminal state.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	parellelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.True(t, parellelism > 0)
	// The output commit must be finished (READ) and carry sane timestamps.
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	require.NotNil(t, jobInfo.Started)
	require.NotNil(t, jobInfo.Finished)
	require.True(t, prototime.TimestampToTime(jobInfo.Finished).After(prototime.TimestampToTime(jobInfo.Started)))
	// Every input file must show up in the output with unchanged content.
	for i := 0; i < numFiles; i++ {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("file-%d", i), 0, 0, "", false, nil, &buffer))
		require.Equal(t, fileContent, buffer.String())
	}
}
// TestPachCommitIdEnvVarInJob verifies that each input repo's commit ID is
// exposed to job containers via a PACH_<REPO>_COMMIT_ID environment variable
// by echoing those variables into output files and comparing them to the
// commit IDs recorded on the job.
func TestPachCommitIdEnvVarInJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	repos := []string{
		uniqueString("TestJob_FriarTuck"),
		uniqueString("TestJob_RobinHood"),
	}
	// Seed each repo with one finished commit containing a single file.
	var commits []*pfsclient.Commit
	for _, repo := range repos {
		require.NoError(t, c.CreateRepo(repo))
		commit, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		fileContent := "foo\n"
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
		commits = append(commits, commit)
	}
	// The job writes each input's commit-ID env var into its own output file.
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[0]), repos[0]),
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[1]), repos[1]),
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(shards),
		},
		[]*ppsclient.JobInput{
			{
				Commit: commits[0],
				Method: client.ReduceMethod,
			},
			{
				Commit: commits[1],
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	// Block until the job reaches a terminal state.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.True(t, parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	// The echoed env var must equal the commit ID recorded on the job's input.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[0]), 0, 0, "", false, nil, &buffer))
	require.Equal(t, jobInfo.Inputs[0].Commit.ID, strings.TrimSpace(buffer.String()))
	buffer.Reset()
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[1]), 0, 0, "", false, nil, &buffer))
	require.Equal(t, jobInfo.Inputs[1].Commit.ID, strings.TrimSpace(buffer.String()))
}
// TestDuplicatedJob verifies job deduplication: submitting an identical
// CreateJobRequest twice returns the same job, while setting Force produces
// a new, distinct job. It also checks the deduplicated job still produces
// the expected output.
func TestDuplicatedJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("TestDuplicatedJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipelineName := uniqueString("TestDuplicatedJob_pipeline")
	// Create the pipeline's output repo directly, with provenance on the
	// data repo, rather than going through CreatePipeline.
	_, err = c.PfsAPIClient.CreateRepo(
		context.Background(),
		&pfsclient.CreateRepoRequest{
			Repo:       client.NewRepo(pipelineName),
			Provenance: []*pfsclient.Repo{client.NewRepo(dataRepo)},
		},
	)
	require.NoError(t, err)
	cmd := []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}
	// Now we manually create the same job
	req := &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: cmd,
		},
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		Inputs: []*ppsclient.JobInput{{
			Commit: commit,
		}},
	}
	// Identical requests should be deduplicated into the same job...
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	require.Equal(t, job1, job2)
	// ...unless Force is set, which must yield a fresh job.
	req.Force = true
	job3, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	require.NotEqual(t, job1, job3)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, fileContent, buffer.String())
}
// TestLogs runs a 4-way parallel job that echoes "foo" and verifies GetLogs
// returns one shard-prefixed line per worker, and that requesting logs for a
// nonexistent job errors.
func TestLogs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"echo", "foo"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// TODO we Sleep here because even though the job has completed kubernetes
	// might not have even noticed the container was created yet
	time.Sleep(10 * time.Second)
	var buffer bytes.Buffer
	require.NoError(t, c.GetLogs(job.ID, &buffer))
	// Each of the 4 workers emits its shard number followed by "foo".
	require.Equal(t, "0 | foo\n1 | foo\n2 | foo\n3 | foo\n", buffer.String())
	// Should get an error if the job does not exist
	require.YesError(t, c.GetLogs("nonexistent", &buffer))
}
// TestGrep runs the same grep job once with 1 worker and once with 4 workers
// over identical input and asserts both output repos end up the same size,
// i.e. parallelism does not change the result.
func TestGrep(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	dataRepo := uniqueString("TestGrep_data")
	c := getPachClient(t)
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Single-worker variant.
	job1, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	// Four-worker variant of the same job.
	job2, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	// NOTE(review): both blocking InspectJob calls share a single 30s
	// timeout; if job1 consumes most of it, job2's inspection may time out.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	job1Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	inspectJobRequest.Job = job2
	job2Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	repo1Info, err := c.InspectRepo(job1Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	repo2Info, err := c.InspectRepo(job2Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
// TestJobLongOutputLine runs a job that writes a single 1MB line with no
// trailing newline, guarding against line-length limits in output handling.
func TestJobLongOutputLine(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"sh"},
		[]string{"yes | tr -d '\\n' | head -c 1000000 > /pfs/out/file"},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	// No timeout here: block indefinitely until the job terminates.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestPipeline covers the basic pipeline lifecycle: create a copying
// pipeline, verify it produces an output commit per input commit (with
// correct parentage), delete it, and confirm no further output commits are
// produced for subsequent input commits.
func TestPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:   &pfsclient.Repo{Name: dataRepo},
			Method: client.MapMethod,
		}},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block until the pipeline emits a finished (READ) output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{&pfsclient.Commit{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// The second output commit must descend from the first.
	require.NotNil(t, listCommitResponse.CommitInfo[0].ParentCommit)
	require.Equal(t, outCommits[0].Commit.ID, listCommitResponse.CommitInfo[0].ParentCommit.ID)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "bar\n", buffer.String())
	// Delete the pipeline and confirm it no longer appears in listings.
	require.NoError(t, c.DeletePipeline(pipelineName))
	pipelineInfos, err := c.PpsAPIClient.ListPipeline(context.Background(), &ppsclient.ListPipelineRequest{})
	require.NoError(t, err)
	for _, pipelineInfo := range pipelineInfos.PipelineInfo {
		require.True(t, pipelineInfo.Pipeline.Name != pipelineName)
	}
	// Do third commit to repo; this time pipeline should not run since it's been deleted
	commit3, err := c.StartCommit(dataRepo, commit2.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "file", strings.NewReader("buzz\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	// We will sleep a while to wait for the pipeline to actually get cancelled
	// Also if the pipeline didn't get cancelled (due to a bug), we sleep a while
	// to let the pipeline commit
	time.Sleep(5 * time.Second)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{&pfsclient.Commit{
			Repo: outRepo,
		}},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// there should only be two commits in the pipeline
	require.Equal(t, 2, len(listCommitResponse.CommitInfo))
}
// TestPipelineWithEmptyInputs checks both settings of PipelineInput.RunEmpty:
// with RunEmpty=false, empty input commits yield empty output commits and
// JOB_EMPTY jobs (with correct output-commit parentage); with RunEmpty=true,
// the pipeline's transform actually runs on an empty commit and writes
// output.
func TestPipelineWithEmptyInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repo
	dataRepo := uniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create a pipeline that doesn't run with empty commits
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:     &pfsclient.Repo{Name: dataRepo},
			RunEmpty: false,
		}},
		false,
	))
	// Add first empty commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block until the pipeline emits a finished output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// With RunEmpty=false the transform didn't run, so the output is empty.
	require.Equal(t, 0, int(outCommits[0].SizeBytes))
	// An empty job should've been created
	jobInfos, err := c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.Equal(t, ppsclient.JobState_JOB_EMPTY, jobInfos[0].State)
	// Make another empty commit in the input repo
	// The output commit should have the previous output commit as its parent
	parentOutputCommit := outCommits[0].Commit
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{parentOutputCommit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	require.Equal(t, 0, int(outCommits[0].SizeBytes))
	require.Equal(t, parentOutputCommit.ID, outCommits[0].ParentCommit.ID)
	jobInfos, err = c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
	require.Equal(t, ppsclient.JobState_JOB_EMPTY, jobInfos[1].State)
	// create a pipeline that runs with empty commits
	dataRepo = uniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName = uniqueString("pipeline")
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	outRepo = ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:     &pfsclient.Repo{Name: dataRepo},
			RunEmpty: true,
		}},
		false,
	))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// With RunEmpty=true the transform ran and wrote "foo\n".
	require.Equal(t, len("foo\n"), int(outCommits[0].SizeBytes))
	jobInfos, err = c.ListJob(pipelineName, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
}
// TestPipelineWithTooMuchParallelism verifies that a pipeline whose
// parallelism exceeds the available input (3 workers, 1 file, reduce method)
// still succeeds: pods that would see no input must not be started/failed.
func TestPipelineWithTooMuchParallelism(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipelineWithTooMuchParallelism_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	// This pipeline will fail if any pod sees empty input, since cp won't
	// be able to find the file.
	// We have parallelism set to 3 so that if we actually start 3 pods,
	// which would be a buggy behavior, some jobs don't see any files
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		[]*ppsclient.PipelineInput{{
			Repo: &pfsclient.Repo{Name: dataRepo},
			// Use reduce method so only one pod gets the file
			Method: client.ReduceMethod,
		}},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block (with a 30s cap) until the pipeline emits its output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// The commit must not have been cancelled by a failing pod.
	require.Equal(t, false, outCommits[0].Cancelled)
}
// TestPipelineWithNoInputs creates an input-less pipeline that must be
// triggered manually, verifies all 3 workers run (3 distinct output files),
// and that each manual trigger yields a job with a distinct ID.
func TestPipelineWithNoInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	// Each worker writes one file with a random name, so the output file
	// count equals the number of workers that actually ran.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"NEW_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)",
			"echo foo > /pfs/out/$NEW_UUID",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
	require.NoError(t, err)
	require.Equal(t, 3, int(parallelism))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// One uniquely-named file per worker → expect exactly 3 files.
	fileInfos, err := c.ListFile(outRepo.Name, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 3, len(fileInfos))
	// Make sure that each job gets a different ID
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	require.True(t, job.ID != job2.ID)
}
// TestPipelineThatWritesToOneFile has 3 workers each write 10 bytes to the
// SAME output file and asserts the results are merged (30 bytes total).
func TestPipelineThatWritesToOneFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"dd if=/dev/zero of=/pfs/out/file bs=10 count=1",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	_, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// 3 workers × 10 bytes each.
	require.Equal(t, 30, buffer.Len())
}
// TestPipelineThatOverwritesFile runs the same pipeline twice (second job
// parented on the first) where each worker truncate-writes (>) the output
// file, and asserts the second run's content replaces rather than appends.
func TestPipelineThatOverwritesFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// 3 workers each wrote one "foo" line into the shared file.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	// we expect only 3 foos here because > _overwrites_ rather than appending.
	// Appending is done with >>.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer2.String())
}
// TestPipelineThatAppendsToFile is the counterpart of the overwrite test:
// workers use >> so a second (child) job's output accumulates on top of the
// first, giving 6 lines after two runs.
func TestPipelineThatAppendsToFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo >> /pfs/out/file",
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 3,
		},
		nil,
		false,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			Repo: outRepo,
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	// First run: one "foo" per worker.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	// Second run appends, so the child commit shows six lines in total.
	require.Equal(t, "foo\nfoo\nfoo\nfoo\nfoo\nfoo\n", buffer2.String())
}
// TestRemoveAndAppend runs the remove-and-append scenario single-threaded.
func TestRemoveAndAppend(t *testing.T) {
	testParellelRemoveAndAppend(t, 1)
}
// TestParellelRemoveAndAppend runs the remove-and-append scenario with 3
// parallel workers.
//
// NOTE(review): the comment below says this test is skipped, but there is no
// t.Skip call here — either the skip was removed or the comment is stale;
// confirm which and reconcile.
func TestParellelRemoveAndAppend(t *testing.T) {
	// This test does not pass on Travis which is why it's skipped right now As
	// soon as we have a hypothesis for why this fails on travis but not
	// locally we should un skip this test and try to fix it.
	testParellelRemoveAndAppend(t, 3)
}
// testParellelRemoveAndAppend runs two chained jobs at the given
// parallelism. Job 1 has each worker write "foo" to /pfs/out/file; job 2
// (a child of job 1) has each worker unlink the file and write "bar".
// After each job it asserts the file contains exactly one line per worker.
func testParellelRemoveAndAppend(t *testing.T, parallelism int) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// Bound the whole scenario so a hung job cannot wedge the test run.
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel() //cleanup resources
// Job 1: every worker appends "foo" to the shared output file.
job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"echo foo > /pfs/out/file",
},
},
ParallelismSpec: &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: uint64(parallelism),
},
})
require.NoError(t, err)
// BlockState makes InspectJob wait until the job reaches a terminal state.
inspectJobRequest1 := &ppsclient.InspectJobRequest{
Job: job1,
BlockState: true,
}
jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
// Expect one "foo" line per worker in job 1's output commit.
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
require.Equal(t, strings.Repeat("foo\n", parallelism), buffer.String())
// Job 2: child of job 1; each worker removes the inherited file and
// writes "bar" instead.
job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"unlink /pfs/out/file && echo bar > /pfs/out/file",
},
},
ParallelismSpec: &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: uint64(parallelism),
},
ParentJob: job1,
})
require.NoError(t, err)
inspectJobRequest2 := &ppsclient.InspectJobRequest{
Job: job2,
BlockState: true,
}
jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
require.NoError(t, err)
// Best-effort debug dump of the job's logs; the error is intentionally
// ignored since the assertion below is what decides pass/fail.
c.GetLogs(jobInfo2.Job.ID, os.Stdout)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
// The unlink should have removed all "foo" lines: only "bar" remains.
var buffer2 bytes.Buffer
require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer2))
require.Equal(t, strings.Repeat("bar\n", parallelism), buffer2.String())
}
// TestWorkload exercises the cluster with the generic random workload
// generator, seeded from the current time.
func TestWorkload(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	pachClient := getPachClient(t)
	src := rand.NewSource(time.Now().UnixNano())
	require.NoError(t, workload.RunWorkload(pachClient, rand.New(src), 100))
}
// TestSharding concurrently writes NUMFILES random 1KB files into one
// commit, then verifies for each file that reading it via a single shard
// yields the same number of bytes as reading it as the union of four
// block shards.
func TestSharding(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestSharding")
	c := getPachClient(t)
	err := c.CreateRepo(repo)
	require.NoError(t, err)
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	var wg sync.WaitGroup
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			rand := rand.New(rand.NewSource(int64(i)))
			// BUG FIX: use := so each goroutine gets its own err.
			// Previously every goroutine assigned to the enclosing err,
			// which is a data race and could mask a PutFile failure.
			_, err := c.PutFile(repo, commit.ID, fmt.Sprintf("file%d", i), workload.NewReader(rand, KB))
			require.NoError(t, err)
		}()
	}
	wg.Wait()
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			var buffer1Shard bytes.Buffer
			var buffer4Shard bytes.Buffer
			// First read the whole file in one shard.
			shard := &pfsclient.Shard{FileModulus: 1, BlockModulus: 1}
			err := c.GetFile(repo, commit.ID,
				fmt.Sprintf("file%d", i), 0, 0, "", false, shard, &buffer1Shard)
			require.NoError(t, err)
			// Then read it again split across 4 block shards.
			shard.BlockModulus = 4
			for blockNumber := uint64(0); blockNumber < 4; blockNumber++ {
				shard.BlockNumber = blockNumber
				// BUG FIX: this error was previously discarded silently.
				require.NoError(t, c.GetFile(repo, commit.ID, fmt.Sprintf("file%d", i), 0, 0, "", false, shard, &buffer4Shard))
			}
			// Both read strategies must return the same amount of data.
			require.Equal(t, buffer1Shard.Len(), buffer4Shard.Len())
		}()
	}
	wg.Wait()
}
// TestFromCommit verifies that GetFile with a FromCommit argument returns
// only the data written since that commit, while omitting it returns the
// accumulated contents of both commits.
func TestFromCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestFromCommit")
	c := getPachClient(t)
	// rng is deliberately time-seeded; the data content is irrelevant,
	// only its size matters.
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	require.NoError(t, c.CreateRepo(repo))
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Reading from commit2 relative to commit1 sees only the second KB.
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, commit1.ID, false, nil, &buf))
	require.Equal(t, buf.Len(), KB)
	// Reading commit2 with no FromCommit sees both KBs.
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, "", false, nil, &buf))
	require.Equal(t, buf.Len(), 2*KB)
}
// TestSimple exercises the basic PFS workflow: write a file in one commit,
// read it back, write more in a child commit, and verify reads on the
// parent are unchanged while reads on the child see the appended data.
func TestSimple(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("TestSimple")
	require.NoError(t, c.CreateRepo(repo))
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commitInfos, err := c.ListCommit([]*pfsclient.Commit{{
		// FIX: keyed field — the unkeyed form &pfsclient.Repo{repo} trips
		// go vet's composite-literal check and breaks if Repo gains fields.
		Repo: &pfsclient.Repo{Name: repo},
	}}, nil, client.CommitTypeNone, pfsclient.CommitStatus_NORMAL, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Write the same file again in a child commit of commit1.
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit2.ID)
	require.NoError(t, err)
	// The parent commit still reads the original single line...
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// ...while the child reads the accumulated contents.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo, commit2.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\nfoo\n", buffer.String())
}
// TestPipelineWithMultipleInputs creates a pipeline over two input repos
// whose script emits one line per (file in repo1) x (file in repo2) pair,
// then verifies the output line count after the initial commits and after
// a further commit to each input repo.
func TestPipelineWithMultipleInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	inputRepo1 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo1))
	inputRepo2 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo2))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
repo1=%s
repo2=%s
echo $repo1
ls -1 /pfs/$repo1
echo $repo2
ls -1 /pfs/$repo2
for f1 in /pfs/$repo1/*
do
for f2 in /pfs/$repo2/*
do
v1=$(<$f1)
v2=$(<$f2)
echo $v1$v2 >> /pfs/out/file
done
done
`, inputRepo1, inputRepo2)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 4,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: inputRepo1},
				Method: client.IncrementalReduceMethod,
			},
			{
				Repo:   &pfsclient.Repo{Name: inputRepo2},
				Method: client.IncrementalReduceMethod,
			},
		},
		false,
	))
	content := "foo"
	numfiles := 10
	commit1, err := c.StartCommit(inputRepo1, "master")
	// BUG FIX: the StartCommit error was previously never checked (it was
	// overwritten by the PutFile error inside the loop). Same fix applied
	// to commit2/commit3/commit4 below.
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit1.ID))
	commit2, err := c.StartCommit(inputRepo2, "master")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit2.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit2.ID))
	// Block until the pipeline produces its first output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// FIX: keyed field; unkeyed composite literal trips go vet.
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	// Full cross product: numfiles * numfiles lines, each |content|*2 long.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Add numfiles more files to repo1; the incremental run adds another
	// numfiles*numfiles lines.
	commit3, err := c.StartCommit(inputRepo1, commit1.ID)
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit3.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit3.ID))
	listCommitRequest.FromCommits = append(listCommitRequest.FromCommits, outCommits[0].Commit)
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 2*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Add numfiles more files to repo2; total doubles again.
	commit4, err := c.StartCommit(inputRepo2, commit2.ID)
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit4.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit4.ID))
	listCommitRequest.FromCommits[0] = outCommits[0].Commit
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 4*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
}
// TestPipelineWithGlobalMethod creates a pipeline whose input uses the
// global method, so every worker sees all files in the input repo. Each of
// the `parallelism` workers writes the file count to /pfs/out/file; the
// test expects one output line per worker, each equal to numfiles.
func TestPipelineWithGlobalMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	globalRepo := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(globalRepo))
	numfiles := 20
	pipelineName := uniqueString("pipeline")
	parallelism := 2
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		// this script simply outputs the number of files under the global repo
		[]string{fmt.Sprintf(`
numfiles=(/pfs/%s/*)
numfiles=${#numfiles[@]}
echo $numfiles > /pfs/out/file
`, globalRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: uint64(parallelism),
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: globalRepo},
				Method: client.GlobalMethod,
			},
		},
		false,
	))
	content := "foo"
	commit, err := c.StartCommit(globalRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(globalRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(globalRepo, commit.ID))
	// Block until the pipeline produces its output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// FIX: keyed field — the unkeyed composite literal
			// (&pfsclient.Repo{pipelineName}) trips go vet.
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", false, nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, parallelism, len(lines)) // each job outputs one line
	for _, line := range lines {
		require.Equal(t, fmt.Sprintf("%d", numfiles), line)
	}
}
// TestPipelineWithPrevRepoAndIncrementalReduceMethod verifies that an
// incremental-reduce pipeline sees its previous output mounted at
// /pfs/prev: the first run outputs "foo"; the second run outputs the new
// input ("foo\nbar") followed by the previous output ("foo").
func TestPipelineWithPrevRepoAndIncrementalReduceMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
cat /pfs/%s/file >>/pfs/out/file
if [ -d "/pfs/prev" ]; then
cat /pfs/prev/file >>/pfs/out/file
fi
`, repo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: repo},
				Method: client.IncrementalReduceMethod,
			},
		},
		false,
	))
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", strings.NewReader("foo\n"))
	// BUG FIX: the PutFile error was previously never checked here (nor for
	// commit2 below); only the FinishCommit error was asserted.
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// FIX: keyed field; unkeyed composite literal trips go vet.
			Repo: &pfsclient.Repo{Name: pipelineName},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// First run: no /pfs/prev yet, so the output is just "foo".
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 1, len(lines))
	require.Equal(t, "foo", lines[0])
	commit2, err := c.StartCommit(repo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType:  pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:       true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// Second run: accumulated input ("foo", "bar") then previous output ("foo").
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer2))
	lines = strings.Split(strings.TrimSpace(buffer2.String()), "\n")
	require.Equal(t, 3, len(lines))
	require.Equal(t, "foo", lines[0])
	require.Equal(t, "bar", lines[1])
	require.Equal(t, "foo", lines[2])
}
// TestPipelineThatUseNonexistentInputs checks that creating a pipeline
// whose input repo does not exist is rejected.
func TestPipelineThatUseNonexistentInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	name := uniqueString("pipeline")
	spec := &ppsclient.ParallelismSpec{
		Strategy: ppsclient.ParallelismSpec_CONSTANT,
		Constant: 1,
	}
	inputs := []*ppsclient.PipelineInput{
		{Repo: &pfsclient.Repo{Name: "nonexistent"}},
	}
	// The server must refuse the pipeline because "nonexistent" was never created.
	require.YesError(t, c.CreatePipeline(name, "", []string{"bash"}, []string{""}, spec, inputs, false))
}
// TestPipelineWhoseInputsGetDeleted verifies deletion ordering: an input
// repo cannot be deleted while a pipeline depends on it, but after deleting
// the pipeline and its output repo the input becomes deletable.
func TestPipelineWhoseInputsGetDeleted(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	inputRepo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(inputRepo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{"true"},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{Repo: &pfsclient.Repo{Name: inputRepo}},
		},
		false,
	))
	// Deleting the input repo directly must fail: it is part of the
	// provenance of the pipeline's output repo.
	require.YesError(t, c.DeleteRepo(inputRepo, false))
	// The supported order: delete the pipeline, then its output repo,
	// and only then the input repo.
	require.NoError(t, c.DeletePipeline(pipeline))
	require.NoError(t, c.DeleteRepo(pipeline, false))
	require.NoError(t, c.DeleteRepo(inputRepo, false))
}
// This test fails if you updated some static assets (such as doc/pipeline_spec.md)
// that are used in code but forgot to run:
// $ make assets
func TestAssets(t *testing.T) {
	assetPaths := []string{"doc/pipeline_spec.md"}
	// FIX: the loop variable was named `path`, shadowing the imported
	// `path` package used elsewhere in this file; renamed to assetPath.
	for _, assetPath := range assetPaths {
		// The on-disk copy of the asset from the repo checkout.
		doc, err := ioutil.ReadFile(filepath.Join(os.Getenv("GOPATH"), "src/github.com/pachyderm/pachyderm/", assetPath))
		if err != nil {
			t.Fatal(err)
		}
		// The generated (compiled-in) copy of the same asset.
		asset, err := pachyderm.Asset(assetPath)
		if err != nil {
			t.Fatal(err)
		}
		require.Equal(t, doc, asset)
	}
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction
// It looks like this:
// A
// | \
// v  v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B copies A's file.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
		false,
	))
	// C diffs A's file against B's copy — always empty if provenance works.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{Repo: client.NewRepo(aRepo)},
			{Repo: client.NewRepo(bPipeline)},
		},
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// There should only be 2 commits on cRepo
	commitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		// FIX: keyed field — the unkeyed composite literal
		// (&pfsclient.Repo{cPipeline}) trips go vet.
		Repo: &pfsclient.Repo{Name: cPipeline},
	}}, nil, pfsclient.CommitType_COMMIT_TYPE_READ, pfsclient.CommitStatus_NORMAL, false)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		// C takes the diff of 2 files that should always be the same, so we
		// expect an empty file
		fileInfo, err := c.InspectFile(cPipeline, commitInfo.Commit.ID, "file", "", false, nil)
		require.NoError(t, err)
		require.Equal(t, uint64(0), fileInfo.SizeBytes)
	}
}
// TestDirectory runs two chained jobs that write into a directory under
// /pfs/out: job 1 creates dir/file with one "foo" per worker, and job 2
// (its child) appends one "bar" per worker. It verifies the accumulated
// contents after each job.
func TestDirectory(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// Bound the whole scenario so a hung job cannot wedge the test run.
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel() //cleanup resources
// Job 1: 3 workers each append "foo" to /pfs/out/dir/file.
job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"mkdir /pfs/out/dir",
"echo foo >> /pfs/out/dir/file",
},
},
ParallelismSpec: &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: 3,
},
})
require.NoError(t, err)
// BlockState makes InspectJob wait for a terminal job state.
inspectJobRequest1 := &ppsclient.InspectJobRequest{
Job: job1,
BlockState: true,
}
jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "dir/file", 0, 0, "", false, nil, &buffer))
require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
// Job 2: child of job 1; the directory already exists (no mkdir), and
// 3 workers each append "bar".
job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"echo bar >> /pfs/out/dir/file",
},
},
ParallelismSpec: &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: 3,
},
ParentJob: job1,
})
require.NoError(t, err)
inspectJobRequest2 := &ppsclient.InspectJobRequest{
Job: job2,
BlockState: true,
}
jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
// The child's output contains the parent's lines followed by its own.
buffer = bytes.Buffer{}
require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "dir/file", 0, 0, "", false, nil, &buffer))
require.Equal(t, "foo\nfoo\nfoo\nbar\nbar\nbar\n", buffer.String())
}
// TestFailedJobReadData runs a job that writes output and then exits
// nonzero, and verifies that the data it wrote is still readable from its
// (cancelled) output commit.
func TestFailedJobReadData(t *testing.T) {
// We want to enable users to be able to read data from cancelled commits for debugging purposes`
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
// NOTE: shards is 0 here, so the ParallelismSpec constant is 0; the
// server-side GetExpectedNumWorkers call below still reports > 0 workers.
shards := 0
c := getPachClient(t)
repo := uniqueString("TestJob_Foo")
require.NoError(t, c.CreateRepo(repo))
commit, err := c.StartCommit(repo, "master")
require.NoError(t, err)
fileContent := "foo\n"
_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
require.NoError(t, err)
err = c.FinishCommit(repo, commit.ID)
require.NoError(t, err)
// The job writes "fubar" to its output and then deliberately fails.
job, err := c.CreateJob(
"",
[]string{"bash"},
[]string{
"echo fubar > /pfs/out/file",
"exit 1",
},
&ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: uint64(shards),
},
[]*ppsclient.JobInput{
{
Commit: commit,
Method: client.ReduceMethod,
},
},
"",
)
require.NoError(t, err)
// Wait (bounded) for the job to reach a terminal state.
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_FAILURE.String(), jobInfo.State.String())
parallelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), jobInfo.ParallelismSpec)
require.NoError(t, err)
require.True(t, parallelism > 0)
// Best-effort debug dump; error intentionally ignored.
c.GetLogs(jobInfo.Job.ID, os.Stdout)
// The output commit must exist, be readable, and be marked cancelled.
commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
require.NoError(t, err)
require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
require.Equal(t, true, commitInfo.Cancelled)
// Even though the job failed, the data it wrote is still readable.
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", false, nil, &buffer))
require.Equal(t, "fubar", strings.TrimSpace(buffer.String()))
}
// TestFlushCommit builds a linear chain of five copy pipelines and checks
// that FlushCommit blocks until every downstream commit exists — once for
// an orphan source commit and once for a commit with a parent.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	repoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := repoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage chain: stage i+1 copies "file" from stage i.
	numStages := 5
	for stage := 0; stage < numStages; stage++ {
		input := repoName(stage)
		require.NoError(t, c.CreatePipeline(
			repoName(stage+1),
			"",
			[]string{"cp", path.Join("/pfs", input, "file"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(input)}},
			false,
		))
	}
	runOnce := func(parent string) string {
		commit, err := c.StartCommit(sourceRepo, parent)
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
		commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		// One commit per downstream stage plus the source commit itself.
		require.Equal(t, numStages+1, len(commitInfos))
		return commit.ID
	}
	// Run once on an orphan commit, then again on a commit with a parent.
	runOnce(runOnce(uuid.New()))
}
// TestFlushCommitAfterCreatePipeline makes ten commits on a repo, creates a
// copy pipeline over it afterwards, and verifies FlushCommit on "master"
// still succeeds.
func TestFlushCommitAfterCreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("TestFlushCommitAfterCreatePipeline")
	require.NoError(t, c.CreateRepo(repo))
	for i := 0; i < 10; i++ {
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(repo, "master", "file", strings.NewReader(fmt.Sprintf("foo%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
	}
	pipeline := uniqueString("TestFlushCommitAfterCreatePipelinePipeline")
	cmd := []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}
	spec := &ppsclient.ParallelismSpec{
		Strategy: ppsclient.ParallelismSpec_CONSTANT,
		Constant: 1,
	}
	inputs := []*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}}
	require.NoError(t, c.CreatePipeline(pipeline, "", cmd, nil, spec, inputs, false))
	_, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
}
// TestFlushCommitWithFailure is similar to TestFlushCommit except that
// the pipeline is designed to fail
func TestFlushCommitWithFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	repoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := repoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Five copy stages; stage 3 copies a nonexistent file so its job fails.
	numStages := 5
	for stage := 0; stage < numStages; stage++ {
		src := "file"
		if stage == 3 {
			src = "nonexistent"
		}
		input := repoName(stage)
		require.NoError(t, c.CreatePipeline(
			repoName(stage+1),
			"",
			[]string{"cp", path.Join("/pfs", input, src), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(input)}},
			false,
		))
	}
	commit, err := c.StartCommit(sourceRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
	// FlushCommit must surface the downstream stage failure as an error.
	_, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
	require.YesError(t, err)
}
// TestRecreatePipeline tracks #432
// It creates a pipeline, waits for it to produce output, deletes it (and
// its output repo), recreates it under the same name, and verifies that
// jobs run again after recreation.
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := uniqueString("pipeline")
	// createPipeline creates the pipeline and blocks (up to 30s) until it
	// produces an output commit.
	createPipeline := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
			false,
		))
		listCommitRequest := &pfsclient.ListCommitRequest{
			FromCommits: []*pfsclient.Commit{{
				// FIX: keyed field — the unkeyed composite literal
				// (&pfsclient.Repo{pipeline}) trips go vet.
				Repo: &pfsclient.Repo{Name: pipeline},
			}},
			CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
			Block:      true,
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		// NOTE: this defer runs at test-function exit, not closure exit;
		// harmless here since the test ends shortly after the second call.
		defer cancel()
		listCommitResponse, err := c.PfsAPIClient.ListCommit(
			ctx,
			listCommitRequest,
		)
		require.NoError(t, err)
		outCommits := listCommitResponse.CommitInfo
		require.Equal(t, 1, len(outCommits))
	}
	// Do it twice. We expect jobs to be created on both runs.
	createPipeline()
	require.NoError(t, c.DeleteRepo(pipeline, false))
	require.NoError(t, c.DeletePipeline(pipeline))
	createPipeline()
}
// TestPipelineState checks that a pipeline reports PIPELINE_RUNNING after
// creation, and PIPELINE_RESTARTING after its output repo is deleted out
// from under it. Currently skipped pending #762.
func TestPipelineState(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Skip("after the refactor, it's a little unclear how you'd introduce an error into a pipeline; see #762")
t.Parallel()
c := getPachClient(t)
repo := uniqueString("data")
require.NoError(t, c.CreateRepo(repo))
pipeline := uniqueString("pipeline")
require.NoError(t, c.CreatePipeline(
pipeline,
"",
[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
nil,
&ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: 1,
},
[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
false,
))
time.Sleep(5 * time.Second) // wait for this pipeline to get picked up
pipelineInfo, err := c.InspectPipeline(pipeline)
require.NoError(t, err)
require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
// Now we introduce an error to the pipeline by removing its output repo
// and starting a job
require.NoError(t, c.DeleteRepo(pipeline, false))
commit, err := c.StartCommit(repo, "master")
require.NoError(t, err)
_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(repo, commit.ID))
// So the state of the pipeline will alternate between running and
// restarting. We just want to make sure that it has definitely restarted.
// Poll the state ~every 500ms for 10s and record every observation.
var states []interface{}
for i := 0; i < 20; i++ {
time.Sleep(500 * time.Millisecond)
pipelineInfo, err = c.InspectPipeline(pipeline)
require.NoError(t, err)
states = append(states, pipelineInfo.State)
}
require.EqualOneOf(t, states, ppsclient.PipelineState_PIPELINE_RESTARTING)
}
// TestPipelineJobCounts triggers exactly one successful job on a pipeline
// and checks that the pipeline's JobCounts records one JOB_SUCCESS.
func TestPipelineJobCounts(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(dataRepo)}},
		false,
	))
	// Trigger a job by creating a commit on the input repo.
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	jobInfos, err := c.ListJob(pipeline, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	// Block (up to 30s) until the job reaches a terminal state.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, &ppsclient.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	})
	require.NoError(t, err)
	// check that the job has been accounted for
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, int32(1), pipelineInfo.JobCounts[int32(ppsclient.JobState_JOB_SUCCESS)])
}
// TestJobState checks observable job states: a job with a nonexistent
// image remains in JOB_PULLING, a sleeping job is seen as JOB_RUNNING, and
// the sleeping job eventually reaches JOB_SUCCESS.
func TestJobState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// A job using a nonexistent image is expected to stay in the
	// "pulling" state.
	job, err := c.CreateJob("nonexistent", []string{"bash"}, nil, &ppsclient.ParallelismSpec{}, nil, "")
	require.NoError(t, err)
	time.Sleep(10 * time.Second)
	jobInfo, err := c.InspectJob(job.ID, false)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_PULLING, jobInfo.State)
	// A job that sleeps for 20s should be observed running after 10s.
	job, err = c.CreateJob("", []string{"bash"}, []string{"sleep 20"}, &ppsclient.ParallelismSpec{}, nil, "")
	require.NoError(t, err)
	time.Sleep(10 * time.Second)
	jobInfo, err = c.InspectJob(job.ID, false)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_RUNNING, jobInfo.State)
	// A blocking inspect waits for completion.
	jobInfo, err = c.InspectJob(job.ID, true)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestClusterFunctioningAfterMembershipChange checks that jobs still run
// correctly after the pachd cluster is scaled up and then scaled back down.
func TestClusterFunctioningAfterMembershipChange(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// Scale up first, then down, running a job after each membership change.
	for _, up := range []bool{true, false} {
		scalePachd(t, up)
		testJob(t, 4)
	}
}
// TestDeleteAfterMembershipChange checks that a repo created before a
// cluster-membership change can still be deleted afterwards.
func TestDeleteAfterMembershipChange(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// Run the scenario once while scaling up and once while scaling down.
	for _, up := range []bool{true, false} {
		repo := uniqueString("TestDeleteAfterMembershipChange")
		c := getPachClient(t)
		require.NoError(t, c.CreateRepo(repo))
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
		scalePachd(t, up)
		// The old connection is defunct after scaling; get a fresh client.
		c = getUsablePachClient(t)
		require.NoError(t, c.DeleteRepo(repo, false))
	}
}
// TestScrubbedErrors checks that user-facing error messages from the API are
// scrubbed into the expected human-readable forms (no internal details leak).
func TestScrubbedErrors(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// Inspecting a nonexistent pipeline yields a clean "not found" message.
	_, err := c.InspectPipeline("blah")
	require.Equal(t, "PipelineInfos blah not found", err.Error())
	// Creating a pipeline over a nonexistent input repo names just the repo.
	err = c.CreatePipeline(
		"lskdjf$#%^ERTYC",
		"",
		[]string{},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: "test"}}},
		false,
	)
	require.Equal(t, "repo test not found", err.Error())
	_, err = c.CreateJob("askjdfhgsdflkjh", []string{}, []string{}, &ppsclient.ParallelismSpec{}, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "could not create repo job_.*, not all provenance repos exist", err.Error())
	_, err = c.InspectJob("blah", true)
	require.Equal(t, "JobInfos blah not found", err.Error())
	// GetLogs needs a writable file. Create a scratch file and ensure it is
	// both closed and removed at test end; the original leaked the file
	// descriptor and registered cleanup before checking the Create error.
	home := os.Getenv("HOME")
	tmpPath := filepath.Join(home, "tmpfile")
	f, err := os.Create(tmpPath)
	require.NoError(t, err)
	defer func() {
		f.Close()
		os.Remove(tmpPath)
	}()
	err = c.GetLogs("bogusJobId", f)
	require.Equal(t, "job bogusJobId not found", err.Error())
}
// TestLeakingRepo checks that a failed CreateJob does not leave behind the
// output repo it provisionally created.
func TestLeakingRepo(t *testing.T) {
	// If CreateJob fails, it should also destroy the output repo it creates
	// If it doesn't, it can cause flush commit to fail, as a bogus repo will
	// be listed in the output repo's provenance
	// This test can't be run in parallel, since it requires using the repo counts as controls
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	// Record how many repos exist before the deliberately failing CreateJob.
	repoInfos, err := c.ListRepo(nil)
	require.NoError(t, err)
	initialCount := len(repoInfos)
	// This CreateJob must fail: its input references a repo that doesn't exist.
	_, err = c.CreateJob("bogusImage", []string{}, []string{}, &ppsclient.ParallelismSpec{}, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "could not create repo job_.*, not all provenance repos exist", err.Error())
	// The failed job must not have leaked a new repo into the count.
	repoInfos, err = c.ListRepo(nil)
	require.NoError(t, err)
	require.Equal(t, initialCount, len(repoInfos))
}
// TestAcceptReturnCode verifies that a job whose command exits with a code
// listed in Transform.AcceptReturnCode is still reported as JOB_SUCCESS.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// The job exits 1, but exit code 1 is whitelisted via AcceptReturnCode.
	job, err := c.PpsAPIClient.CreateJob(
		context.Background(),
		&ppsclient.CreateJobRequest{
			Transform: &ppsclient.Transform{
				Cmd:              []string{"sh"},
				Stdin:            []string{"exit 1"},
				AcceptReturnCode: []int64{1},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{},
		},
	)
	require.NoError(t, err)
	// Block until the job reaches a terminal state (bounded by the timeout).
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestRestartAll checks that pipelines, repos, and commits all survive a
// full restart of the cluster.
func TestRestartAll(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRestartAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Wait a little for pipelines to restart
	time.Sleep(10 * time.Second)
	// Everything created before the restart should still exist and be running.
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestRestartOne checks that pipelines, repos, and commits survive the
// restart of a single pachd node.
func TestRestartOne(t *testing.T) {
	t.Skip("this test is flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRestartOne_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// State created before the restart should still be inspectable.
	_, err = c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestPrettyPrinting smoke-tests the pretty-printers for repos, commits,
// files, pipelines, and jobs: each PrintDetailed* call must succeed on real
// objects produced by a small pipeline run.
func TestPrettyPrinting(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPrettyPrinting_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do a commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	// Pretty-print every kind of object the run produced.
	repoInfo, err := c.InspectRepo(dataRepo)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedRepoInfo(repoInfo))
	for _, commitInfo := range commitInfos {
		require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	}
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "file", "", false, nil)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedFileInfo(fileInfo))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NoError(t, ppspretty.PrintDetailedPipelineInfo(pipelineInfo))
	jobInfos, err := c.ListJob("", nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) > 0)
	require.NoError(t, ppspretty.PrintDetailedJobInfo(jobInfos[0]))
}
// TestDeleteAll checks that DeleteAll removes every repo, pipeline, and job
// after a pipeline has produced data.
func TestDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it deletes everything
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestDeleteAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	// Do commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.NoError(t, c.DeleteAll())
	// After DeleteAll, every listing must come back empty.
	repoInfos, err := c.ListRepo(nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(repoInfos))
	pipelineInfos, err := c.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 0, len(pipelineInfos))
	jobInfos, err := c.ListJob("", nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(jobInfos))
}
// TestRecursiveCp runs a pipeline that recursively copies its whole input
// repo (100 files) and verifies the job completes without error.
func TestRecursiveCp(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestRecursiveCp_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("TestRecursiveCp")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// plain literal: the original wrapped this constant string in a
			// needless no-arg fmt.Sprintf (flagged by go vet)
			"mkdir /inputs",
			fmt.Sprintf("cp -r /pfs/%s /inputs", dataRepo),
		},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{
			Repo:   client.NewRepo(dataRepo),
			Method: client.IncrementalReduceMethod,
		}},
		false,
	))
	// Do commit to repo: 100 files of ~40KB each.
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(
			dataRepo,
			commit.ID,
			fmt.Sprintf("file%d", i),
			strings.NewReader(strings.Repeat("foo\n", 10000)),
		)
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Flushing proves the pipeline's job ran to completion.
	_, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
}
// TestPipelineUniqueness checks that creating a second pipeline with the
// same name fails with an "already exists" error.
func TestPipelineUniqueness(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	// First creation succeeds.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
		false,
	))
	// Second creation with an identical name must be rejected.
	err := c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
		false,
	)
	require.YesError(t, err)
	require.Matches(t, "pipeline .*? already exists", err.Error())
}
// TestPipelineInfoDestroyedIfRepoCreationFails checks that when CreatePipeline
// fails because its output repo name is already taken, no orphaned
// PipelineInfo is left behind.
func TestPipelineInfoDestroyedIfRepoCreationFails(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	// Pre-create a repo with the pipeline's name so output-repo creation fails.
	require.NoError(t, c.CreateRepo(pipelineName))
	err := c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
		false,
	)
	require.YesError(t, err)
	require.Matches(t, "repo .* exists", err.Error())
	// The failed creation must not have persisted a PipelineInfo.
	_, err = c.InspectPipeline(pipelineName)
	require.YesError(t, err)
	require.Matches(t, "not found", err.Error())
}
// TestUpdatePipeline exercises pipeline updates: each update points the
// pipeline at a different input file, and the test verifies both the new
// output contents and the bookkeeping of archived vs. "real" output commits
// (including a NoArchive update that must not add archived commits).
func TestUpdatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create 2 pipelines
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file1"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(dataRepo)}},
		false,
	))
	pipeline2Name := uniqueString("pipeline2")
	require.NoError(t, c.CreatePipeline(
		pipeline2Name,
		"",
		[]string{"cp", path.Join("/pfs", pipelineName, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(pipelineName)}},
		false,
	))
	// Do first commit to repo. Check every PutFile error: the original
	// silently discarded the errors for file1 and file2.
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file1", strings.NewReader("file1\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file2", strings.NewReader("file2\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file3", strings.NewReader("file3\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	// only care about non-provenance commits
	commitInfos = commitInfos[1:]
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file1\n", buffer.String())
	}
	// We archive the temporary commits created per job/pod
	// So the total we see here is 2, but 'real' commits is just 1
	outputRepoCommitInfos, err := c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 2, len(outputRepoCommitInfos))
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	// Update the pipeline to look at file2
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file2"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		true,
	))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NotNil(t, pipelineInfo.CreatedAt)
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	// only care about non-provenance commits
	commitInfos = commitInfos[1:]
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file2\n", buffer.String())
	}
	// The update re-ran the pipeline, adding 2 more commits (total 4 with
	// archived ones)...
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 4, len(outputRepoCommitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	// Update the pipeline to look at file3
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file3"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		true,
	))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	// only care about non-provenance commits
	commitInfos = commitInfos[1:]
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file3\n", buffer.String())
	}
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 6, len(outputRepoCommitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
	// (A dead ListCommit call whose result and error were both discarded was
	// removed here.)
	// Do an update that shouldn't cause archiving
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&ppsclient.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &ppsclient.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file3"), "/pfs/out/file"},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 2,
			},
			Inputs:    []*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
			Update:    true,
			NoArchive: true,
		})
	require.NoError(t, err)
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
	// only care about non-provenance commits
	commitInfos = commitInfos[1:]
	for _, commitInfo := range commitInfos {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "file", 0, 0, "", false, nil, &buffer))
		require.Equal(t, "file3\n", buffer.String())
	}
	// NoArchive means the total commit count is unchanged at 6.
	commitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusAll, false)
	require.NoError(t, err)
	require.Equal(t, 6, len(commitInfos))
	// Expect real commits to still be 1
	outputRepoCommitInfos, err = c.ListCommit([]*pfsclient.Commit{{
		Repo: &pfsclient.Repo{Name: pipelineName},
	}}, nil, client.CommitTypeRead, client.CommitStatusNormal, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(outputRepoCommitInfos))
}
// TestStopPipeline checks that a stopped pipeline processes no commits
// (FlushCommit times out) and that restarting it resumes processing of the
// pending input.
func TestStopPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		false,
	))
	require.NoError(t, c.StopPipeline(pipelineName))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// timeout because the Flush should never return since the pipeline is
	// stopped
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	// release the timeout's resources; the original discarded the cancel
	// func (go vet lostcancel), leaking the timer
	defer cancel()
	_, err = c.PfsAPIClient.FlushCommit(
		ctx,
		&pfsclient.FlushCommitRequest{
			Commit: []*pfsclient.Commit{commit1},
		})
	require.YesError(t, err)
	// Restart the pipeline; the pending commit should now be processed.
	require.NoError(t, c.StartPipeline(pipelineName))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, commitInfos[1].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestPipelineEnv checks that a pipeline's transform sees both mounted
// Kubernetes secrets (under /var/secret) and user-specified environment
// variables.
func TestPipelineEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	// make a secret to reference
	k := getKubeClient(t)
	secretName := uniqueString("test-secret")
	_, err := k.Secrets(api.NamespaceDefault).Create(
		&api.Secret{
			ObjectMeta: api.ObjectMeta{
				Name: secretName,
			},
			Data: map[string][]byte{
				"foo": []byte("foo\n"),
			},
		},
	)
	require.NoError(t, err)
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipelineEnv_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline: copies the secret value and echoes the env var into
	// the output repo so both can be asserted below.
	pipelineName := uniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&ppsclient.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &ppsclient.Transform{
				Cmd: []string{"sh"},
				Stdin: []string{
					"ls /var/secret",
					"cat /var/secret/foo > /pfs/out/foo",
					"echo $bar> /pfs/out/bar",
				},
				Env: map[string]string{"bar": "bar"},
				Secrets: []*ppsclient.Secret{
					{
						Name:      secretName,
						MountPath: "/var/secret",
					},
				},
			},
			ParallelismSpec: &ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			Inputs: []*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	// the original ignored this error entirely
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[1].Commit.ID, "foo", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(pipelineName, commitInfos[1].Commit.ID, "bar", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "bar\n", buffer.String())
}
// TestFlushNonExistantCommit checks that FlushCommit fails fast when given a
// commit in a nonexistent repo, and a nonexistent commit in a real repo.
// (The misspelling of "Existent" is kept: the name is the test's interface.)
func TestFlushNonExistantCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// Flushing a commit in a repo that doesn't exist must error.
	_, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit("fake-repo", "fake-commit")}, nil)
	require.YesError(t, err)
	// Flushing a nonexistent commit in a real repo must also error.
	repo := uniqueString("FlushNonExistantCommit")
	require.NoError(t, c.CreateRepo(repo))
	_, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(repo, "fake-commit")}, nil)
	require.YesError(t, err)
}
// TestPipelineWithFullObjects runs a pipeline whose input uses
// Partition_BLOCK with Incremental_FULL and checks that a second commit's
// output contains the accumulated data from both commits.
func TestPipelineWithFullObjects(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{
			{
				Repo: client.NewRepo(dataRepo),
				// FULL incrementality: each job sees the complete input so far.
				Method: &ppsclient.Method{
					Partition:   ppsclient.Partition_BLOCK,
					Incremental: ppsclient.Incremental_FULL,
				},
			},
		},
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, commit1.ID)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(dataRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// Because of FULL incrementality, the output holds both lines.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, "", false, nil, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
}
// TestArchiveAllWithPipelines checks that ArchiveAll hides every commit from
// a normal ListCommit even when many pipelines have produced output.
func TestArchiveAllWithPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// This test cannot be run in parallel, since it archives all repos
	c := getUsablePachClient(t)
	dataRepo := uniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numPipelines := 10
	var outputRepos []*pfsclient.Repo
	for i := 0; i < numPipelines; i++ {
		pipelineName := uniqueString("pipeline")
		outputRepos = append(outputRepos, &pfsclient.Repo{Name: pipelineName})
		require.NoError(t, c.CreatePipeline(
			pipelineName,
			"",
			[]string{"cp", path.Join("/pfs", dataRepo, "file1"), "/pfs/out/file"},
			nil,
			&ppsclient.ParallelismSpec{
				Strategy: ppsclient.ParallelismSpec_CONSTANT,
				Constant: 1,
			},
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(dataRepo)}},
			false,
		))
	}
	// Commit three files; check every PutFile error (the original silently
	// discarded the errors for file1 and file2).
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file1", strings.NewReader("file1\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file2", strings.NewReader("file2\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file3", strings.NewReader("file3\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, numPipelines+1, len(commitInfos))
	require.NoError(t, c.ArchiveAll())
	// After archiving, a "normal" listing must be empty.
	commitInfos, err = c.ListCommit(
		[]*pfsclient.Commit{{
			Repo: &pfsclient.Repo{Name: dataRepo},
		}},
		nil,
		client.CommitTypeNone,
		client.CommitStatusNormal,
		false,
	)
	require.NoError(t, err)
	require.Equal(t, 0, len(commitInfos))
}
// This test / failure pattern shouldn't be possible after
// the pfs-refactor branch lands
// TestListCommitReturnsBlankCommit reproduces a bug where, after a cluster
// restart, ListCommit also returned the "blank" commit created alongside the
// repo itself.
func TestListCommitReturnsBlankCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Skip("This test does a restart which seems to break other tests.")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestListCommitReturnsBlankCommit")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		FromCommits: []*pfsclient.Commit{{
			// keyed literal; the original used a vet-flagged unkeyed field
			Repo: &pfsclient.Repo{Name: dataRepo},
		}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	commitInfos, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos.CommitInfo))
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	commitInfos, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// In the buggy behaviour, after restarting we'd see 2 commits, one of
	// which is the 'blank' commit that's created when creating a repo
	require.Equal(t, 1, len(commitInfos.CommitInfo))
}
// TestChainedPipelines tracks https://github.com/pachyderm/pachyderm/issues/797
// Topology: A -> B -> C, with D -> C as a second input; flushing A's and D's
// commits must produce results for every downstream repo.
func TestChainedPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// Two source repos: A feeds pipeline B; D feeds pipeline C directly.
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	dRepo := uniqueString("D")
	require.NoError(t, c.CreateRepo(dRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	dCommit, err := c.StartCommit(dRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dRepo, "master"))
	// B copies A's file through.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
		false,
	))
	// C combines B's output with D's file.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/dFile", dRepo)},
		&ppsclient.ParallelismSpec{
			Strategy: ppsclient.ParallelismSpec_CONSTANT,
			Constant: 1,
		},
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(bPipeline)},
			{Repo: client.NewRepo(dRepo)}},
		false,
	))
	// Both chains must flush: 4 results (B's and C's outputs plus provenance).
	results, err := c.FlushCommit([]*pfsclient.Commit{aCommit, dCommit}, nil)
	require.NoError(t, err)
	require.Equal(t, 4, len(results))
}
func TestParallelismSpec(t *testing.T) {
// Test Constant strategy
parellelism, err := pps_server.GetExpectedNumWorkers(getKubeClient(t), &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_CONSTANT,
Constant: 7,
})
require.NoError(t, err)
require.Equal(t, uint64(7), parellelism)
// Coefficient == 1 (basic test)
parellelism, err = pps_server.GetExpectedNumWorkers(getKubeClient(t), &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_COEFFICIENT,
Coefficient: 1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), parellelism)
// Coefficient > 1
parellelism, err = pps_server.GetExpectedNumWorkers(getKubeClient(t), &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_COEFFICIENT,
Coefficient: 2,
})
require.NoError(t, err)
require.Equal(t, uint64(2), parellelism)
// Make sure we start at least one worker
parellelism, err = pps_server.GetExpectedNumWorkers(getKubeClient(t), &ppsclient.ParallelismSpec{
Strategy: ppsclient.ParallelismSpec_COEFFICIENT,
Coefficient: 0.1,
})
require.NoError(t, err)
require.Equal(t, uint64(1), parellelism)
// Test 0-initialized JobSpec
parellelism, err = pps_server.GetExpectedNumWorkers(getKubeClient(t), &ppsclient.ParallelismSpec{})
require.NoError(t, err)
require.Equal(t, uint64(1), parellelism)
// Test nil JobSpec
parellelism, err = pps_server.GetExpectedNumWorkers(getKubeClient(t), nil)
require.NoError(t, err)
require.Equal(t, uint64(1), parellelism)
}
// getPachClient returns a pachyderm API client connected to the local
// cluster, failing the test if the connection cannot be created.
func getPachClient(t testing.TB) *client.APIClient {
	// Name the local "c" to avoid shadowing the imported client package.
	c, err := client.NewFromAddress("0.0.0.0:30650")
	require.NoError(t, err)
	return c
}
const (
	// retries is the number of connection attempts getUsablePachClient makes
	// before failing the test.
	retries = 10
)

// getUsablePachClient is like getPachClient except it blocks until it gets a
// connection that actually works
func getUsablePachClient(t *testing.T) *client.APIClient {
	for i := 0; i < retries; i++ {
		c := getPachClient(t)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		// Probe the connection with a cheap ListRepo call.
		_, err := c.PfsAPIClient.ListRepo(ctx, &pfsclient.ListRepoRequest{})
		// Release the context's timer immediately; the original deferred
		// cancel inside the loop, piling up timers until function return.
		cancel()
		if err == nil {
			return c
		}
	}
	t.Fatalf("failed to connect after %d tries", retries)
	return nil
}
// getKubeClient returns a kubernetes client pointed at the local apiserver,
// failing the test if the client cannot be constructed.
func getKubeClient(t *testing.T) *kube.Client {
	cfg := &kube_client.Config{
		Host:     "0.0.0.0:8080",
		Insecure: false,
	}
	kubeClient, err := kube.New(cfg)
	require.NoError(t, err)
	return kubeClient
}
// uniqueString returns prefix extended with the first 12 characters of a
// freshly generated dashless UUID.
func uniqueString(prefix string) string {
	suffix := uuid.NewWithoutDashes()
	return prefix + suffix[:12]
}
// pachdRc fetches the replication controller named "pachd" from the default
// kubernetes namespace, failing the test on any error.
func pachdRc(t *testing.T) *api.ReplicationController {
	controllers := getKubeClient(t).ReplicationControllers(api.NamespaceDefault)
	pachd, err := controllers.Get("pachd")
	require.NoError(t, err)
	return pachd
}
// scalePachd scales the number of pachd nodes up or down.
// If up is true, then the number of nodes will be within (n, 2n]
// If up is false, then the number of nodes will be within [1, n)
func scalePachd(t *testing.T, up bool) {
	k := getKubeClient(t)
	pachdRc := pachdRc(t)
	originalReplicas := pachdRc.Spec.Replicas
	// rand.Intn panics when its argument is <= 0. That happened here when the
	// controller reported 0 replicas, or when scaling down from a single
	// replica (there is no valid count in [1, 1) anyway) — fail with a clear
	// message instead of panicking.
	if originalReplicas < 1 || (!up && originalReplicas < 2) {
		t.Fatalf("cannot scale pachd (up=%t) from %d replicas", up, originalReplicas)
	}
	// Re-roll until the chosen replica count actually differs.
	for {
		if up {
			pachdRc.Spec.Replicas = originalReplicas + int32(rand.Intn(int(originalReplicas))+1)
		} else {
			pachdRc.Spec.Replicas = int32(rand.Intn(int(originalReplicas)-1) + 1)
		}
		if pachdRc.Spec.Replicas != originalReplicas {
			break
		}
	}
	fmt.Printf("scaling pachd to %d replicas\n", pachdRc.Spec.Replicas)
	rc := k.ReplicationControllers(api.NamespaceDefault)
	_, err := rc.Update(pachdRc)
	require.NoError(t, err)
	waitForReadiness(t)
	// Unfortunately, even when all pods are ready, the cluster membership
	// protocol might still be running, thus PFS API calls might fail. So
	// we wait a little bit for membership to stablize.
	time.Sleep(15 * time.Second)
}
// scalePachdUp scales pachd up to a random replica count in (n, 2n].
func scalePachdUp(t *testing.T) {
	scalePachd(t, true)
}
// scalePachdDown scales pachd down to a random replica count in [1, n).
func scalePachdDown(t *testing.T) {
	scalePachd(t, false)
}
// waitForReadiness blocks until the pachd replication controller reports its
// desired replica count and every pachd pod is running and ready.
func waitForReadiness(t *testing.T) {
	k := getKubeClient(t)
	rc := pachdRc(t)
	for {
		// ControllerHasDesiredReplicas returns a condition func; poll it.
		has, err := kube.ControllerHasDesiredReplicas(k, rc)()
		require.NoError(t, err)
		if has {
			break
		}
		time.Sleep(time.Second * 5)
	}
	watch, err := k.Pods(api.NamespaceDefault).Watch(api.ListOptions{
		LabelSelector: kube_labels.SelectorFromSet(map[string]string{"app": "pachd"}),
	})
	// Check the error before deferring Stop: if Watch failed, watch may be
	// nil and the deferred watch.Stop() would panic, masking the real error.
	require.NoError(t, err)
	defer watch.Stop()
	readyPods := make(map[string]bool)
	for event := range watch.ResultChan() {
		ready, err := kube.PodRunningAndReady(event)
		require.NoError(t, err)
		if !ready {
			continue
		}
		pod, ok := event.Object.(*api.Pod)
		if !ok {
			t.Fatal("event.Object should be an object")
		}
		readyPods[pod.Name] = true
		if len(readyPods) == int(rc.Spec.Replicas) {
			break
		}
	}
}
// restartAll force-deletes every pod in the pachyderm suite and then waits
// for the cluster to become ready again.
func restartAll(t *testing.T) {
	pods := getKubeClient(t).Pods(api.NamespaceDefault)
	selector, err := labels.Parse("suite=pachyderm")
	require.NoError(t, err)
	list, err := pods.List(api.ListOptions{LabelSelector: selector})
	require.NoError(t, err)
	for i := range list.Items {
		require.NoError(t, pods.Delete(list.Items[i].Name, api.NewDeleteOptions(0)))
	}
	waitForReadiness(t)
}
// restartOne force-deletes one randomly chosen pachd pod and waits for the
// cluster to become ready again.
func restartOne(t *testing.T) {
	k := getKubeClient(t)
	podsInterface := k.Pods(api.NamespaceDefault)
	labelSelector, err := labels.Parse("app=pachd")
	require.NoError(t, err)
	podList, err := podsInterface.List(
		api.ListOptions{
			LabelSelector: labelSelector,
		})
	require.NoError(t, err)
	// Guard the empty case: rand.Intn(0) panics, which would obscure the
	// real problem (no pachd pods running at all).
	if len(podList.Items) == 0 {
		t.Fatal("no pachd pods found to restart")
	}
	victim := podList.Items[rand.Intn(len(podList.Items))]
	require.NoError(t, podsInterface.Delete(victim.Name, api.NewDeleteOptions(0)))
	waitForReadiness(t)
}
|
package sprite
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/runningwild/glop/render"
"github.com/runningwild/glop/util/algorithm"
"github.com/runningwild/opengl/gl"
"github.com/runningwild/opengl/glu"
"github.com/runningwild/yedparse"
)
const (
	// defaultFrameTime is the display time for animation frames whose node
	// has no explicit "time" tag. Units match Think(dt) — presumably
	// milliseconds, TODO confirm.
	defaultFrameTime = 100
)
// spriteError is the error type returned by the sprite verification and
// loading routines in this package.
type spriteError struct {
	Msg string
}

// Error implements the error interface by returning the stored message.
func (e *spriteError) Error() string {
	return e.Msg
}
// tryRelPath returns path rewritten relative to base when a relative form
// can be computed; otherwise it returns path unchanged.
func tryRelPath(base, path string) string {
	if rel, err := filepath.Rel(base, path); err == nil {
		return rel
	}
	return path
}
// getStartNode scans g for the node carrying the tag "mark": "start" and
// returns it, or nil when no such node exists.
func getStartNode(g *yed.Graph) *yed.Node {
	count := g.NumNodes()
	for i := 0; i < count; i++ {
		if node := g.Node(i); node.Tag("mark") == "start" {
			return node
		}
	}
	return nil
}
// Valid state and anim graphs have the following properties:
// * All nodes are labeled
// * It has exactly one node that has the tag "mark" : "start"
// * All nodes in the graph can be reached by starting at the start node
// * All nodes and edges have only the specified tags
//
// node_tags and edge_tags list the tag keys permitted on nodes and edges
// respectively; a nil return means every property holds.
func verifyAnyGraph(graph *yed.Graph, node_tags, edge_tags []string) error {
	// Build membership sets for the allowed tag keys.
	valid_node_tags := make(map[string]bool)
	for _, tag := range node_tags {
		valid_node_tags[tag] = true
	}
	valid_edge_tags := make(map[string]bool)
	for _, tag := range edge_tags {
		valid_edge_tags[tag] = true
	}
	// Check that all nodes have labels
	for i := 0; i < graph.NumNodes(); i++ {
		node := graph.Node(i)
		// A first line containing ':' appears to be treated as a tag rather
		// than a label — TODO confirm against yedparse's conventions.
		if node.NumLines() == 0 || strings.Contains(node.Line(0), ":") {
			return &spriteError{"contains an unlabeled node"}
		}
	}
	// Check that there is exactly one start node
	var start *yed.Node
	for i := 0; i < graph.NumNodes(); i++ {
		if graph.Node(i).Tag("mark") == "start" {
			if start == nil {
				start = graph.Node(i)
			} else {
				return &spriteError{"more than one node is marked as the start node"}
			}
		}
	}
	if start == nil {
		return &spriteError{"no start node was found"}
	}
	// Check that all nodes can be reached by the start node.
	// Flood fill: "next" is the frontier, "used" the visited set.
	used := make(map[*yed.Node]bool)
	next := make(map[*yed.Node]bool)
	next[start] = true
	for len(next) > 0 {
		var nodes []*yed.Node
		for node := range next {
			nodes = append(nodes, node)
		}
		for _, node := range nodes {
			delete(next, node)
			used[node] = true
		}
		for _, node := range nodes {
			// Traverse the parent
			if node.Group() != nil && !used[node.Group()] {
				next[node.Group()] = true
			}
			// Traverse all the children
			for i := 0; i < node.NumChildren(); i++ {
				if !used[node.Child(i)] {
					next[node.Child(i)] = true
				}
			}
			// Traverse all outputs
			for i := 0; i < node.NumOutputs(); i++ {
				adj := node.Output(i).Dst()
				if !used[adj] {
					next[adj] = true
				}
			}
		}
	}
	if len(used) != graph.NumNodes() {
		return &spriteError{"not all nodes are reachable from the start node"}
	}
	// Check that nodes only have the specified tags
	for i := 0; i < graph.NumNodes(); i++ {
		node := graph.Node(i)
		for _, tag := range node.TagKeys() {
			// The start node is additionally allowed its "mark" tag.
			if !(valid_node_tags[tag] || (node == start && tag == "mark")) {
				return &spriteError{fmt.Sprintf("a node has an unknown tag (%s)", tag)}
			}
		}
	}
	// Check that edges only have the specified tags
	for i := 0; i < graph.NumEdges(); i++ {
		edge := graph.Edge(i)
		for _, tag := range edge.TagKeys() {
			if !valid_edge_tags[tag] {
				return &spriteError{fmt.Sprintf("an edge has an unknown tag (%s)", tag)}
			}
		}
	}
	return nil
}
// A valid state graph has the following properties in addition to those
// specified in verifyAnyGraph():
// * All output edges from the start node have labels
// * No node has more than one unlabeled output edge
// * There are no tags on any nodes except for the start node
// * There are no groups
func verifyStateGraph(graph *yed.Graph) error {
	// No node tags allowed; edges may only carry a "facing" tag.
	err := verifyAnyGraph(graph, []string{}, []string{"facing"})
	if err != nil {
		return &spriteError{fmt.Sprintf("State graph: %v", err)}
	}
	// verifyAnyGraph guarantees exactly one start node, so start is non-nil.
	start := getStartNode(graph)
	// Check that all output edges from the start node have labels
	for i := 0; i < start.NumOutputs(); i++ {
		edge := start.Output(i)
		// As with node labels, a first line containing ':' counts as a tag,
		// not a label.
		if edge.NumLines() == 0 || strings.Contains(edge.Line(0), ":") {
			return &spriteError{"State graph: The start node has an unlabeled output edge"}
		}
	}
	// Check that no node has more than one unlabeled output edge
	for i := 0; i < graph.NumNodes(); i++ {
		node := graph.Node(i)
		num_labels := 0
		for j := 0; j < node.NumOutputs(); j++ {
			edge := node.Output(j)
			if edge.NumLines() > 0 && !strings.Contains(edge.Line(0), ":") {
				num_labels++
			}
		}
		if num_labels < node.NumOutputs()-1 {
			return &spriteError{fmt.Sprintf("State graph: Found more than one unlabeled output edge on node '%s'", node.Line(0))}
		}
	}
	// Check that no nodes are groups
	for i := 0; i < graph.NumNodes(); i++ {
		node := graph.Node(i)
		if node.NumChildren() > 0 {
			return &spriteError{"State graph: cannot contain groups"}
		}
	}
	return nil
}
// verifyAnimGraph checks that graph satisfies the generic constraints from
// verifyAnyGraph, permitting "time"/"sync" node tags and "facing"/"weight"
// edge tags.
func verifyAnimGraph(graph *yed.Graph) error {
	if err := verifyAnyGraph(graph, []string{"time", "sync"}, []string{"facing", "weight"}); err != nil {
		return &spriteError{fmt.Sprintf("Anim graph: %v", err)}
	}
	return nil
}
// Traverse the directory and do the following things:
// * There are n > 0 directories
// * There is at most 1 other file immediately within path - a thumb.png
// * All of the directories have names that are integers 0 - (n-1)
// * No image is present in any facing that isn't present in the anim graph
//
// Returns the number of facing directories, the sorted list of .png
// filenames appearing in at least one facing, and any error encountered.
// NOTE(review): the "names are integers 0 - (n-1)" property is not actually
// verified here — the code only counts directories, then joins path with
// 0..n-1 below; a misnamed directory surfaces as a Walk error instead.
func verifyDirectoryStructure(path string, graph *yed.Graph) (num_facings int, filenames []string, err error) {
	// First pass: count facing directories and reject unexpected top-level
	// files. Errors escape the closure via the named return err.
	filepath.Walk(path, func(cpath string, info os.FileInfo, _err error) error {
		if _err != nil {
			err = _err
			return err
		}
		if cpath == path {
			return nil
		}
		// skip hidden files
		if _, file := filepath.Split(cpath); file[0] == '.' {
			return nil
		}
		if info.IsDir() {
			// Count the facing directory but do not descend into it yet.
			num_facings++
			return filepath.SkipDir
		} else {
			switch {
			case info.Name() == "anim.xgml":
			case info.Name() == "state.xgml":
			case info.Name() == "thumb.png":
			case strings.HasSuffix(info.Name(), ".gob"):
			default:
				err = &spriteError{fmt.Sprintf("Unexpected file found in sprite directory, %s", tryRelPath(path, cpath))}
				return err
			}
		}
		return nil
	})
	if err != nil {
		return
	}
	if num_facings == 0 {
		err = &spriteError{"Found no facings in the sprite directory"}
		return
	}
	// Create a set of valid png filenames. If a .png shows up that is not in
	// this set then we raise an error. Non-png files are allowed and are
	// ignored.
	valid_names := make(map[string]bool)
	for i := 0; i < graph.NumNodes(); i++ {
		valid_names[graph.Node(i).Line(0)+".png"] = true
	}
	// Second pass: walk each facing directory, recording which valid .pngs
	// actually exist.
	filenames_map := make(map[string]bool)
	for facing := 0; facing < num_facings; facing++ {
		cur := filepath.Join(path, fmt.Sprintf("%d", facing))
		filepath.Walk(cur, func(cpath string, info os.FileInfo, _err error) error {
			if _err != nil {
				err = _err
				return err
			}
			if cpath == cur {
				return nil
			}
			// skip hidden files
			if _, file := filepath.Split(cpath); file[0] == '.' {
				return nil
			}
			if info.IsDir() {
				err = &spriteError{fmt.Sprintf("Found a directory inside facing directory %d, %s", facing, tryRelPath(path, cpath))}
				return err
			}
			if filepath.Ext(cpath) == ".png" {
				base := filepath.Base(cpath)
				if valid_names[base] {
					filenames_map[base] = true
				} else {
					err = &spriteError{fmt.Sprintf("Found an unused .png file: %s", tryRelPath(path, cpath))}
				}
				// err is nil on the success path, so this returns nil unless
				// the png was unused.
				return err
			}
			return nil
		})
	}
	// Flatten the set into a deterministic, sorted slice.
	for filename := range filenames_map {
		filenames = append(filenames, filename)
	}
	sort.Strings(filenames)
	return
}
// Used to determine what frames to keep permanently in texture memory, and
// which ones to unload when not needed
//
// animAlgoGraph adapts a yed anim graph to the algorithm package's graph
// interface, with edge costs equal to the source frame's display time.
type animAlgoGraph struct {
	anim *yed.Graph
}

// NumVertex returns the number of nodes in the underlying anim graph.
func (cg *animAlgoGraph) NumVertex() int {
	return cg.anim.NumNodes()
}

// Adjacent returns the ids of nodes reachable from node n along group-level
// output edges, plus the traversal cost of each such edge.
func (cg *animAlgoGraph) Adjacent(n int) (adj []int, cost []float64) {
	node := cg.anim.Node(n)
	// The node's "time" tag overrides the default frame time when it parses
	// as a float.
	var delay float64 = defaultFrameTime
	if node.Tag("time") != "" {
		t, err := strconv.ParseFloat(node.Tag("time"), 64)
		if err == nil {
			delay = t
		} else {
			// TODO: Should log this as a warning or something
		}
	}
	for i := 0; i < node.NumGroupOutputs(); i++ {
		edge := node.GroupOutput(i)
		adj = append(adj, edge.Dst().Id())
		// frames that are part of groups can be cancelled at any time if the
		// animation is supposed to proceed out of the group, so if an edge leads
		// away from the current group we will assume that it has a delay of 0.
		if node.Group() != nil && edge.Dst().Group() != node.Group() {
			cost = append(cost, 0)
		} else {
			cost = append(cost, delay)
		}
	}
	return
}
// Sprite is a single animated entity. All Sprites loaded from the same
// directory share one sharedSprite (graphs and texture sheets); the fields
// here track this instance's position within those shared graphs.
type Sprite struct {
	shared *sharedSprite
	// Current positions in the anim and state graphs.
	anim_node  *yed.Node
	state_node *yed.Node
	// number of times Think() has been called. This is mostly so that we can
	// run some code the very first time that Think() is called.
	thinks int
	// current facing - needed to index into the appropriate sheet in shared
	facing int
	// previous facing - tracking this lets us prevent having to load/unload
	// lots of facings if a sprite changes facings multiple times between thinks
	prev_facing int
	// Time remaining on the current frame of animation
	togo int64
	// If len(path) > 0 then this is the series of animation frames that will be
	// used next
	path []*yed.Node
	// commands that have been accepted by the state graph but haven't been
	// processed by the anim graph. When path is empty a cmd will be taken from
	// this list and be used to generate the next path.
	pending_cmds []command
	// Used to keep track of the state that the current frame of animation
	// represents.
	anim_states []string
}
// command is one queued instruction for a sprite: one or more edge names,
// optionally tied to a synchronization group.
type command struct {
	names []string // List of names of edges
	group *commandGroup
}

// commandGroup coordinates a command that several sprites execute together.
type commandGroup struct {
	// This is the tag that all of the sprites in this group will sync to
	sync_tag string
	// all of the sprites in this list must have this commandGroup as part of
	// their next command to execute before any of them will execute it.
	sprites []*Sprite
	// If ready() ever returns true then this will be set to true and ready()
	// will always return true after that. This prevents a situation where one
	// sprite starts executing this command and then other sprites think they
	// aren't ready because one of them has already progressed past this
	// command.
	was_ready bool
}
// ready reports whether every sprite in the group is poised to execute this
// group's command: no frames left in its immediate path, and this group's
// command sitting at the front of its pending queue. Once ready has returned
// true it keeps returning true (latched via was_ready).
func (cg *commandGroup) ready() bool {
	if cg.was_ready {
		return true
	}
	for _, sp := range cg.sprites {
		blocked := len(sp.path) > 0 ||
			len(sp.pending_cmds) == 0 || // empty queue here is a serious problem
			sp.pending_cmds[0].group != cg
		if blocked {
			return false
		}
	}
	cg.was_ready = true
	return true
}
// State returns the label of the sprite's current state-graph node.
func (s *Sprite) State() string {
	return s.state_node.Line(0)
}

// Anim returns the label of the sprite's current anim-graph node.
func (s *Sprite) Anim() string {
	return s.anim_node.Line(0)
}

// AnimState returns the state that the current animation frame represents:
// the oldest queued anim state if any, otherwise the current state label.
func (s *Sprite) AnimState() string {
	if len(s.anim_states) == 0 {
		return s.State()
	}
	return s.anim_states[0]
}
// selects an outgoing edge from node random among those outgoing edges that
// have cmd listed in cmds. The random choice is weighted by the weights
// found in edge_data
//
// Returns nil when no outgoing edge matches any of cmds, or when the
// matching edges' weights do not sum to a positive total.
func selectAnEdge(node *yed.Node, edge_data map[*yed.Edge]edgeData, cmds []string) *yed.Edge {
	cmd_map := make(map[string]bool)
	for _, cmd := range cmds {
		cmd_map[cmd] = true
	}
	// First pass: sum the weights of all candidate edges.
	total := 0.0
	for i := 0; i < node.NumOutputs(); i++ {
		edge := node.Output(i)
		if _, ok := cmd_map[edge_data[edge].cmd]; !ok {
			continue
		}
		total += edge_data[edge].weight
	}
	if total > 0 {
		// Second pass: draw a point in [0, total) and return the edge whose
		// cumulative weight range contains it.
		pick := rand.Float64() * total
		total = 0.0
		for i := 0; i < node.NumOutputs(); i++ {
			edge := node.Output(i)
			if _, ok := cmd_map[edge_data[edge].cmd]; !ok {
				continue
			}
			total += edge_data[edge].weight
			if total >= pick {
				return edge
			}
		}
	}
	return nil
}
// edgeTo returns the group-level edge leading from a (or an enclosing group
// of a) to b (or an enclosing group of b), or nil when no such edge exists.
func edgeTo(a, b *yed.Node) *yed.Edge {
	for i := 0; i < a.NumGroupOutputs(); i++ {
		edge := a.GroupOutput(i)
		for ancestor := b; ancestor != nil; ancestor = ancestor.Group() {
			if edge.Dst() == ancestor {
				return edge
			}
		}
	}
	return nil
}
// CommandSync issues one command per sprite (cmds[i] goes to ss[i]) such that
// every sprite accepting its command waits for all the others in the group
// before executing, synchronizing on sync_tag.
// NOTE(review): assumes len(cmds) >= len(ss); a shorter cmds slice would
// panic — confirm at call sites.
func CommandSync(ss []*Sprite, cmds []string, sync_tag string) {
	// Go through each sprite, if it can execute the specified command then add
	// it to the group (and if it can't, don't).
	var group commandGroup
	group.sync_tag = sync_tag
	for i := range ss {
		cmd := command{
			names: []string{cmds[i]},
			group: &group,
		}
		if ss[i].baseCommand(cmd) {
			group.sprites = append(group.sprites, ss[i])
		}
	}
}
// baseCommand tries to apply cmd to the state graph: it follows one edge
// matching the command's first name, then chases any chain of unlabeled
// (automatic) edges, queues cmd for the anim graph, and reports whether the
// command was accepted.
func (s *Sprite) baseCommand(cmd command) bool {
	state_edge := selectAnEdge(s.state_node, s.shared.edge_data, []string{cmd.names[0]})
	if state_edge == nil {
		// The current state has no outgoing edge for this command.
		return false
	}
	// Remember the state being left so AnimState() can keep reporting it
	// until the anim graph catches up.
	s.anim_states = append(s.anim_states, s.state_node.Line(0))
	s.state_node = state_edge.Dst()
	// Follow automatic (cmd == "") edges until none remain.
	state_edge = selectAnEdge(s.state_node, s.shared.edge_data, []string{""})
	for state_edge != nil {
		// If this command is synced then we first need to make sure that we'll
		// be able to get to the appropriate sync tag
		// if cmd.group != nil && cmd.group.sync_tag != "" {
		// dst := state_edge.Dst()
		// s.shared.node_data
		// }
		s.state_node = state_edge.Dst()
		state_edge = selectAnEdge(s.state_node, s.shared.edge_data, []string{""})
	}
	s.pending_cmds = append(s.pending_cmds, cmd)
	return true
}
// Command issues a single, unsynchronized command to this sprite.
func (s *Sprite) Command(cmd string) {
	s.baseCommand(command{names: []string{cmd}, group: nil})
}
// CommandN issues an unsynchronized multi-name command; the anim graph
// consumes the names in order when building the path.
func (s *Sprite) CommandN(cmds []string) {
	s.baseCommand(command{names: cmds, group: nil})
}
// This is a specialized wrapper around a yed.Graph that allows for the start
// node to be differentiated from the ending node in a path in the event that
// they are the same node in the original graph. This means that if a path is
// requested from one node to the same node that the resulting path will not
// be length 0.
type pathingGraph struct {
	shared *sharedSprite
	// graph *yed.Graph
	start *yed.Node
	// Edges will only be followed if there is no command associated with them,
	// or if the command associated with them is the same as this command.
	cmd string
}

// NumVertex returns the anim node count plus one; the extra vertex is the
// virtual copy of the start node.
func (p pathingGraph) NumVertex() int {
	return p.shared.anim.NumNodes() + 1
}

// Adjacent returns the ids of nodes reachable from vertex n along edges that
// either carry no command or carry p.cmd; every edge has unit cost. The
// vertex with id NumNodes() is the virtual start vertex.
func (p pathingGraph) Adjacent(n int) (adj []int, cost []float64) {
	var node *yed.Node
	if n == p.shared.anim.NumNodes() {
		node = p.start
	} else {
		node = p.shared.anim.Node(n)
	}
	for i := 0; i < node.NumGroupOutputs(); i++ {
		edge := node.GroupOutput(i)
		if p.shared.edge_data[edge].cmd != "" && p.shared.edge_data[edge].cmd != p.cmd {
			continue
		}
		adj = append(adj, edge.Dst().Id())
		cost = append(cost, 1)
	}
	return
}
// findPathForCmd computes the sequence of anim nodes to play in order to
// execute cmd starting from anim_node.
// If this returns nil it means this sprite isn't ready for a new path
// If this returns a path with length 0 it means there wasn't a valid path
func (s *Sprite) findPathForCmd(cmd command, anim_node *yed.Node) []*yed.Node {
	// If the next command is supposed to be synced with other sprites then we
	// need to wait until those sprites are ready before we all proceed.
	// Use the cmd parameter directly instead of re-indexing
	// s.pending_cmds[0]: the caller passes that element in anyway, and
	// indexing the queue again would panic if it were empty.
	if cmd.group != nil && !cmd.group.ready() {
		return nil
	}
	var node_path []*yed.Node
	for _, name := range cmd.names {
		g := pathingGraph{shared: s.shared, start: anim_node, cmd: name}
		// Any node that is the destination of an edge triggered by this
		// command name is an acceptable endpoint.
		var end []int
		for i := 0; i < s.shared.anim.NumEdges(); i++ {
			edge := s.shared.anim.Edge(i)
			if s.shared.edge_data[edge].cmd == name {
				end = append(end, edge.Dst().Id())
			}
		}
		// NumNodes() is the pathingGraph's virtual start vertex, so a path
		// from a node back to itself is not length 0.
		_, path := algorithm.Dijkstra(g, []int{s.shared.anim.NumNodes()}, end)
		for _, id := range path[1:] {
			node_path = append(node_path, s.shared.anim.Node(id))
		}
		// Chain the names: the next search starts where this one ended.
		if len(node_path) > 0 {
			anim_node = node_path[len(node_path)-1]
		}
	}
	return node_path
}
// applyPath appends the given frames onto the sprite's pending animation
// path. Uses a single variadic append instead of a per-element loop.
func (s *Sprite) applyPath(path []*yed.Node) {
	s.path = append(s.path, path...)
}
// Dims returns the pixel width and height of the sprite's current frame, or
// (0, 0) when no rect is recorded for it on any sheet.
func (s *Sprite) Dims() (dx, dy int) {
	var rect FrameRect
	var ok bool
	fid := frameId{facing: s.facing, node: s.anim_node.Id()}
	// Check the connector sheet first, then the facing's own sheet.
	rect, ok = s.shared.connector.rects[fid]
	if !ok {
		rect, ok = s.shared.facings[s.facing].rects[fid]
		if !ok {
			return 0, 0
		}
	}
	dx = rect.X2 - rect.X
	dy = rect.Y2 - rect.Y
	return
}
// Bind binds the texture sheet containing the sprite's current frame and
// returns that frame's texture coordinates within the sheet, normalized by
// the sheet dimensions. When the frame cannot be found on any sheet, the
// magenta error texture is bound instead and all coordinates are zero.
func (s *Sprite) Bind() (x, y, x2, y2 float64) {
	var rect FrameRect
	var sh *sheet
	var ok bool
	fid := frameId{facing: s.facing, node: s.anim_node.Id()}
	var dx, dy float64
	// Prefer the connector sheet, then fall back to the facing's sheet.
	if rect, ok = s.shared.connector.rects[fid]; ok {
		sh = s.shared.connector
	} else if rect, ok = s.shared.facings[s.facing].rects[fid]; ok {
		sh = s.shared.facings[s.facing]
	} else {
		error_texture.Bind(gl.TEXTURE_2D)
		return
	}
	sh.texture.Bind(gl.TEXTURE_2D)
	dx = float64(sh.dx)
	dy = float64(sh.dy)
	x = float64(rect.X) / dx
	y = float64(rect.Y) / dy
	x2 = float64(rect.X2) / dx
	y2 = float64(rect.Y2) / dy
	return
}
// Facing returns the index of the sprite's current facing direction.
func (s *Sprite) Facing() int {
	return s.facing
}
// Think advances the sprite's animation by dt (same units as the anim
// graph's "time" tags — presumably milliseconds, TODO confirm). It consumes
// the current frame's remaining time and recurses to step through as many
// frames as dt covers, loading/unloading facing sheets as the facing
// changes.
func (s *Sprite) Think(dt int64) {
	// Very first call: load the initial facing's sheet and prime the timer.
	if s.thinks == 0 {
		s.shared.facings[0].Load()
		s.togo = s.shared.node_data[s.anim_node].time
	}
	s.thinks++
	if dt < 0 {
		return
		// panic("Can't have dt < 0")
	}
	// If a command is pending and no frames are queued, compute its path;
	// findPathForCmd returns nil while a sync group is not ready yet.
	var path []*yed.Node
	if len(s.pending_cmds) > 0 && len(s.path) == 0 {
		path = s.findPathForCmd(s.pending_cmds[0], s.anim_node)
	}
	if path != nil {
		s.applyPath(path)
		s.pending_cmds = s.pending_cmds[1:]
	}
	if len(s.path) > 0 && s.anim_node.Group() != nil {
		// If the current node is in a group that has an edge to the next node
		// then we want to follow that edge immediately rather than waiting for
		// the time for this frame to elapse
		for i := 0; i < s.anim_node.NumGroupOutputs(); i++ {
			edge := s.anim_node.GroupOutput(i)
			if edge.Src() == s.anim_node {
				continue
			}
			if edge.Dst() == s.path[0] {
				s.togo = 0
			}
		}
	}
	// Not enough time to finish the current frame: consume dt, settle any
	// pending facing change, and stop.
	if s.togo >= dt {
		s.togo -= dt
		if s.facing != s.prev_facing {
			s.shared.facings[s.prev_facing].Unload()
			s.shared.facings[s.facing].Load()
			s.prev_facing = s.facing
		}
		return
	}
	dt -= s.togo
	// Pick the next frame: the queued path takes priority, otherwise follow
	// a random automatic edge, otherwise stay on the current node.
	var next *yed.Node
	if len(s.path) > 0 {
		next = s.path[0]
		s.path = s.path[1:]
	} else {
		edge := selectAnEdge(s.anim_node, s.shared.edge_data, []string{""})
		if edge != nil {
			next = edge.Dst()
		} else {
			next = s.anim_node
		}
	}
	if next != nil {
		edge := edgeTo(s.anim_node, next)
		// A non-zero "facing" value on the traversed edge rotates the
		// facing, wrapping around the number of available facings.
		face := s.shared.edge_data[edge].facing
		if face != 0 {
			s.facing = (s.facing + face + len(s.shared.facings)) % len(s.shared.facings)
		}
		// Traversing a command edge finishes one queued anim state.
		if s.shared.edge_data[edge].cmd != "" {
			if len(s.anim_states) == 0 {
				s.anim_states = nil
			} else {
				s.anim_states = s.anim_states[1:]
			}
		}
	}
	s.anim_node = next
	s.togo = s.shared.node_data[s.anim_node].time
	// Recurse with the leftover time to advance through further frames.
	s.Think(dt)
}
// nodeData holds per-node animation info from the anim graph (whose only
// allowed node tags are "time" and "sync" — see verifyAnimGraph).
type nodeData struct {
	// frame display time; Think uses it to set Sprite.togo.
	time int64
	// sync point name for synchronized command groups.
	sync_tag string
}

// edgeData holds per-edge info from the state/anim graphs.
type edgeData struct {
	// facing delta applied when this edge is traversed (see Think).
	facing int
	// relative probability weight for random edge selection.
	weight float64
	// command name that triggers this edge; "" means automatic.
	cmd string
}

// Data pairs a state-graph node with an anim-graph node.
// NOTE(review): not referenced within this file's visible code — confirm
// external users before relying on its meaning.
type Data struct {
	state *yed.Node
	anim  *yed.Node
}

// FrameRect is a frame's pixel bounding box within a texture sheet, spanning
// (X, Y) to (X2, Y2).
type FrameRect struct {
	X, Y, X2, Y2 int
}
// Manager caches sharedSprite data by path so that Sprites loaded from the
// same directory share graphs and textures. Safe for concurrent use.
type Manager struct {
	shared map[string]*sharedSprite
	mutex  sync.Mutex // guards shared
}
// MakeManager returns a new, empty sprite Manager ready for use.
func MakeManager() *Manager {
	return &Manager{shared: make(map[string]*sharedSprite)}
}
// the_manager is the process-wide manager used by the package-level
// LoadSprite.
var the_manager *Manager

// error_texture is a 1x1 magenta texture bound in place of any frame that
// cannot be found, making missing art visually obvious (see Manager.LoadSprite).
var error_texture gl.Texture

// gen_tex_once guards one-time creation of error_texture.
var gen_tex_once sync.Once

func init() {
	the_manager = MakeManager()
}
// LoadSprite loads the sprite at path using the package-wide manager.
func LoadSprite(path string) (*Sprite, error) {
	return the_manager.LoadSprite(path)
}
// loadSharedSprite ensures the shared data for the sprite at path is in the
// manager's cache, loading it on first use. Safe for concurrent use.
func (m *Manager) loadSharedSprite(path string) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.shared[path]; ok {
		return nil
	}
	ss, err := loadSharedSprite(path)
	if err != nil {
		// Don't cache a failed load: the original stored ss even on error,
		// so every subsequent call found the (nil) cache entry, reported
		// success, and handed out a Sprite with a nil shared pointer.
		return err
	}
	m.shared[path] = ss
	return nil
}
// LoadSprite loads (or reuses cached) shared data for the sprite directory
// at path and returns a fresh Sprite positioned at the graphs' start nodes.
func (m *Manager) LoadSprite(path string) (*Sprite, error) {
	// We can't run this during an init() function because it will get queued to
	// run before the opengl context is created, so we just check here and run
	// it if we haven't run it before.
	gen_tex_once.Do(func() {
		render.Queue(func() {
			gl.Enable(gl.TEXTURE_2D)
			error_texture = gl.GenTexture()
			error_texture.Bind(gl.TEXTURE_2D)
			gl.TexEnvf(gl.TEXTURE_ENV, gl.TEXTURE_ENV_MODE, gl.MODULATE)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT)
			// 1x1 solid magenta so that missing frames are visually obvious.
			pink := []byte{255, 0, 255, 255}
			glu.Build2DMipmaps(gl.TEXTURE_2D, 4, 1, 1, gl.RGBA, pink)
		})
	})
	// Clean the path so equivalent spellings share one cache entry.
	path = filepath.Clean(path)
	err := m.loadSharedSprite(path)
	if err != nil {
		return nil, err
	}
	var s Sprite
	// Only the map read needs the lock; the sharedSprite itself is shared by
	// design.
	m.mutex.Lock()
	s.shared = m.shared[path]
	m.mutex.Unlock()
	s.anim_node = s.shared.anim_start
	s.state_node = s.shared.state_start
	return &s, nil
}
Fixed something that was probably a bug
package sprite
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"github.com/runningwild/glop/render"
"github.com/runningwild/glop/util/algorithm"
"github.com/runningwild/opengl/gl"
"github.com/runningwild/opengl/glu"
"github.com/runningwild/yedparse"
)
const (
	// defaultFrameTime is the display time for animation frames whose node
	// has no explicit "time" tag. Units match Think(dt) — presumably
	// milliseconds, TODO confirm.
	defaultFrameTime = 100
)
// spriteError is the error type returned by the sprite verification and
// loading routines in this package.
type spriteError struct {
	Msg string
}

// Error implements the error interface by returning the stored message.
func (e *spriteError) Error() string {
	return e.Msg
}
// tryRelPath returns path rewritten relative to base when a relative form
// can be computed; otherwise it returns path unchanged.
func tryRelPath(base, path string) string {
	if rel, err := filepath.Rel(base, path); err == nil {
		return rel
	}
	return path
}
// getStartNode scans g for the node carrying the tag "mark": "start" and
// returns it, or nil when no such node exists.
func getStartNode(g *yed.Graph) *yed.Node {
	count := g.NumNodes()
	for i := 0; i < count; i++ {
		if node := g.Node(i); node.Tag("mark") == "start" {
			return node
		}
	}
	return nil
}
// Valid state and anim graphs have the following properties:
// * All nodes are labeled
// * It has exactly one node that has the tag "mark" : "start"
// * All nodes in the graph can be reached by starting at the start node
// * All nodes and edges have only the specified tags
func verifyAnyGraph(graph *yed.Graph, node_tags,edge_tags []string) error {
valid_node_tags := make(map[string]bool)
for _,tag := range node_tags {
valid_node_tags[tag] = true
}
valid_edge_tags := make(map[string]bool)
for _,tag := range edge_tags {
valid_edge_tags[tag] = true
}
// Check that all nodes have labels
for i := 0; i < graph.NumNodes(); i++ {
node := graph.Node(i)
if node.NumLines() == 0 || strings.Contains(node.Line(0), ":") {
return &spriteError{ "contains an unlabeled node" }
}
}
// Check that there is exactly one start node
var start *yed.Node
for i := 0; i < graph.NumNodes(); i++ {
if graph.Node(i).Tag("mark") == "start" {
if start == nil {
start = graph.Node(i)
} else {
return &spriteError{ "more than one node is marked as the start node" }
}
}
}
if start == nil {
return &spriteError{ "no start node was found" }
}
// Check that all nodes can be reached by the start node
used := make(map[*yed.Node]bool)
next := make(map[*yed.Node]bool)
next[start] = true
for len(next) > 0 {
var nodes []*yed.Node
for node := range next {
nodes = append(nodes, node)
}
for _,node := range nodes {
delete(next, node)
used[node] = true
}
for _,node := range nodes {
// Traverse the parent
if node.Group() != nil && !used[node.Group()] {
next[node.Group()] = true
}
// Traverse all the children
for i := 0; i < node.NumChildren(); i++ {
if !used[node.Child(i)] {
next[node.Child(i)] = true
}
}
// Traverse all outputs
for i := 0; i < node.NumOutputs(); i++ {
adj := node.Output(i).Dst()
if !used[adj] {
next[adj] = true
}
}
}
}
if len(used) != graph.NumNodes() {
return &spriteError{ "not all nodes are reachable from the start node" }
}
// Check that nodes only have the specified tags
for i := 0; i < graph.NumNodes(); i++ {
node := graph.Node(i)
for _,tag := range node.TagKeys() {
if !(valid_node_tags[tag] || (node == start && tag == "mark")){
return &spriteError{ fmt.Sprintf("a node has an unknown tag (%s)", tag) }
}
}
}
// Check that edges only have the specified tags
for i := 0; i < graph.NumEdges(); i++ {
edge := graph.Edge(i)
for _,tag := range edge.TagKeys() {
if !valid_edge_tags[tag] {
return &spriteError{ fmt.Sprintf("an edge has an unknown tag (%s)", tag) }
}
}
}
return nil
}
// A valid state graph has the following properties in addition to those
// specified in verifyAnyGraph():
// * All output edges from the start node have labels
// * No node has more than one unlabeled output edge
// * There are no tags on any nodes except for the start node
// * There are no groups
func verifyStateGraph(graph *yed.Graph) error {
err := verifyAnyGraph(graph, []string{}, []string{"facing"})
if err != nil { return &spriteError{ fmt.Sprintf("State graph: %v", err) } }
start := getStartNode(graph)
// Check that all output edges from the start node have labels
for i := 0; i < start.NumOutputs(); i++ {
edge := start.Output(i)
if edge.NumLines() == 0 || strings.Contains(edge.Line(0), ":") {
return &spriteError{ "State graph: The start node has an unlabeled output edge" }
}
}
// Check that no node has more than one unlabeled output edge
for i := 0; i < graph.NumNodes(); i++ {
node := graph.Node(i)
num_labels := 0
for j := 0; j < node.NumOutputs(); j++ {
edge := node.Output(j)
if edge.NumLines() > 0 && !strings.Contains(edge.Line(0), ":") {
num_labels++
}
}
if num_labels < node.NumOutputs() - 1 {
return &spriteError{ fmt.Sprintf("State graph: Found more than one unlabeled output edge on node '%s'", node.Line(0)) }
}
}
// Check that no nodes are groups
for i := 0; i < graph.NumNodes(); i++ {
node := graph.Node(i)
if node.NumChildren() > 0 {
return &spriteError{ "State graph: cannot contain groups" }
}
}
return nil
}
// verifyAnimGraph checks that graph satisfies the generic constraints from
// verifyAnyGraph, permitting "time"/"sync" node tags and "facing"/"weight"
// edge tags.
func verifyAnimGraph(graph *yed.Graph) error {
	if err := verifyAnyGraph(graph, []string{"time", "sync"}, []string{"facing", "weight"}); err != nil {
		return &spriteError{fmt.Sprintf("Anim graph: %v", err)}
	}
	return nil
}
// Traverse the directory and do the following things:
// * There are n > 0 directories
// * There is at most 1 other file immediately within path - a thumb.png
// * All of the directories have names that are integers 0 - (n-1)
// * No image is present in any facing that isn't present in the anim graph
func verifyDirectoryStructure(path string, graph *yed.Graph) (num_facings int, filenames []string, err error) {
filepath.Walk(path, func(cpath string, info os.FileInfo, _err error) error {
if _err != nil {
err = _err
return err
}
if cpath == path {
return nil
}
// skip hidden files
if _,file := filepath.Split(cpath); file[0] == '.' {
return nil
}
if info.IsDir() {
num_facings++
return filepath.SkipDir
} else {
switch {
case info.Name() == "anim.xgml":
case info.Name() == "state.xgml":
case info.Name() == "thumb.png":
case strings.HasSuffix(info.Name(), ".gob"):
default:
err = &spriteError{ fmt.Sprintf("Unexpected file found in sprite directory, %s", tryRelPath(path, cpath)) }
return err
}
}
return nil
})
if err != nil { return }
if num_facings == 0 {
err = &spriteError{ "Found no facings in the sprite directory" }
return
}
// Create a set of valid png filenames. If a .png shows up that is not in
// this set then we raise an error. Non-png files are allowed and are
// ignored.
valid_names := make(map[string]bool)
for i := 0; i < graph.NumNodes(); i++ {
valid_names[graph.Node(i).Line(0) + ".png"] = true
}
filenames_map := make(map[string]bool)
for facing := 0; facing < num_facings; facing++ {
cur := filepath.Join(path, fmt.Sprintf("%d", facing))
filepath.Walk(cur, func(cpath string, info os.FileInfo, _err error) error {
if _err != nil {
err = _err
return err
}
if cpath == cur {
return nil
}
// skip hidden files
if _,file := filepath.Split(cpath); file[0] == '.' {
return nil
}
if info.IsDir() {
err = &spriteError{ fmt.Sprintf("Found a directory inside facing directory %d, %s", facing, tryRelPath(path, cpath)) }
return err
}
if filepath.Ext(cpath) == ".png" {
base := filepath.Base(cpath)
if valid_names[base] {
filenames_map[base] = true
} else {
err = &spriteError{ fmt.Sprintf("Found an unused .png file: %s", tryRelPath(path, cpath))}
}
return err
}
return nil
})
}
for filename := range filenames_map {
filenames = append(filenames, filename)
}
sort.Strings(filenames)
return
}
// Used to determine what frames to keep permanently in texture memory, and
// which ones to unload when not needed
type animAlgoGraph struct {
anim *yed.Graph
}
func (cg *animAlgoGraph) NumVertex() int {
return cg.anim.NumNodes()
}
func (cg *animAlgoGraph) Adjacent(n int) (adj []int, cost []float64) {
node := cg.anim.Node(n)
var delay float64 = defaultFrameTime
if node.Tag("time") != "" {
t,err := strconv.ParseFloat(node.Tag("time"), 64)
if err == nil {
delay = t
} else {
// TODO: Should log this as a warning or something
}
}
for i := 0; i < node.NumGroupOutputs(); i++ {
edge := node.GroupOutput(i)
adj = append(adj, edge.Dst().Id())
// frames that are part of groups can be cancelled at any time if the
// animation is supposed to proceed out of the group, so if an edge leads
// away from the current group we will assume that it has a delay of 0.
if node.Group() != nil && edge.Dst().Group() != node.Group() {
cost = append(cost, 0)
} else {
cost = append(cost, delay)
}
}
return
}
// Sprite is a single animated entity. It shares immutable sheet/graph data
// with other sprites loaded from the same path (via shared), and tracks its
// own position in the animation and state graphs.
type Sprite struct {
	// immutable data shared by all sprites loaded from the same path
	shared *sharedSprite
	// current node in the animation graph (the frame being displayed)
	anim_node *yed.Node
	// current node in the state graph
	state_node *yed.Node
	// number of times Think() has been called. This is mostly so that we can
	// run some code the very first time that Think() is called.
	thinks int
	// current facing - needed to index into the appropriate sheet in shared
	facing int
	// previous facing - tracking this lets us prevent having to load/unload
	// lots of facings if a sprite changes facings multiple times between thinks
	prev_facing int
	// Time remaining on the current frame of animation
	togo int64
	// If len(path) > 0 then this is the series of animation frames that will be
	// used next
	path []*yed.Node
	// commands that have been accepted by the state graph but haven't been
	// processed by the anim graph. When path is empty a cmd will be taken from
	// this list and be used to generate the next path.
	pending_cmds []command
	// Used to keep track of the state that the current frame of animation
	// represents.
	anim_states []string
}
// command is a sequence of state-graph edge names queued on a sprite,
// optionally tied to a commandGroup for synchronized execution.
type command struct {
	names []string // List of names of edges
	group *commandGroup // non-nil only for synced commands (see CommandSync)
}
// commandGroup coordinates a command across several sprites so that none of
// them starts executing it until all of them are ready to.
type commandGroup struct {
	// This is the tag that all of the sprites in this group will sync to
	sync_tag string
	// all of the sprites in this list must have this commandGroup as part of
	// their next command to execute before any of them will execute it.
	sprites []*Sprite
	// If ready() ever returns true then this will be set to true and ready()
	// will always return true after that. This prevents a situation where one
	// sprite starts executing this command and then other sprites think they
	// aren't ready because one of them has already progressed past this
	// command.
	was_ready bool
}
// Returns true iff all sprites in this group have no pending cmds before this
// one, and no nodes remaining in their immediate path. Once true it latches
// (was_ready) and stays true.
func (cg *commandGroup) ready() bool {
	if cg.was_ready {
		return true
	}
	for _, sp := range cg.sprites {
		// Any sprite still walking a path, with nothing queued, or whose next
		// queued command is not this group, blocks the whole group.
		switch {
		case len(sp.path) > 0:
			return false
		case len(sp.pending_cmds) == 0:
			// This one is a serious problem
			return false
		case sp.pending_cmds[0].group != cg:
			return false
		}
	}
	cg.was_ready = true
	return true
}
// State returns the name (first line of text) of the sprite's current
// state-graph node.
func (s *Sprite) State() string {
	return s.state_node.Line(0)
}
// Anim returns the name (first line of text) of the sprite's current
// animation-graph node.
func (s *Sprite) Anim() string {
	return s.anim_node.Line(0)
}
// AnimState returns the state represented by the frame currently being
// animated. The state graph may run ahead of the animation, so when there are
// buffered anim_states the oldest one is reported; otherwise the animation
// has caught up and the current state is returned.
func (s *Sprite) AnimState() string {
	if len(s.anim_states) == 0 {
		return s.State()
	}
	return s.anim_states[0]
}
// selects an outgoing edge from node random among those outgoing edges that
// have cmd listed in cmds. The random choice is weighted by the weights
// found in edge_data. Returns nil if no eligible edge has positive total
// weight.
func selectAnEdge(node *yed.Node, edge_data map[*yed.Edge]edgeData, cmds []string) *yed.Edge {
	allowed := make(map[string]bool, len(cmds))
	for _, name := range cmds {
		allowed[name] = true
	}
	// First pass: sum the weights of every eligible edge.
	var total float64
	for i := 0; i < node.NumOutputs(); i++ {
		out := node.Output(i)
		if allowed[edge_data[out].cmd] {
			total += edge_data[out].weight
		}
	}
	if total <= 0 {
		return nil
	}
	// Second pass: walk eligible edges until the running weight crosses the
	// randomly chosen point.
	pick := rand.Float64() * total
	running := 0.0
	for i := 0; i < node.NumOutputs(); i++ {
		out := node.Output(i)
		if !allowed[edge_data[out].cmd] {
			continue
		}
		running += edge_data[out].weight
		if running >= pick {
			return out
		}
	}
	return nil
}
// Returns the edge that leads from a, or an ancestor of a, to b, or an
// ancestor of b. Returns nil when no such edge exists.
func edgeTo(a, b *yed.Node) *yed.Edge {
	for i := 0; i < a.NumGroupOutputs(); i++ {
		out := a.GroupOutput(i)
		// Walk up b's chain of enclosing groups looking for the destination.
		for target := b; target != nil; target = target.Group() {
			if out.Dst() == target {
				return out
			}
		}
	}
	return nil
}
// CommandSync queues one command per sprite such that none of the sprites
// will begin executing its command until all of them are ready to (see
// commandGroup.ready). cmds[i] is the command for ss[i].
// NOTE(review): this indexes cmds by i over ss, so it assumes
// len(cmds) >= len(ss); a shorter cmds slice would panic — confirm callers.
func CommandSync(ss []*Sprite, cmds []string, sync_tag string) {
	// Go through each sprite, if it can execute the specified command then add
	// it to the group (and if it can't, don't).
	var group commandGroup
	group.sync_tag = sync_tag
	for i := range ss {
		cmd := command{
			names: []string{cmds[i]},
			group: &group,
		}
		if ss[i].baseCommand(cmd) {
			group.sprites = append(group.sprites, ss[i])
		}
	}
}
// baseCommand attempts to apply cmd to the sprite's state graph. It first
// dry-runs the whole sequence of edge names to verify each step has a
// matching edge, then advances s.state_node through the sequence (recording
// the state passed through in anim_states), follows any unnamed ("") edges
// automatically, and finally queues cmd for the animation graph. Returns
// false (with no state change) if the dry run fails at any step.
func (s *Sprite) baseCommand(cmd command) bool {
	state_node := s.state_node
	for _, name := range cmd.names {
		state_edge := selectAnEdge(state_node, s.shared.edge_data, []string{name})
		if state_edge == nil { return false }
		state_node = state_edge.Dst()
	}
	// NOTE(review): selectAnEdge picks randomly, so this second walk can take
	// different edges than the validation walk above when several outgoing
	// edges share a name; if that leads to a node with no matching edge,
	// edge.Dst() below would nil-deref — confirm graphs never have duplicate
	// edge names from one node.
	for _, name := range cmd.names {
		s.anim_states = append(s.anim_states, s.state_node.Line(0))
		edge := selectAnEdge(s.state_node, s.shared.edge_data, []string{name})
		s.state_node = edge.Dst()
	}
	// Follow any chain of unnamed edges automatically.
	state_edge := selectAnEdge(s.state_node, s.shared.edge_data, []string{""})
	for state_edge != nil {
		// If this command is synced then we first need to make sure that we'll
		// be able to get to the appropriate sync tag
		// if cmd.group != nil && cmd.group.sync_tag != "" {
		//   dst := state_edge.Dst()
		//   s.shared.node_data
		// }
		s.state_node = state_edge.Dst()
		state_edge = selectAnEdge(s.state_node, s.shared.edge_data, []string{""})
	}
	s.pending_cmds = append(s.pending_cmds, cmd)
	return true
}
// Command queues a single unsynced command on the sprite. The result of
// baseCommand is intentionally ignored; an inapplicable command is a no-op.
func (s *Sprite) Command(cmd string) {
	s.baseCommand(command{ names: []string{cmd}, group: nil })
}
// CommandN queues a sequence of commands as one unsynced unit; the whole
// sequence is validated before any of it is applied (see baseCommand).
func (s *Sprite) CommandN(cmds []string) {
	s.baseCommand(command{ names: cmds, group: nil })
}
// This is a specialized wrapper around a yed.Graph that allows for the start
// node to be differentiated from the ending node in a path in the event that
// they are the same node in the original graph. This means that if a path is
// requested from one node to the same node that the resulting path will not
// be length 0.
type pathingGraph struct {
	shared *sharedSprite
	// graph *yed.Graph
	// the virtual start node, mapped to vertex index NumNodes() (one past the
	// real nodes) in Adjacent/NumVertex
	start *yed.Node
	// Edges will only be followed if there is no command associated with them,
	// or if the command associated with them is the same as this command.
	cmd string
}
// NumVertex returns the node count of the animation graph plus one extra
// vertex representing the virtual start node (see pathingGraph).
func (p pathingGraph) NumVertex() int {
	return p.shared.anim.NumNodes() + 1
}
// Adjacent returns the neighbors of vertex n at unit cost. Index NumNodes()
// denotes the virtual start node; edges tagged with a command other than
// p.cmd are skipped.
func (p pathingGraph) Adjacent(n int) (adj []int, cost []float64) {
	virtualStart := p.shared.anim.NumNodes()
	var node *yed.Node
	if n == virtualStart {
		node = p.start
	} else {
		node = p.shared.anim.Node(n)
	}
	for i := 0; i < node.NumGroupOutputs(); i++ {
		out := node.GroupOutput(i)
		edgeCmd := p.shared.edge_data[out].cmd
		if edgeCmd == "" || edgeCmd == p.cmd {
			adj = append(adj, out.Dst().Id())
			cost = append(cost, 1)
		}
	}
	return
}
// If this returns a path with length 0 it means there wasn't a valid path.
// For each edge name in cmd it runs Dijkstra over the animation graph
// (restricted to unnamed edges and edges tagged with that name) from the
// virtual start node, and concatenates the resulting node paths.
func (s *Sprite) findPathForCmd(cmd command, anim_node *yed.Node) []*yed.Node {
	var node_path []*yed.Node
	for _, name := range cmd.names {
		g := pathingGraph{ shared: s.shared, start: anim_node, cmd: name }
		// Any node reached by an edge tagged with this command name is an
		// acceptable endpoint.
		var end []int
		for i := 0; i < s.shared.anim.NumEdges(); i++ {
			edge := s.shared.anim.Edge(i)
			if s.shared.edge_data[edge].cmd == name {
				end = append(end, edge.Dst().Id())
			}
		}
		// NOTE(review): path[1:] drops the virtual start vertex; if Dijkstra
		// can return an empty/nil path when no endpoint is reachable this
		// would panic — confirm algorithm.Dijkstra's contract.
		_, path := algorithm.Dijkstra(g, []int{ s.shared.anim.NumNodes() }, end)
		for _,id := range path[1:] {
			node_path = append(node_path, s.shared.anim.Node(id))
		}
		// Continue the next search from where this segment ends.
		if len(node_path) > 0 {
			anim_node = node_path[len(node_path) - 1]
		}
	}
	return node_path
}
// applyPath appends the given animation nodes to the sprite's pending path.
// Uses variadic append instead of an element-by-element loop.
func (s *Sprite) applyPath(path []*yed.Node) {
	s.path = append(s.path, path...)
}
// Dims returns the width and height of the sprite's current frame, checking
// the connector sheet first and then the sheet for the current facing.
// Returns (0, 0) if the frame is in neither sheet.
func (s *Sprite) Dims() (dx, dy int) {
	fid := frameId{facing: s.facing, node: s.anim_node.Id()}
	rect, ok := s.shared.connector.rects[fid]
	if !ok {
		rect, ok = s.shared.facings[s.facing].rects[fid]
		if !ok {
			return 0, 0
		}
	}
	return rect.X2 - rect.X, rect.Y2 - rect.Y
}
// Bind binds the texture containing the sprite's current frame and returns
// the frame's normalized texture coordinates within that sheet. The
// connector sheet is checked first, then the sheet for the current facing;
// if the frame is in neither, the pink error texture is bound and zero
// coordinates are returned. Side effect: changes the bound gl.TEXTURE_2D.
func (s *Sprite) Bind() (x,y,x2,y2 float64) {
	var rect FrameRect
	var sh *sheet
	var ok bool
	fid := frameId{ facing: s.facing, node: s.anim_node.Id() }
	var dx,dy float64
	if rect,ok = s.shared.connector.rects[fid]; ok {
		sh = s.shared.connector
	} else if rect,ok = s.shared.facings[s.facing].rects[fid]; ok {
		sh = s.shared.facings[s.facing]
	} else {
		// Frame not found anywhere; show the obvious error texture instead.
		error_texture.Bind(gl.TEXTURE_2D)
		return
	}
	sh.texture.Bind(gl.TEXTURE_2D)
	// Convert pixel rect to [0,1] texture coordinates.
	dx = float64(sh.dx)
	dy = float64(sh.dy)
	x = float64(rect.X) / dx
	y = float64(rect.Y) / dy
	x2 = float64(rect.X2) / dx
	y2 = float64(rect.Y2) / dy
	return
}
// Facing returns the sprite's current facing index (an index into
// shared.facings).
func (s *Sprite) Facing() int {
	return s.facing
}
// Think advances the animation by dt. On the first call it loads facing 0
// and initializes the current frame's timer. If the current frame's
// remaining time (togo) exceeds dt it just counts down (also swapping loaded
// facings if facing changed); otherwise it advances to the next frame —
// taken from the queued path, or chosen among unnamed edges, or staying put —
// and recurses with the leftover dt until the remaining time is consumed.
func (s *Sprite) Think(dt int64) {
	if s.thinks == 0 {
		// First Think: make facing 0 resident and start the frame timer.
		s.shared.facings[0].Load()
		s.togo = s.shared.node_data[s.anim_node].time
	}
	s.thinks++
	if dt < 0 {
		return
		// panic("Can't have dt < 0")
	}
	// If a command is pending and we're between paths, try to plan the next
	// path (synced commands must wait until the whole group is ready).
	var path []*yed.Node
	if len(s.pending_cmds) > 0 && len(s.path) == 0 {
		if s.pending_cmds[0].group == nil || s.pending_cmds[0].group.ready() {
			path = s.findPathForCmd(s.pending_cmds[0], s.anim_node)
		}
	}
	if path != nil {
		s.applyPath(path)
		s.pending_cmds = s.pending_cmds[1:]
	}
	if len(s.path) > 0 && s.anim_node.Group() != nil {
		// If the current node is in a group that has an edge to the next node
		// then we want to follow that edge immediately rather than waiting for
		// the time for this frame to elapse
		for i := 0; i < s.anim_node.NumGroupOutputs(); i++ {
			edge := s.anim_node.GroupOutput(i)
			if edge.Src() == s.anim_node { continue }
			if edge.Dst() == s.path[0] {
				s.togo = 0
			}
		}
	}
	if s.togo >= dt {
		s.togo -= dt
		// Swap resident facings lazily, only once per Think.
		if s.facing != s.prev_facing {
			s.shared.facings[s.prev_facing].Unload()
			s.shared.facings[s.facing].Load()
			s.prev_facing = s.facing
		}
		return
	}
	dt -= s.togo
	// Pick the next frame: queued path first, then any unnamed edge, else
	// stay on the current frame.
	var next *yed.Node
	if len(s.path) > 0 {
		next = s.path[0]
		s.path = s.path[1:]
	} else {
		edge := selectAnEdge(s.anim_node, s.shared.edge_data, []string{""})
		if edge != nil {
			next = edge.Dst()
		} else {
			next = s.anim_node
		}
	}
	if next != nil {
		edge := edgeTo(s.anim_node, next)
		// Edges can rotate the sprite's facing by a signed offset.
		face := s.shared.edge_data[edge].facing
		if face != 0 {
			s.facing = (s.facing + face + len(s.shared.facings)) % len(s.shared.facings)
		}
		// A named edge consumes one buffered anim state (see AnimState).
		if s.shared.edge_data[edge].cmd != "" {
			if len(s.anim_states) == 0 {
				s.anim_states = nil
			} else {
				s.anim_states = s.anim_states[1:]
			}
		}
	}
	s.anim_node = next
	s.togo = s.shared.node_data[s.anim_node].time
	// Consume whatever dt remains on the new frame.
	// NOTE(review): if frames with zero time form a cycle with no path and no
	// timer progress this recursion could loop — confirm graphs always have
	// positive frame times.
	s.Think(dt)
}
// nodeData holds per-animation-node metadata parsed from the graph.
type nodeData struct {
	time int64 // how long this frame is displayed
	sync_tag string // tag used to synchronize grouped commands
}
// edgeData holds per-edge metadata parsed from the graph.
type edgeData struct {
	facing int // signed facing change applied when this edge is followed
	weight float64 // relative weight for random edge selection
	cmd string // command name that triggers this edge ("" = automatic)
}
// Data pairs a state-graph node with an animation-graph node.
type Data struct {
	state *yed.Node
	anim *yed.Node
}
// FrameRect is a frame's pixel rectangle within a sprite sheet, as opposite
// corners (X,Y)-(X2,Y2).
type FrameRect struct {
	X,Y,X2,Y2 int
}
// Manager caches sharedSprite data by path so that multiple Sprites loaded
// from the same path share sheets and graphs. Safe for concurrent use.
type Manager struct {
	shared map[string]*sharedSprite // keyed by cleaned sprite path
	mutex sync.Mutex // guards shared
}
// MakeManager returns a Manager with an empty shared-sprite cache.
func MakeManager() *Manager {
	return &Manager{shared: make(map[string]*sharedSprite)}
}
// the_manager is the process-wide Manager used by the package-level LoadSprite.
var the_manager *Manager
// error_texture is a 1x1 pink texture bound when a frame can't be found
// (created lazily in Manager.LoadSprite).
var error_texture gl.Texture
// gen_tex_once guards the one-time creation of error_texture.
var gen_tex_once sync.Once
func init() {
	the_manager = MakeManager()
}
// LoadSprite loads the sprite at path using the package-wide manager.
func LoadSprite(path string) (*Sprite, error) {
	return the_manager.LoadSprite(path)
}
// loadSharedSprite makes sure the shared data for path is loaded and cached,
// loading it on first request. Safe for concurrent use.
func (m *Manager) loadSharedSprite(path string) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if _, ok := m.shared[path]; ok {
		return nil
	}
	ss, err := loadSharedSprite(path)
	if err != nil {
		// Bug fix: previously the (possibly nil) sharedSprite was cached even
		// on error, so every later call for the same path hit the cache and
		// returned nil error with broken shared data. Only cache on success.
		return err
	}
	m.shared[path] = ss
	return nil
}
// LoadSprite loads the sprite at path, creating a new Sprite that shares
// sheet/graph data with any other sprites loaded from the same path. The
// first call also queues creation of the pink error texture on the render
// thread.
func (m *Manager) LoadSprite(path string) (*Sprite, error) {
	// We can't run this during an init() function because it will get queued to
	// run before the opengl context is created, so we just check here and run
	// it if we haven't run it before.
	gen_tex_once.Do(func() {
		render.Queue(func() {
			gl.Enable(gl.TEXTURE_2D)
			error_texture = gl.GenTexture()
			error_texture.Bind(gl.TEXTURE_2D)
			gl.TexEnvf(gl.TEXTURE_ENV, gl.TEXTURE_ENV_MODE, gl.MODULATE)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_LINEAR)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.REPEAT)
			gl.TexParameterf(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.REPEAT)
			// 1x1 solid pink so missing frames are visually obvious.
			pink := []byte{ 255, 0, 255, 255 }
			glu.Build2DMipmaps(gl.TEXTURE_2D, 4, 1, 1, gl.RGBA, pink)
		})
	})
	// Clean the path so equivalent spellings share one cache entry.
	path = filepath.Clean(path)
	err := m.loadSharedSprite(path)
	if err != nil { return nil, err }
	var s Sprite
	m.mutex.Lock()
	s.shared = m.shared[path]
	m.mutex.Unlock()
	// New sprites begin at the graphs' designated start nodes.
	s.anim_node = s.shared.anim_start
	s.state_node = s.shared.state_start
	return &s, nil
}
|
package server
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"github.com/pachyderm/pachyderm"
"github.com/pachyderm/pachyderm/src/client"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pkg/uuid"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppsserver "github.com/pachyderm/pachyderm/src/server/pps"
pps_server "github.com/pachyderm/pachyderm/src/server/pps/server"
"k8s.io/kubernetes/pkg/api"
kube "k8s.io/kubernetes/pkg/client/unversioned"
)
const (
	NUMFILES = 25
	// NOTE(review): named KB but the value is 1024*1024, i.e. one megabyte.
	// Confirm intent before renaming or changing the value, since other tests
	// may rely on the current magnitude.
	KB = 1024 * 1024
)
// TestJob runs the basic job lifecycle test with 4 shards.
func TestJob(t *testing.T) {
	testJob(t, 4)
}
// TestJobNoShard runs the basic job lifecycle test with 0 shards
// (unspecified parallelism).
func TestJobNoShard(t *testing.T) {
	testJob(t, 0)
}
// testJob exercises a full job lifecycle: it seeds a data repo with many
// small files, runs a shell job that copies them to /pfs/out with the given
// shard count, blocks until the job succeeds, and verifies each file's
// content in the output commit.
func testJob(t *testing.T, shards int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("TestJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	fileContent := "foo\n"
	// We want to create lots of files so that each parallel job will be
	// started with some files
	numFiles := shards*100 + 100
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("cp %s %s", path.Join("/pfs", dataRepo, "*"), "/pfs/out")},
		uint64(shards),
		[]*ppsclient.JobInput{{
			Commit: commit,
			Method: client.ReduceMethod,
		}},
		"",
	)
	require.NoError(t, err)
	// BlockState makes InspectJob wait until the job reaches a terminal state.
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	require.True(t, jobInfo.Parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	// Every input file must have been copied verbatim into the output commit.
	for i := 0; i < numFiles; i++ {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("file-%d", i), 0, 0, "", nil, &buffer))
		require.Equal(t, fileContent, buffer.String())
	}
}
// TestPachCommitIdEnvVarInJob verifies that a job's environment contains
// PACH_OUTPUT_COMMIT_ID and one PACH_<REPO>_COMMIT_ID per input repo, by
// echoing the variables into output files and reading them back.
func TestPachCommitIdEnvVarInJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	repos := []string{
		uniqueString("TestJob_FriarTuck"),
		uniqueString("TestJob_RobinHood"),
	}
	var commits []*pfsclient.Commit
	// Seed each input repo with a single finished commit.
	for _, repo := range repos {
		require.NoError(t, c.CreateRepo(repo))
		commit, err := c.StartCommit(repo, "", "")
		require.NoError(t, err)
		fileContent := "foo\n"
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
		commits = append(commits, commit)
	}
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			"echo $PACH_OUTPUT_COMMIT_ID > /pfs/out/id",
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[0]), repos[0]),
			fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[1]), repos[1]),
		},
		uint64(shards),
		[]*ppsclient.JobInput{
			{
				Commit: commits[0],
				Method: client.ReduceMethod,
			},
			{
				Commit: commits[1],
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	require.True(t, jobInfo.Parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	// The echoed values must match the output commit and each input commit.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "id", 0, 0, "", nil, &buffer))
	require.Equal(t, jobInfo.OutputCommit.ID, strings.TrimSpace(buffer.String()))
	buffer.Reset()
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[0]), 0, 0, "", nil, &buffer))
	require.Equal(t, jobInfo.Inputs[0].Commit.ID, strings.TrimSpace(buffer.String()))
	buffer.Reset()
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[1]), 0, 0, "", nil, &buffer))
	require.Equal(t, jobInfo.Inputs[1].Commit.ID, strings.TrimSpace(buffer.String()))
}
// TestDuplicatedJob verifies job deduplication: creating the same job twice
// returns the identical job, while Force: true creates a distinct one.
func TestDuplicatedJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("TestDuplicatedJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipelineName := uniqueString("TestDuplicatedJob_pipeline")
	// Create the output repo manually (with provenance) instead of via
	// CreatePipeline, so the job can target an existing pipeline name.
	_, err = c.PfsAPIClient.CreateRepo(
		context.Background(),
		&pfsclient.CreateRepoRequest{
			Repo:       client.NewRepo(pipelineName),
			Provenance: []*pfsclient.Repo{client.NewRepo(dataRepo)},
		},
	)
	require.NoError(t, err)
	cmd := []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}
	// Now we manually create the same job
	req := &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: cmd,
		},
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		Inputs: []*ppsclient.JobInput{{
			Commit: commit,
		}},
	}
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	// Identical request => deduplicated to the same job.
	require.Equal(t, job1, job2)
	req.Force = true
	job3, err := c.PpsAPIClient.CreateJob(context.Background(), req)
	require.NoError(t, err)
	// Force bypasses deduplication.
	require.NotEqual(t, job1, job3)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, fileContent, buffer.String())
}
// TestLogs verifies that GetLogs returns each pod's output prefixed with its
// shard index, and that requesting logs for a nonexistent job errors.
func TestLogs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"echo", "foo"},
		nil,
		4,
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// TODO we Sleep here because even though the job has completed kubernetes
	// might not have even noticed the container was created yet
	time.Sleep(10 * time.Second)
	var buffer bytes.Buffer
	require.NoError(t, c.GetLogs(job.ID, &buffer))
	// One "shard | line" entry per pod, 4 pods total.
	require.Equal(t, "0 | foo\n1 | foo\n2 | foo\n3 | foo\n", buffer.String())
	// Should get an error if the job does not exist
	require.YesError(t, c.GetLogs("nonexistent", &buffer))
}
// TestGrep runs the same grep job at parallelism 1 and parallelism 4 and
// checks that both output repos end up the same size, i.e. sharding doesn't
// change the result.
func TestGrep(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	dataRepo := uniqueString("TestGrep_data")
	c := getPachClient(t)
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	job1, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		1,
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	job2, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
		4,
		[]*ppsclient.JobInput{{Commit: commit}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	job1Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// Reuse the request for the second job; same context/timeout.
	inspectJobRequest.Job = job2
	job2Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	repo1Info, err := c.InspectRepo(job1Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	repo2Info, err := c.InspectRepo(job2Info.OutputCommit.Repo.Name)
	require.NoError(t, err)
	require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
// TestJobLongOutputLine verifies that a job can write a single output line of
// 1,000,000 characters (no trailing newline) without failing.
func TestJobLongOutputLine(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	job, err := c.CreateJob(
		"",
		[]string{"sh"},
		[]string{"yes | tr -d '\\n' | head -c 1000000 > /pfs/out/file"},
		1,
		[]*ppsclient.JobInput{},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestPipeline exercises the end-to-end pipeline lifecycle: new commits to
// the input repo trigger pipeline runs, successive output commits chain via
// ParentCommit, and after the pipeline is deleted further input commits no
// longer produce output.
func TestPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Block until the pipeline produces its first READ output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, commit1.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// FromCommit restricts the listing to commits after the first output.
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// The second output commit must chain off the first.
	require.NotNil(t, listCommitResponse.CommitInfo[0].ParentCommit)
	require.Equal(t, outCommits[0].Commit.ID, listCommitResponse.CommitInfo[0].ParentCommit.ID)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "bar\n", buffer.String())
	require.NoError(t, c.DeletePipeline(pipelineName))
	// The deleted pipeline must no longer be listed.
	pipelineInfos, err := c.PpsAPIClient.ListPipeline(context.Background(), &ppsclient.ListPipelineRequest{})
	require.NoError(t, err)
	for _, pipelineInfo := range pipelineInfos.PipelineInfo {
		require.True(t, pipelineInfo.Pipeline.Name != pipelineName)
	}
	// Do third commit to repo; this time pipeline should not run since it's been deleted
	commit3, err := c.StartCommit(dataRepo, commit2.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "file", strings.NewReader("buzz\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	// We will sleep a while to wait for the pipeline to actually get cancelled
	// Also if the pipeline didn't get cancelled (due to a bug), we sleep a while
	// to let the pipeline commit
	time.Sleep(5 * time.Second)
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo: []*pfsclient.Repo{outRepo},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	// there should only be two commits in the pipeline
	require.Equal(t, 2, len(listCommitResponse.CommitInfo))
}
// TestPipelineWithTooMuchParallelism verifies that a pipeline whose requested
// parallelism exceeds the available work still succeeds: with ReduceMethod
// only one pod receives the single input file, and pods that see no input
// must not fail the job.
func TestPipelineWithTooMuchParallelism(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create repos
	dataRepo := uniqueString("TestPipelineWithTooMuchParallelism_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	// This pipeline will fail if any pod sees empty input, since cp won't
	// be able to find the file.
	// We have parallelism set to 3 so that if we actually start 3 pods,
	// which would be a buggy behavior, some jobs don't see any files
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		3,
		[]*ppsclient.PipelineInput{{
			Repo: &pfsclient.Repo{Name: dataRepo},
			// Use reduce method so only one pod gets the file
			Method: client.ReduceMethod,
		}},
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// The output commit must not have been cancelled by a failing pod.
	require.Equal(t, false, outCommits[0].Cancelled)
}
// TestPipelineWithEmptyInputs verifies that a pipeline with no inputs can be
// triggered manually via CreateJob, that all 3 pods run (each writing a
// uniquely named file), and that repeated manual triggers get distinct job
// IDs.
func TestPipelineWithEmptyInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// Each pod writes to a random filename so outputs don't collide.
			"NEW_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)",
			"echo foo > /pfs/out/$NEW_UUID",
		},
		3,
		nil,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	require.Equal(t, 3, int(jobInfo.Parallelism))
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// Three pods => three distinct output files.
	fileInfos, err := c.ListFile(outRepo.Name, outCommits[0].Commit.ID, "", "", nil, false)
	require.NoError(t, err)
	require.Equal(t, 3, len(fileInfos))
	// Make sure that each job gets a different ID
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	require.True(t, job.ID != job2.ID)
}
// TestPipelineThatWritesToOneFile verifies that when all 3 pods write 10
// bytes to the same output path, the per-pod outputs are merged and the
// resulting file is 30 bytes.
func TestPipelineThatWritesToOneFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			// Each pod writes exactly 10 zero bytes to the same path.
			"dd if=/dev/zero of=/pfs/out/file bs=10 count=1",
		},
		3,
		nil,
	))
	// Manually trigger the pipeline
	_, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	// 3 pods x 10 bytes each, merged into one file.
	require.Equal(t, 30, buffer.Len())
}
// TestPipelineThatOverwritesFile verifies shell ">" redirection semantics
// across job runs: each run overwrites /pfs/out/file, so after two runs the
// file still contains only one "foo" per pod (3 total), not 6.
func TestPipelineThatOverwritesFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo > /pfs/out/file",
		},
		3,
		nil,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	// One "foo" per pod (3 pods), merged.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
		Block:      true,
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
	// we expect only 3 foos here because > _overwrites_ rather than appending.
	// Appending is done with >>.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer2.String())
}
// TestPipelineThatAppendsToFile is the ">>" counterpart of
// TestPipelineThatOverwritesFile: since the job appends, the second run's
// output contains six "foo" lines (3 from each run).
func TestPipelineThatAppendsToFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// create pipeline
	pipelineName := uniqueString("pipeline")
	outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			"echo foo >> /pfs/out/file",
		},
		3,
		nil,
	))
	// Manually trigger the pipeline
	job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
	})
	require.NoError(t, err)
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel()
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	// One "foo" per pod (3 pods), merged.
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// Manually trigger the pipeline
	_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Pipeline: &ppsclient.Pipeline{
			Name: pipelineName,
		},
		ParentJob: job,
	})
	require.NoError(t, err)
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{outRepo},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
		FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		ctx,
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
	// Appending (>>): second run adds three more lines to the first run's.
	require.Equal(t, "foo\nfoo\nfoo\nfoo\nfoo\nfoo\n", buffer2.String())
}
// TestRemoveAndAppend runs the remove-and-append scenario with a single
// shard (parallelism 1); see testParellelRemoveAndAppend for details.
func TestRemoveAndAppend(t *testing.T) {
	testParellelRemoveAndAppend(t, 1)
}
// TestParellelRemoveAndAppend runs the remove-and-append scenario with
// parallelism 3.
func TestParellelRemoveAndAppend(t *testing.T) {
	// This test does not pass on Travis, which is why it's skipped right now.
	// As soon as we have a hypothesis for why this fails on Travis but not
	// locally, we should unskip this test and try to fix it.
	t.Skip()
	testParellelRemoveAndAppend(t, 3)
}
// testParellelRemoveAndAppend runs a job that writes /pfs/out/file from
// `parallelism` shards, then a child job that unlinks the file and rewrites
// it, verifying that the unlink in the child fully replaces the parent's
// content rather than appending to it.
func testParellelRemoveAndAppend(t *testing.T, parallelism int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"echo foo > /pfs/out/file",
			},
		},
		Parallelism: uint64(parallelism),
	})
	require.NoError(t, err)
	inspectJobRequest1 := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true, // block until the job reaches a terminal state
	}
	jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
	// each parallel shard contributes one "foo" line
	require.Equal(t, strings.Repeat("foo\n", parallelism), buffer.String())
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"unlink /pfs/out/file && echo bar > /pfs/out/file",
			},
		},
		Parallelism: uint64(parallelism),
		ParentJob:   job1,
	})
	require.NoError(t, err)
	inspectJobRequest2 := &ppsclient.InspectJobRequest{
		Job:        job2,
		BlockState: true,
	}
	jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
	require.NoError(t, err)
	// best-effort debug output; the returned value is deliberately ignored
	c.GetLogs(jobInfo2.Job.ID, os.Stdout)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "file", 0, 0, "", nil, &buffer2))
	// the unlink removed the parent's "foo" lines entirely
	require.Equal(t, strings.Repeat("bar\n", parallelism), buffer2.String())
}
// TestWorkload runs the generic randomized workload generator against the
// cluster for a fixed number of operations.
func TestWorkload(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	require.NoError(t, workload.RunWorkload(c, rng, 100))
}
// TestSharding writes NUMFILES files concurrently, then reads each file
// back both unsharded and split across 4 block shards, checking that the
// sharded reads cover exactly the same number of bytes.
func TestSharding(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestSharding")
	c := getPachClient(t)
	err := c.CreateRepo(repo)
	require.NoError(t, err)
	commit, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	var wg sync.WaitGroup
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			rand := rand.New(rand.NewSource(int64(i)))
			// Use a goroutine-local err: the original assigned to the
			// enclosing err from every goroutine, which is a data race.
			_, err := c.PutFile(repo, commit.ID, fmt.Sprintf("file%d", i), workload.NewReader(rand, KB))
			require.NoError(t, err)
		}()
	}
	wg.Wait()
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	wg = sync.WaitGroup{}
	for i := 0; i < NUMFILES; i++ {
		i := i
		wg.Add(1)
		go func() {
			defer wg.Done()
			var buffer1Shard bytes.Buffer
			var buffer4Shard bytes.Buffer
			shard := &pfsclient.Shard{FileModulus: 1, BlockModulus: 1}
			err := c.GetFile(repo, commit.ID,
				fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer1Shard)
			require.NoError(t, err)
			shard.BlockModulus = 4
			// Reading the 4 block shards should reconstruct the same bytes.
			for blockNumber := uint64(0); blockNumber < 4; blockNumber++ {
				shard.BlockNumber = blockNumber
				err := c.GetFile(repo, commit.ID,
					fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer4Shard)
				require.NoError(t, err)
			}
			require.Equal(t, buffer1Shard.Len(), buffer4Shard.Len())
		}()
	}
	wg.Wait()
}
// TestFromCommit checks that GetFile with a FromCommit returns only the
// data written after that commit, while an empty FromCommit returns the
// full accumulated content.
func TestFromCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	repo := uniqueString("TestFromCommit")
	c := getPachClient(t)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	err := c.CreateRepo(repo)
	require.NoError(t, err)
	commit1, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commit2, err := c.StartCommit(repo, commit1.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", workload.NewReader(rng, KB))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Reading relative to commit1 yields only commit2's KB of data.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, commit1.ID, nil, &buffer))
	require.Equal(t, buffer.Len(), KB)
	// Reading with no FromCommit yields both commits' data.
	buffer.Reset()
	require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, buffer.Len(), 2*KB)
}
// TestSimple exercises the basic commit lifecycle: write a file in one
// commit, read it back, then write again in a child commit and verify the
// child sees the appended content while the parent is unchanged.
func TestSimple(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("TestSimple")
	require.NoError(t, c.CreateRepo(repo))
	commit1, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commitInfos, err := c.ListCommit([]string{repo}, nil, client.CommitTypeNone, false, false, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", nil, &buf))
	require.Equal(t, "foo\n", buf.String())
	commit2, err := c.StartCommit(repo, commit1.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "foo", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// The parent commit still holds only its own write ...
	buf.Reset()
	require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", nil, &buf))
	require.Equal(t, "foo\n", buf.String())
	// ... while the child sees the concatenation.
	buf.Reset()
	require.NoError(t, c.GetFile(repo, commit2.ID, "foo", 0, 0, "", nil, &buf))
	require.Equal(t, "foo\nfoo\n", buf.String())
}
// TestPipelineWithMultipleInputs creates a pipeline that crosses two input
// repos (emitting one line per pair of input files) and verifies the output
// line count after the initial commits and after incremental commits to
// each input. Fixes: the err returned by every StartCommit call was never
// checked, and the Repo composite literals were unkeyed (go vet).
func TestPipelineWithMultipleInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	inputRepo1 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo1))
	inputRepo2 := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(inputRepo2))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
repo1=%s
repo2=%s
echo $repo1
ls -1 /pfs/$repo1
echo $repo2
ls -1 /pfs/$repo2
for f1 in /pfs/$repo1/*
do
	for f2 in /pfs/$repo2/*
	do
		v1=$(<$f1)
		v2=$(<$f2)
		echo $v1$v2 >> /pfs/out/file
	done
done
`, inputRepo1, inputRepo2)},
		4,
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: inputRepo1},
				Method: client.IncrementalReduceMethod,
			},
			{
				Repo:   &pfsclient.Repo{Name: inputRepo2},
				Method: client.IncrementalReduceMethod,
			},
		},
	))
	content := "foo"
	numfiles := 10
	commit1, err := c.StartCommit(inputRepo1, "", "")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit1.ID))
	commit2, err := c.StartCommit(inputRepo2, "", "")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit2.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit2.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{Name: pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	// The cross product: one line per (file in repo1, file in repo2) pair.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Incremental commit to repo1 doubles the pair count.
	commit3, err := c.StartCommit(inputRepo1, commit1.ID, "")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo1, commit3.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo1, commit3.ID))
	listCommitRequest.FromCommit = append(listCommitRequest.FromCommit, outCommits[0].Commit)
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 2*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
	// Incremental commit to repo2 doubles the pair count again.
	commit4, err := c.StartCommit(inputRepo2, commit2.ID, "")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(inputRepo2, commit4.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(inputRepo2, commit4.ID))
	listCommitRequest.FromCommit[0] = outCommits[0].Commit
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 4*numfiles*numfiles, len(lines))
	for _, line := range lines {
		require.Equal(t, len(content)*2, len(line))
	}
}
// TestPipelineWithGlobalMethod creates a pipeline whose input uses the
// global method, so every parallel pod sees ALL input files; each pod
// writes the file count, and we expect one correct count per pod.
// Fix: the Repo composite literal was unkeyed, which go vet rejects.
func TestPipelineWithGlobalMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	globalRepo := uniqueString("inputRepo")
	require.NoError(t, c.CreateRepo(globalRepo))
	numfiles := 20
	pipelineName := uniqueString("pipeline")
	parallelism := 2
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		// this script simply outputs the number of files under the global repo
		[]string{fmt.Sprintf(`
numfiles=(/pfs/%s/*)
numfiles=${#numfiles[@]}
echo $numfiles > /pfs/out/file
`, globalRepo)},
		uint64(parallelism),
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: globalRepo},
				Method: client.GlobalMethod,
			},
		},
	))
	content := "foo"
	commit, err := c.StartCommit(globalRepo, "", "")
	require.NoError(t, err)
	for i := 0; i < numfiles; i++ {
		_, err = c.PutFile(globalRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(globalRepo, commit.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{Name: pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", nil, false)
	require.NoError(t, err)
	require.Equal(t, 1, len(fileInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, parallelism, len(lines)) // each job outputs one line
	for _, line := range lines {
		// every pod saw the full set of input files
		require.Equal(t, fmt.Sprintf("%d", numfiles), line)
	}
}
// TestPipelineWithPrevRepoAndIncrementalReduceMethod verifies that with
// the incremental-reduce method the previous output is mounted at
// /pfs/prev, so the second run appends the new input plus the prior
// output. Fixes: PutFile errors were silently dropped and the Repo
// composite literals were unkeyed (go vet).
func TestPipelineWithPrevRepoAndIncrementalReduceMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf(`
cat /pfs/%s/file >>/pfs/out/file
if [ -d "/pfs/prev" ]; then
	cat /pfs/prev/file >>/pfs/out/file
fi
`, repo)},
		1,
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: repo},
				Method: client.IncrementalReduceMethod,
			},
		},
	))
	commit1, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{Name: pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// First run: no /pfs/prev yet, so only the new input line appears.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 1, len(lines))
	require.Equal(t, "foo", lines[0])
	commit2, err := c.StartCommit(repo, commit1.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{Name: pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
		FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	// Second run: accumulated view is parent output ("foo"), plus the new
	// input ("bar"), plus the previous output mounted at /pfs/prev ("foo").
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
	lines = strings.Split(strings.TrimSpace(buffer2.String()), "\n")
	require.Equal(t, 3, len(lines))
	require.Equal(t, "foo", lines[0])
	require.Equal(t, "bar", lines[1])
	require.Equal(t, "foo", lines[2])
}
// TestPipelineThatUseNonexistentInputs checks that creating a pipeline
// whose input repo does not exist fails.
func TestPipelineThatUseNonexistentInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	inputs := []*ppsclient.PipelineInput{
		{
			Repo: &pfsclient.Repo{Name: "nonexistent"},
		},
	}
	require.YesError(t, c.CreatePipeline(
		uniqueString("pipeline"),
		"",
		[]string{"bash"},
		[]string{""},
		1,
		inputs,
	))
}
// TestPipelineWhoseInputsGetDeleted checks that an input repo cannot be
// deleted while a pipeline consumes it, and that deleting the pipeline and
// its output repo first makes the deletion succeed.
func TestPipelineWhoseInputsGetDeleted(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		// An empty transform suffices. The original fmt.Sprintf(``, repo)
		// was a go vet error: an argument with no formatting directive.
		[]string{""},
		1,
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
	))
	// Shouldn't be able to delete the input repo because the pipeline
	// is still running
	require.YesError(t, c.DeleteRepo(repo))
	// The correct flow to delete the input repo
	require.NoError(t, c.DeletePipeline(pipelineName))
	require.NoError(t, c.DeleteRepo(pipelineName))
	require.NoError(t, c.DeleteRepo(repo))
}
// TestAssets fails if you updated a static asset (such as
// doc/pipeline_spec.md) that is embedded in code but forgot to run:
// $ make assets
func TestAssets(t *testing.T) {
	for _, assetPath := range []string{"doc/pipeline_spec.md"} {
		// on-disk copy in the source tree
		onDisk, err := ioutil.ReadFile(filepath.Join(os.Getenv("GOPATH"), "src/github.com/pachyderm/pachyderm/", assetPath))
		if err != nil {
			t.Fatal(err)
		}
		// copy embedded at build time
		embedded, err := pachyderm.Asset(assetPath)
		if err != nil {
			t.Fatal(err)
		}
		require.Equal(t, onDisk, embedded)
	}
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction.
// It looks like this:
//
//	A
//	| \
//	v  v
//	B-->C
//
// When we commit to A we expect to see 1 commit on C rather than 2.
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B simply copies A's file.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
	))
	// C diffs A's file against B's copy of it.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		1,
		[]*ppsclient.PipelineInput{
			{Repo: client.NewRepo(aRepo)},
			{Repo: client.NewRepo(bPipeline)},
		},
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "", "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	// FlushCommit waits for B and C, hence 2 commit infos.
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commit2, err := c.StartCommit(aRepo, "", "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// There should only be 2 commits on cRepo
	commitInfos, err = c.ListCommit([]string{cPipeline}, nil, pfsclient.CommitType_COMMIT_TYPE_READ, false, false, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		// C takes the diff of 2 files that should always be the same, so we
		// expect an empty file
		fileInfo, err := c.InspectFile(cPipeline, commitInfo.Commit.ID, "file", "", nil)
		require.NoError(t, err)
		require.Equal(t, uint64(0), fileInfo.SizeBytes)
	}
}
// TestDirectory verifies that jobs can create directories under /pfs/out
// and that a child job's appends to a file inside such a directory land
// after the parent's content.
func TestDirectory(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"mkdir /pfs/out/dir",
				"echo foo >> /pfs/out/dir/file",
			},
		},
		Parallelism: 3,
	})
	require.NoError(t, err)
	inspectJobRequest1 := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true, // block until the job reaches a terminal state
	}
	jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "dir/file", 0, 0, "", nil, &buffer))
	// one "foo" per parallel shard
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"echo bar >> /pfs/out/dir/file",
			},
		},
		Parallelism: 3,
		ParentJob:   job1,
	})
	require.NoError(t, err)
	inspectJobRequest2 := &ppsclient.InspectJobRequest{
		Job:        job2,
		BlockState: true,
	}
	jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
	// child job appends, so parent content comes first
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "dir/file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\nfoo\nfoo\nbar\nbar\nbar\n", buffer.String())
}
// TestFailedJobReadData verifies that the output commit of a failed job is
// cancelled but its data remains readable.
func TestFailedJobReadData(t *testing.T) {
	// We want to enable users to be able to read data from cancelled commits for debugging purposes
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	repo := uniqueString("TestJob_Foo")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	// The job writes output and then deliberately exits non-zero.
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			"echo fubar > /pfs/out/file",
			"exit 1",
		},
		uint64(shards),
		[]*ppsclient.JobInput{
			{
				Commit: commit,
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_FAILURE.String(), jobInfo.State.String())
	require.True(t, jobInfo.Parallelism > 0)
	// best-effort debug output; the returned value is deliberately ignored
	c.GetLogs(jobInfo.Job.ID, os.Stdout)
	// The output commit must be finished (READ) but marked cancelled.
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	require.Equal(t, true, commitInfo.Cancelled)
	// Data written before the failure is still readable.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "fubar", strings.TrimSpace(buffer.String()))
}
// TestFlushCommit builds a five-stage linear pipeline chain and verifies
// that FlushCommit blocks until every downstream stage has produced a
// commit, both for an orphan commit and for a commit with a parent.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	repoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := repoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Chain five copy pipelines: stage i+1 reads stage i's output.
	numStages := 5
	for i := 0; i < numStages; i++ {
		input := repoName(i)
		require.NoError(t, c.CreatePipeline(
			repoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", input, "file"), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(input)}},
		))
	}
	// runOnce commits to the source repo and flushes through the chain,
	// expecting one commit info per stage. Returns the new commit's ID.
	runOnce := func(parent string) string {
		commit, err := c.StartCommit(sourceRepo, parent, "")
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
		commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, numStages, len(commitInfos))
		return commit.ID
	}
	// Run twice: once on an orphan commit, once on a commit with a parent.
	runOnce(runOnce(""))
}
// TestFlushCommitWithFailure is similar to TestFlushCommit except that
// the pipeline is designed to fail: the third stage copies a nonexistent
// file, so FlushCommit must return an error.
func TestFlushCommitWithFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline; the third stage is designed to fail
	numStages := 5
	for i := 0; i < numStages; i++ {
		fileName := "file"
		if i == 3 {
			fileName = "nonexistent"
		}
		repo := makeRepoName(i)
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, fileName), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		))
	}
	commit, err := c.StartCommit(sourceRepo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
	// Assert the failure BEFORE dereferencing err: the original called
	// err.Error() first, which panics with a nil error and masks the
	// actual test failure.
	require.YesError(t, err)
	fmt.Println(err.Error())
}
// TestRecreatePipeline tracks #432: deleting and recreating a pipeline
// with the same name must still trigger jobs on new commits.
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	// createPipelineAndRunJob creates the pipeline, commits input data and
	// blocks until one output commit appears.
	createPipelineAndRunJob := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		))
		commit, err := c.StartCommit(repo, "", "")
		require.NoError(t, err)
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
		listCommitRequest := &pfsclient.ListCommitRequest{
			Repo:       []*pfsclient.Repo{{pipeline}},
			CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
			Block:      true,
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel()
		listCommitResponse, err := c.PfsAPIClient.ListCommit(
			ctx,
			listCommitRequest,
		)
		require.NoError(t, err)
		outCommits := listCommitResponse.CommitInfo
		require.Equal(t, 1, len(outCommits))
	}
	// Do it twice. We expect jobs to be created on both runs.
	createPipelineAndRunJob()
	require.NoError(t, c.DeleteRepo(pipeline))
	require.NoError(t, c.DeletePipeline(pipeline))
	createPipelineAndRunJob()
}
// TestPipelineState checks that a healthy pipeline reports RUNNING, and
// that after its output repo is deleted out from under it the pipeline is
// observed in the RESTARTING state at least once.
func TestPipelineState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
	))
	time.Sleep(5 * time.Second) // wait for this pipeline to get picked up
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	// Now we introduce an error to the pipeline by removing its output repo
	// and starting a job
	require.NoError(t, c.DeleteRepo(pipeline))
	commit, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	// So the state of the pipeline will alternate between running and
	// restarting. We just want to make sure that it has definitely restarted.
	// Sample the state once a second for 10 seconds.
	var states []interface{}
	for i := 0; i < 10; i++ {
		time.Sleep(1 * time.Second)
		pipelineInfo, err = c.InspectPipeline(pipeline)
		require.NoError(t, err)
		states = append(states, pipelineInfo.State)
	}
	require.EqualOneOf(t, states, ppsclient.PipelineState_PIPELINE_RESTARTING)
}
// TestClusterFunctioningAfterMembershipChange rescales the pachd
// replication controller and then re-runs the basic job test to verify
// the cluster still works after membership changes.
func TestClusterFunctioningAfterMembershipChange(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	k := getKubeClient(t)
	scalePachd(t, k)
	// Wait for the cluster to stablize... ideally we shouldn't have to
	// do that.
	time.Sleep(20 * time.Second)
	// Re-run the basic job test directly as a post-rescale smoke test.
	TestJob(t)
}
// scalePachd scales the number of pachd nodes to anywhere from 1 to
// twice the original number.
// It's guaranteed that the new replica number will be different from
// the original.
func scalePachd(t *testing.T, k *kube.Client) {
	rc := k.ReplicationControllers(api.NamespaceDefault)
	pachdRc, err := rc.Get("pachd")
	require.NoError(t, err)
	originalReplicas := pachdRc.Spec.Replicas
	// rand.Intn panics for a non-positive argument, so fail loudly if the
	// controller reports no replicas instead of crashing the test binary.
	require.True(t, originalReplicas > 0)
	for {
		pachdRc.Spec.Replicas = rand.Intn(originalReplicas*2) + 1
		if pachdRc.Spec.Replicas != originalReplicas {
			break
		}
	}
	fmt.Printf("scaling pachd to %d replicas\n", pachdRc.Spec.Replicas)
	_, err = rc.Update(pachdRc)
	require.NoError(t, err)
}
// TestScrubbedErrors checks that API errors returned to clients are
// scrubbed down to stable, human-readable messages.
func TestScrubbedErrors(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	_, err := c.InspectPipeline("blah")
	require.Equal(t, "PipelineInfos blah not found", err.Error())
	err = c.CreatePipeline(
		"lskdjf$#%^ERTYC",
		"",
		[]string{},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: "test"}}},
	)
	require.Equal(t, "Repo test not found", err.Error())
	_, err = c.CreateJob("askjdfhgsdflkjh", []string{}, []string{}, 0, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "Repo job_.* not found", err.Error())
	_, err = c.InspectJob("blah", true)
	require.Equal(t, "JobInfos blah not found", err.Error())
	home := os.Getenv("HOME")
	tmpPath := filepath.Join(home, "/tmpfile")
	f, err := os.Create(tmpPath)
	// Check the error before registering cleanup, and close the handle
	// before removing the file — the original leaked the descriptor.
	require.NoError(t, err)
	defer func() {
		f.Close()
		os.Remove(tmpPath)
	}()
	err = c.GetLogs("bogusJobId", f)
	require.Equal(t, "Job bogusJobId not found", err.Error())
}
// TestAcceptReturnCode verifies that a job whose command exits with a
// code listed in AcceptReturnCode is still reported as successful.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	createReq := &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd:              []string{"sh"},
			Stdin:            []string{"exit 1"},
			AcceptReturnCode: []int64{1},
		},
	}
	job, err := c.PpsAPIClient.CreateJob(context.Background(), createReq)
	require.NoError(t, err)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	inspectReq := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true,
	}
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectReq)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// getPachClient returns an API client connected to the local pachd
// instance, failing the test if the connection cannot be created.
func getPachClient(t *testing.T) *client.APIClient {
	// use a distinct local name so the `client` package isn't shadowed
	c, err := client.NewFromAddress("0.0.0.0:30650")
	require.NoError(t, err)
	return c
}
// getKubeClient returns a Kubernetes client pointed at the local API
// server, failing the test if construction fails.
func getKubeClient(t *testing.T) *kube.Client {
	cfg := &kube.Config{
		Host:     "0.0.0.0:8080",
		Insecure: false,
	}
	k, err := kube.New(cfg)
	require.NoError(t, err)
	return k
}
// uniqueString returns prefix plus a short random suffix so that
// concurrently running tests don't collide on repo/pipeline names.
func uniqueString(prefix string) string {
	suffix := uuid.NewWithoutDashes()[:12]
	return prefix + "_" + suffix
}
Update integration tests to be compatible with new Kubernetes
package server
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"golang.org/x/net/context"
"github.com/pachyderm/pachyderm"
"github.com/pachyderm/pachyderm/src/client"
pfsclient "github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pkg/uuid"
ppsclient "github.com/pachyderm/pachyderm/src/client/pps"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppsserver "github.com/pachyderm/pachyderm/src/server/pps"
pps_server "github.com/pachyderm/pachyderm/src/server/pps/server"
"k8s.io/kubernetes/pkg/api"
kube_client "k8s.io/kubernetes/pkg/client/restclient"
kube "k8s.io/kubernetes/pkg/client/unversioned"
)
const (
	// NUMFILES is the number of files written by the sharding/concurrency tests.
	NUMFILES = 25
	// KB is the payload size used for generated test data.
	// NOTE(review): the value is 1024*1024, i.e. one mebibyte, despite the
	// name — confirm whether the name or the value is intended.
	KB = 1024 * 1024
)
// TestJob runs the basic job scenario with 4 shards; see testJob.
func TestJob(t *testing.T) {
	testJob(t, 4)
}
// TestJobNoShard runs the basic job scenario with the default (0) shard
// count; see testJob.
func TestJobNoShard(t *testing.T) {
	testJob(t, 0)
}
// testJob writes many small files to a repo, runs a copy job over them
// with the given shard count, and verifies every file arrives intact in
// the job's output commit.
func testJob(t *testing.T, shards int) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	dataRepo := uniqueString("TestJob_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "", "")
	require.NoError(t, err)
	fileContent := "foo\n"
	// We want to create lots of files so that each parallel job will be
	// started with some files
	numFiles := shards*100 + 100
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{fmt.Sprintf("cp %s %s", path.Join("/pfs", dataRepo, "*"), "/pfs/out")},
		uint64(shards),
		[]*ppsclient.JobInput{{
			Commit: commit,
			Method: client.ReduceMethod,
		}},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // block until the job reaches a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
	require.True(t, jobInfo.Parallelism > 0)
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	// Every input file must be present and unmodified in the output.
	for i := 0; i < numFiles; i++ {
		var buffer bytes.Buffer
		require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("file-%d", i), 0, 0, "", nil, &buffer))
		require.Equal(t, fileContent, buffer.String())
	}
}
// TestPachCommitIdEnvVarInJob verifies that job containers see the
// PACH_OUTPUT_COMMIT_ID env var and a PACH_<REPO>_COMMIT_ID env var for
// each input repo, by echoing them into output files and comparing them
// against the IDs reported by InspectJob.
func TestPachCommitIdEnvVarInJob(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
shards := 0
c := getPachClient(t)
repos := []string{
uniqueString("TestJob_FriarTuck"),
uniqueString("TestJob_RobinHood"),
}
var commits []*pfsclient.Commit
// Seed each input repo with a single finished commit.
for _, repo := range repos {
require.NoError(t, c.CreateRepo(repo))
commit, err := c.StartCommit(repo, "", "")
require.NoError(t, err)
fileContent := "foo\n"
_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(repo, commit.ID))
commits = append(commits, commit)
}
// RepoNameToEnvString maps a repo name to the env-var-safe form used in
// PACH_<REPO>_COMMIT_ID.
job, err := c.CreateJob(
"",
[]string{"bash"},
[]string{
"echo $PACH_OUTPUT_COMMIT_ID > /pfs/out/id",
fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[0]), repos[0]),
fmt.Sprintf("echo $PACH_%v_COMMIT_ID > /pfs/out/input-id-%v", pps_server.RepoNameToEnvString(repos[1]), repos[1]),
},
uint64(shards),
[]*ppsclient.JobInput{
{
Commit: commits[0],
Method: client.ReduceMethod,
},
{
Commit: commits[1],
Method: client.ReduceMethod,
},
},
"",
)
require.NoError(t, err)
// BlockState makes InspectJob wait for the job to finish.
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
require.True(t, jobInfo.Parallelism > 0)
commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
require.NoError(t, err)
require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
// The echoed IDs include a trailing newline, hence TrimSpace.
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "id", 0, 0, "", nil, &buffer))
require.Equal(t, jobInfo.OutputCommit.ID, strings.TrimSpace(buffer.String()))
buffer.Reset()
require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[0]), 0, 0, "", nil, &buffer))
require.Equal(t, jobInfo.Inputs[0].Commit.ID, strings.TrimSpace(buffer.String()))
buffer.Reset()
require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, fmt.Sprintf("input-id-%v", repos[1]), 0, 0, "", nil, &buffer))
require.Equal(t, jobInfo.Inputs[1].Commit.ID, strings.TrimSpace(buffer.String()))
}
// TestDuplicatedJob verifies that submitting an identical CreateJobRequest
// twice is deduplicated (same job returned), and that setting Force
// bypasses the deduplication and creates a new job.
func TestDuplicatedJob(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
dataRepo := uniqueString("TestDuplicatedJob_data")
require.NoError(t, c.CreateRepo(dataRepo))
commit, err := c.StartCommit(dataRepo, "", "")
require.NoError(t, err)
fileContent := "foo\n"
_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader(fileContent))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
pipelineName := uniqueString("TestDuplicatedJob_pipeline")
// Create the output repo by hand (with provenance on the data repo)
// since we create the job manually rather than via CreatePipeline.
_, err = c.PfsAPIClient.CreateRepo(
context.Background(),
&pfsclient.CreateRepoRequest{
Repo: client.NewRepo(pipelineName),
Provenance: []*pfsclient.Repo{client.NewRepo(dataRepo)},
},
)
require.NoError(t, err)
cmd := []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}
// Now we manually create the same job
req := &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: cmd,
},
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
Inputs: []*ppsclient.JobInput{{
Commit: commit,
}},
}
job1, err := c.PpsAPIClient.CreateJob(context.Background(), req)
require.NoError(t, err)
job2, err := c.PpsAPIClient.CreateJob(context.Background(), req)
require.NoError(t, err)
// Identical requests must be deduplicated to the same job.
require.Equal(t, job1, job2)
req.Force = true
job3, err := c.PpsAPIClient.CreateJob(context.Background(), req)
require.NoError(t, err)
// Force skips deduplication, so a distinct job is created.
require.NotEqual(t, job1, job3)
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job1,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
require.Equal(t, fileContent, buffer.String())
}
// TestLogs runs a 4-way parallel job that echoes "foo" and verifies that
// GetLogs returns one prefixed line per pod, and that fetching logs for a
// nonexistent job fails.
func TestLogs(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
job, err := c.CreateJob(
"",
[]string{"echo", "foo"},
nil,
4,
[]*ppsclient.JobInput{},
"",
)
require.NoError(t, err)
// Wait for the job to reach a terminal state.
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
// TODO we Sleep here because even though the job has completed kubernetes
// might not have even noticed the container was created yet
time.Sleep(10 * time.Second)
var buffer bytes.Buffer
require.NoError(t, c.GetLogs(job.ID, &buffer))
// Each of the 4 pods contributes one "<shard> | foo" line.
require.Equal(t, "0 | foo\n1 | foo\n2 | foo\n3 | foo\n", buffer.String())
// Should get an error if the job does not exist
require.YesError(t, c.GetLogs("nonexistent", &buffer))
}
// TestGrep runs the same grep job at parallelism 1 and 4 and verifies
// both produce output repos of identical total size, i.e. sharding does
// not change the result.
func TestGrep(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
dataRepo := uniqueString("TestGrep_data")
c := getPachClient(t)
require.NoError(t, c.CreateRepo(dataRepo))
commit, err := c.StartCommit(dataRepo, "", "")
require.NoError(t, err)
for i := 0; i < 100; i++ {
_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\nbar\nfizz\nbuzz\n"))
require.NoError(t, err)
}
require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
// Same transform, parallelism 1.
job1, err := c.CreateJob(
"",
[]string{"bash"},
[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
1,
[]*ppsclient.JobInput{{Commit: commit}},
"",
)
require.NoError(t, err)
// Same transform, parallelism 4.
job2, err := c.CreateJob(
"",
[]string{"bash"},
[]string{fmt.Sprintf("grep foo /pfs/%s/* >/pfs/out/foo", dataRepo)},
4,
[]*ppsclient.JobInput{{Commit: commit}},
"",
)
require.NoError(t, err)
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job1,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
job1Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
// Reuse the request for the second job; the shared 30s ctx bounds both waits.
inspectJobRequest.Job = job2
job2Info, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
repo1Info, err := c.InspectRepo(job1Info.OutputCommit.Repo.Name)
require.NoError(t, err)
repo2Info, err := c.InspectRepo(job2Info.OutputCommit.Repo.Name)
require.NoError(t, err)
require.Equal(t, repo1Info.SizeBytes, repo2Info.SizeBytes)
}
// TestJobLongOutputLine verifies that a job writing a single 1,000,000
// character line (no newlines) to its output file still succeeds.
func TestJobLongOutputLine(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
job, err := c.CreateJob(
"",
[]string{"sh"},
[]string{"yes | tr -d '\\n' | head -c 1000000 > /pfs/out/file"},
1,
[]*ppsclient.JobInput{},
"",
)
require.NoError(t, err)
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job,
BlockState: true,
}
// NOTE(review): unlike sibling tests, this InspectJob uses a bare
// context.Background() with no timeout, so a hung job blocks the test
// until the overall test deadline — consider adding WithTimeout.
jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), inspectJobRequest)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// TestPipeline exercises the full pipeline lifecycle: a cp pipeline runs
// once per input commit, producing chained output commits; after
// DeletePipeline, further input commits no longer trigger runs.
func TestPipeline(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create repos
dataRepo := uniqueString("TestPipeline_data")
require.NoError(t, c.CreateRepo(dataRepo))
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
nil,
1,
[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: dataRepo}}},
))
// Do first commit to repo
commit1, err := c.StartCommit(dataRepo, "", "")
require.NoError(t, err)
_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
// Block until the pipeline produces a finished (READ) output commit.
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
listCommitResponse, err := c.PfsAPIClient.ListCommit(
context.Background(),
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
require.Equal(t, "foo\n", buffer.String())
// Do second commit to repo
commit2, err := c.StartCommit(dataRepo, commit1.ID, "")
require.NoError(t, err)
_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
// FromCommit makes the blocking list return only commits newer than the
// first output commit.
listCommitRequest = &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
listCommitResponse, err = c.PfsAPIClient.ListCommit(
context.Background(),
listCommitRequest,
)
require.NoError(t, err)
// The second output commit must be chained onto the first.
require.NotNil(t, listCommitResponse.CommitInfo[0].ParentCommit)
require.Equal(t, outCommits[0].Commit.ID, listCommitResponse.CommitInfo[0].ParentCommit.ID)
outCommits = listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
buffer = bytes.Buffer{}
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
require.Equal(t, "bar\n", buffer.String())
require.NoError(t, c.DeletePipeline(pipelineName))
// The deleted pipeline must no longer be listed.
pipelineInfos, err := c.PpsAPIClient.ListPipeline(context.Background(), &ppsclient.ListPipelineRequest{})
require.NoError(t, err)
for _, pipelineInfo := range pipelineInfos.PipelineInfo {
require.True(t, pipelineInfo.Pipeline.Name != pipelineName)
}
// Do third commit to repo; this time pipeline should not run since it's been deleted
commit3, err := c.StartCommit(dataRepo, commit2.ID, "")
require.NoError(t, err)
_, err = c.PutFile(dataRepo, commit3.ID, "file", strings.NewReader("buzz\n"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
// We will sleep a while to wait for the pipeline to actually get cancelled
// Also if the pipeline didn't get cancelled (due to a bug), we sleep a while
// to let the pipeline commit
time.Sleep(5 * time.Second)
listCommitRequest = &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
}
listCommitResponse, err = c.PfsAPIClient.ListCommit(
context.Background(),
listCommitRequest,
)
require.NoError(t, err)
// there should only be two commits in the pipeline
require.Equal(t, 2, len(listCommitResponse.CommitInfo))
}
// TestPipelineWithTooMuchParallelism verifies that with a ReduceMethod
// input and parallelism 3 over a single file, only the pod that receives
// the file runs cp; if extra pods incorrectly started with empty input,
// cp would fail and the output commit would be cancelled.
func TestPipelineWithTooMuchParallelism(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create repos
dataRepo := uniqueString("TestPipelineWithTooMuchParallelism_data")
require.NoError(t, c.CreateRepo(dataRepo))
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
// This pipeline will fail if any pod sees empty input, since cp won't
// be able to find the file.
// We have parallelism set to 3 so that if we actually start 3 pods,
// which would be a buggy behavior, some jobs don't see any files
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
nil,
3,
[]*ppsclient.PipelineInput{{
Repo: &pfsclient.Repo{Name: dataRepo},
// Use reduce method so only one pod gets the file
Method: client.ReduceMethod,
}},
))
// Do first commit to repo
commit1, err := c.StartCommit(dataRepo, "", "")
require.NoError(t, err)
_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
// Block (up to 30s) until the pipeline finishes an output commit.
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel() //cleanup resources
listCommitResponse, err := c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
require.Equal(t, "foo\n", buffer.String())
// A cancelled commit here would indicate the job (partially) failed.
require.Equal(t, false, outCommits[0].Cancelled)
}
// TestPipelineWithEmptyInputs verifies that a pipeline with no inputs can
// be triggered manually, runs at the configured parallelism (each pod
// writing one uniquely-named file), and that re-triggering produces a job
// with a different ID.
func TestPipelineWithEmptyInputs(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
// Each pod writes a random filename so the 3 pods produce 3 files.
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"sh"},
[]string{
"NEW_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)",
"echo foo > /pfs/out/$NEW_UUID",
},
3,
nil,
))
// Manually trigger the pipeline
job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
})
require.NoError(t, err)
inspectJobRequest := &ppsclient.InspectJobRequest{
Job: job,
BlockState: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
// With no inputs, the job should still fan out to full parallelism.
require.Equal(t, 3, int(jobInfo.Parallelism))
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
listCommitResponse, err := c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
// One file per pod.
fileInfos, err := c.ListFile(outRepo.Name, outCommits[0].Commit.ID, "", "", nil, false)
require.NoError(t, err)
require.Equal(t, 3, len(fileInfos))
// Make sure that each job gets a different ID
job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
})
require.NoError(t, err)
require.True(t, job.ID != job2.ID)
}
// TestPipelineThatWritesToOneFile verifies that when all 3 pods of a job
// write 10 bytes to the same output path, the contributions are merged:
// the resulting file is 3*10 = 30 bytes.
func TestPipelineThatWritesToOneFile(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"sh"},
[]string{
"dd if=/dev/zero of=/pfs/out/file bs=10 count=1",
},
3,
nil,
))
// Manually trigger the pipeline
_, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
})
require.NoError(t, err)
// Block until the output commit is finished.
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
listCommitResponse, err := c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
// 3 pods x 10 bytes each.
require.Equal(t, 30, buffer.Len())
}
// TestPipelineThatOverwritesFile verifies shell truncation semantics
// across chained jobs: a child job whose pods `>` (overwrite) the output
// file replaces the parent's content rather than appending to it.
func TestPipelineThatOverwritesFile(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"sh"},
[]string{
"echo foo > /pfs/out/file",
},
3,
nil,
))
// Manually trigger the pipeline
job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
})
require.NoError(t, err)
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel()
listCommitResponse, err := c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
// One "foo" line per pod (3 pods).
require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
// Manually trigger the pipeline
// ParentJob chains this run onto the first job's output commit.
_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
ParentJob: job,
})
require.NoError(t, err)
listCommitRequest = &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
Block: true,
}
listCommitResponse, err = c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits = listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer2 bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
// we expect only 3 foos here because > _overwrites_ rather than appending.
// Appending is done with >>.
require.Equal(t, "foo\nfoo\nfoo\n", buffer2.String())
}
// TestPipelineThatAppendsToFile is the counterpart to the overwrite test:
// a child job whose pods `>>` (append to) the output file adds its lines
// on top of the parent job's content, doubling the line count.
func TestPipelineThatAppendsToFile(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// create pipeline
pipelineName := uniqueString("pipeline")
outRepo := ppsserver.PipelineRepo(client.NewPipeline(pipelineName))
require.NoError(t, c.CreatePipeline(
pipelineName,
"",
[]string{"sh"},
[]string{
"echo foo >> /pfs/out/file",
},
3,
nil,
))
// Manually trigger the pipeline
job, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
})
require.NoError(t, err)
listCommitRequest := &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
}
ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
defer cancel()
listCommitResponse, err := c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits := listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
// One "foo" line per pod (3 pods).
require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
// Manually trigger the pipeline
// ParentJob chains this run onto the first job's output commit.
_, err = c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Pipeline: &ppsclient.Pipeline{
Name: pipelineName,
},
ParentJob: job,
})
require.NoError(t, err)
listCommitRequest = &pfsclient.ListCommitRequest{
Repo: []*pfsclient.Repo{outRepo},
CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
Block: true,
FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
}
listCommitResponse, err = c.PfsAPIClient.ListCommit(
ctx,
listCommitRequest,
)
require.NoError(t, err)
outCommits = listCommitResponse.CommitInfo
require.Equal(t, 1, len(outCommits))
var buffer2 bytes.Buffer
require.NoError(t, c.GetFile(outRepo.Name, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
// >> appends, so the child's 3 lines land after the parent's 3.
require.Equal(t, "foo\nfoo\nfoo\nfoo\nfoo\nfoo\n", buffer2.String())
}
// TestRemoveAndAppend runs the remove-and-append scenario without
// parallelism (a single pod).
func TestRemoveAndAppend(t *testing.T) {
    const parallelism = 1
    testParellelRemoveAndAppend(t, parallelism)
}
// TestParellelRemoveAndAppend runs the remove-and-append scenario with
// parallelism 3. NOTE(review): "Parellel" is a typo for "Parallel";
// renaming would change the name used with `go test -run`, so it is left
// as-is here.
func TestParellelRemoveAndAppend(t *testing.T) {
// This test does not pass on Travis which is why it's skipped right now As
// soon as we have a hypothesis for why this fails on travis but not
// locally we should un skip this test and try to fix it.
t.Skip()
testParellelRemoveAndAppend(t, 3)
}
// testParellelRemoveAndAppend runs two chained jobs at the given
// parallelism: the first has every pod write "foo" to /pfs/out/file; the
// second unlinks the file and writes "bar". It verifies the child job's
// output fully replaces the parent's.
func testParellelRemoveAndAppend(t *testing.T, parallelism int) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
// One 60s deadline bounds both InspectJob waits below.
ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
defer cancel() //cleanup resources
job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"echo foo > /pfs/out/file",
},
},
Parallelism: uint64(parallelism),
})
require.NoError(t, err)
inspectJobRequest1 := &ppsclient.InspectJobRequest{
Job: job1,
BlockState: true,
}
jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
require.NoError(t, err)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
var buffer bytes.Buffer
require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
// Each pod contributes one "foo" line.
require.Equal(t, strings.Repeat("foo\n", parallelism), buffer.String())
job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
Transform: &ppsclient.Transform{
Cmd: []string{"sh"},
Stdin: []string{
"unlink /pfs/out/file && echo bar > /pfs/out/file",
},
},
Parallelism: uint64(parallelism),
ParentJob: job1,
})
require.NoError(t, err)
inspectJobRequest2 := &ppsclient.InspectJobRequest{
Job: job2,
BlockState: true,
}
jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
require.NoError(t, err)
// Best-effort debugging output; the GetLogs error is deliberately ignored.
c.GetLogs(jobInfo2.Job.ID, os.Stdout)
require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
var buffer2 bytes.Buffer
require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "file", 0, 0, "", nil, &buffer2))
// The unlink means only the child's "bar" lines remain.
require.Equal(t, strings.Repeat("bar\n", parallelism), buffer2.String())
}
// TestWorkload drives 100 operations of the randomized workload
// generator against the cluster and requires it to complete cleanly.
// Note: deliberately not t.Parallel(), and the seed is time-based, so
// each run exercises a different operation sequence.
func TestWorkload(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    c := getPachClient(t)
    rng := rand.New(rand.NewSource(time.Now().UnixNano()))
    require.NoError(t, workload.RunWorkload(c, rng, 100))
}
// TestSharding writes NUMFILES files concurrently, then verifies for each
// file that reading it split into 4 block shards yields the same total
// number of bytes as reading it with a single shard.
func TestSharding(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    t.Parallel()
    repo := uniqueString("TestSharding")
    c := getPachClient(t)
    err := c.CreateRepo(repo)
    require.NoError(t, err)
    commit, err := c.StartCommit(repo, "", "")
    require.NoError(t, err)
    var wg sync.WaitGroup
    for i := 0; i < NUMFILES; i++ {
        i := i
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Deterministic per-file content, seeded by the file index.
            rng := rand.New(rand.NewSource(int64(i)))
            // BUGFIX: use a goroutine-local err. The original assigned to
            // the enclosing function's err from all goroutines at once,
            // which is a data race (and could mask failures).
            _, err := c.PutFile(repo, commit.ID, fmt.Sprintf("file%d", i), workload.NewReader(rng, KB))
            require.NoError(t, err)
        }()
    }
    wg.Wait()
    err = c.FinishCommit(repo, commit.ID)
    require.NoError(t, err)
    wg = sync.WaitGroup{}
    for i := 0; i < NUMFILES; i++ {
        i := i
        wg.Add(1)
        go func() {
            defer wg.Done()
            var buffer1Shard bytes.Buffer
            var buffer4Shard bytes.Buffer
            // Read the whole file as a single shard...
            shard := &pfsclient.Shard{FileModulus: 1, BlockModulus: 1}
            err := c.GetFile(repo, commit.ID,
                fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer1Shard)
            require.NoError(t, err)
            // ...then re-read it as 4 block shards; the shards must
            // partition the file, so the byte totals must match.
            shard.BlockModulus = 4
            for blockNumber := uint64(0); blockNumber < 4; blockNumber++ {
                shard.BlockNumber = blockNumber
                err := c.GetFile(repo, commit.ID,
                    fmt.Sprintf("file%d", i), 0, 0, "", shard, &buffer4Shard)
                require.NoError(t, err)
            }
            require.Equal(t, buffer1Shard.Len(), buffer4Shard.Len())
        }()
    }
    wg.Wait()
}
// TestFromCommit writes KB bytes to the same file in two chained commits
// and verifies that reading commit2 relative to commit1 (fromCommitID)
// returns only the delta (KB bytes), while an absolute read returns the
// accumulated 2*KB bytes.
func TestFromCommit(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
repo := uniqueString("TestFromCommit")
c := getPachClient(t)
seed := time.Now().UnixNano()
// Note: this local shadows the math/rand package for the rest of the test.
rand := rand.New(rand.NewSource(seed))
err := c.CreateRepo(repo)
require.NoError(t, err)
commit1, err := c.StartCommit(repo, "", "")
require.NoError(t, err)
_, err = c.PutFile(repo, commit1.ID, "file", workload.NewReader(rand, KB))
require.NoError(t, err)
err = c.FinishCommit(repo, commit1.ID)
require.NoError(t, err)
commit2, err := c.StartCommit(repo, commit1.ID, "")
require.NoError(t, err)
_, err = c.PutFile(repo, commit2.ID, "file", workload.NewReader(rand, KB))
require.NoError(t, err)
err = c.FinishCommit(repo, commit2.ID)
require.NoError(t, err)
// Relative read: only the bytes added after commit1.
var buffer bytes.Buffer
require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, commit1.ID, nil, &buffer))
require.Equal(t, buffer.Len(), KB)
// Absolute read: both commits' bytes.
buffer = bytes.Buffer{}
require.NoError(t, c.GetFile(repo, commit2.ID, "file", 0, 0, "", nil, &buffer))
require.Equal(t, buffer.Len(), 2*KB)
}
// TestSimple is a basic PFS smoke test: writes to a file across two
// chained commits append, and reading the parent commit still returns
// only the parent's content.
func TestSimple(t *testing.T) {
if testing.Short() {
t.Skip("Skipping integration tests in short mode")
}
t.Parallel()
c := getPachClient(t)
repo := uniqueString("TestSimple")
require.NoError(t, c.CreateRepo(repo))
commit1, err := c.StartCommit(repo, "", "")
require.NoError(t, err)
_, err = c.PutFile(repo, commit1.ID, "foo", strings.NewReader("foo\n"))
require.NoError(t, err)
require.NoError(t, c.FinishCommit(repo, commit1.ID))
commitInfos, err := c.ListCommit([]string{repo}, nil, client.CommitTypeNone, false, false, nil)
require.NoError(t, err)
require.Equal(t, 1, len(commitInfos))
var buffer bytes.Buffer
require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", nil, &buffer))
require.Equal(t, "foo\n", buffer.String())
// Second commit chained off the first appends to the same file.
commit2, err := c.StartCommit(repo, commit1.ID, "")
require.NoError(t, err)
_, err = c.PutFile(repo, commit2.ID, "foo", strings.NewReader("foo\n"))
require.NoError(t, err)
err = c.FinishCommit(repo, commit2.ID)
require.NoError(t, err)
// The parent commit is immutable: it still shows one line.
buffer = bytes.Buffer{}
require.NoError(t, c.GetFile(repo, commit1.ID, "foo", 0, 0, "", nil, &buffer))
require.Equal(t, "foo\n", buffer.String())
// The child commit shows the accumulated content.
buffer = bytes.Buffer{}
require.NoError(t, c.GetFile(repo, commit2.ID, "foo", 0, 0, "", nil, &buffer))
require.Equal(t, "foo\nfoo\n", buffer.String())
}
// TestPipelineWithMultipleInputs runs a cross-product pipeline over two
// IncrementalReduceMethod inputs and verifies the output line count after
// the initial commits (n*n) and after incremental commits to each input
// (2*n*n, then 4*n*n).
//
// Fixes vs. original: the error returned by each StartCommit is now
// checked (it was silently dropped), and the ListCommitRequest Repo
// literal uses a keyed field (go vet flags unkeyed composite literals).
func TestPipelineWithMultipleInputs(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    t.Parallel()
    c := getPachClient(t)
    inputRepo1 := uniqueString("inputRepo")
    require.NoError(t, c.CreateRepo(inputRepo1))
    inputRepo2 := uniqueString("inputRepo")
    require.NoError(t, c.CreateRepo(inputRepo2))
    pipelineName := uniqueString("pipeline")
    // The script emits one output line per (file1, file2) pair, so the
    // line count is the product of the two repos' file counts.
    require.NoError(t, c.CreatePipeline(
        pipelineName,
        "",
        []string{"bash"},
        []string{fmt.Sprintf(`
repo1=%s
repo2=%s
echo $repo1
ls -1 /pfs/$repo1
echo $repo2
ls -1 /pfs/$repo2
for f1 in /pfs/$repo1/*
do
for f2 in /pfs/$repo2/*
do
v1=$(<$f1)
v2=$(<$f2)
echo $v1$v2 >> /pfs/out/file
done
done
`, inputRepo1, inputRepo2)},
        4,
        []*ppsclient.PipelineInput{
            {
                Repo:   &pfsclient.Repo{Name: inputRepo1},
                Method: client.IncrementalReduceMethod,
            },
            {
                Repo:   &pfsclient.Repo{Name: inputRepo2},
                Method: client.IncrementalReduceMethod,
            },
        },
    ))
    content := "foo"
    numfiles := 10
    commit1, err := c.StartCommit(inputRepo1, "", "")
    require.NoError(t, err) // was unchecked in the original
    for i := 0; i < numfiles; i++ {
        _, err = c.PutFile(inputRepo1, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
        require.NoError(t, err)
    }
    require.NoError(t, c.FinishCommit(inputRepo1, commit1.ID))
    commit2, err := c.StartCommit(inputRepo2, "", "")
    require.NoError(t, err) // was unchecked in the original
    for i := 0; i < numfiles; i++ {
        _, err = c.PutFile(inputRepo2, commit2.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
        require.NoError(t, err)
    }
    require.NoError(t, c.FinishCommit(inputRepo2, commit2.ID))
    listCommitRequest := &pfsclient.ListCommitRequest{
        Repo:       []*pfsclient.Repo{{Name: pipelineName}},
        CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
        Block:      true,
    }
    listCommitResponse, err := c.PfsAPIClient.ListCommit(
        context.Background(),
        listCommitRequest,
    )
    require.NoError(t, err)
    outCommits := listCommitResponse.CommitInfo
    require.Equal(t, 1, len(outCommits))
    fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", nil, false)
    require.NoError(t, err)
    require.Equal(t, 1, len(fileInfos))
    var buffer bytes.Buffer
    require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
    lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
    // Full cross product of the initial commits.
    require.Equal(t, numfiles*numfiles, len(lines))
    for _, line := range lines {
        require.Equal(t, len(content)*2, len(line))
    }
    // Incremental commit to input 1: adds numfiles*numfiles new pairs.
    commit3, err := c.StartCommit(inputRepo1, commit1.ID, "")
    require.NoError(t, err) // was unchecked in the original
    for i := 0; i < numfiles; i++ {
        _, err = c.PutFile(inputRepo1, commit3.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
        require.NoError(t, err)
    }
    require.NoError(t, c.FinishCommit(inputRepo1, commit3.ID))
    listCommitRequest.FromCommit = append(listCommitRequest.FromCommit, outCommits[0].Commit)
    listCommitResponse, err = c.PfsAPIClient.ListCommit(
        context.Background(),
        listCommitRequest,
    )
    require.NoError(t, err)
    outCommits = listCommitResponse.CommitInfo
    require.Equal(t, 1, len(outCommits))
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
    lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
    require.Equal(t, 2*numfiles*numfiles, len(lines))
    for _, line := range lines {
        require.Equal(t, len(content)*2, len(line))
    }
    // Incremental commit to input 2: doubles the accumulated pairs again.
    commit4, err := c.StartCommit(inputRepo2, commit2.ID, "")
    require.NoError(t, err) // was unchecked in the original
    for i := 0; i < numfiles; i++ {
        _, err = c.PutFile(inputRepo2, commit4.ID, fmt.Sprintf("file2-%d", i), strings.NewReader(content))
        require.NoError(t, err)
    }
    require.NoError(t, c.FinishCommit(inputRepo2, commit4.ID))
    listCommitRequest.FromCommit[0] = outCommits[0].Commit
    listCommitResponse, err = c.PfsAPIClient.ListCommit(
        context.Background(),
        listCommitRequest,
    )
    require.NoError(t, err)
    outCommits = listCommitResponse.CommitInfo
    require.Equal(t, 1, len(outCommits))
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
    lines = strings.Split(strings.TrimSpace(buffer.String()), "\n")
    require.Equal(t, 4*numfiles*numfiles, len(lines))
    for _, line := range lines {
        require.Equal(t, len(content)*2, len(line))
    }
}
// TestPipelineWithGlobalMethod verifies that with GlobalMethod every pod
// sees the entire input repo: each of the `parallelism` pods writes the
// total file count, so the merged output has one identical line per pod.
//
// Fix vs. original: the ListCommitRequest Repo literal uses a keyed field
// (go vet flags unkeyed composite literals).
func TestPipelineWithGlobalMethod(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    t.Parallel()
    c := getPachClient(t)
    globalRepo := uniqueString("inputRepo")
    require.NoError(t, c.CreateRepo(globalRepo))
    numfiles := 20
    pipelineName := uniqueString("pipeline")
    parallelism := 2
    require.NoError(t, c.CreatePipeline(
        pipelineName,
        "",
        []string{"bash"},
        // this script simply outputs the number of files under the global repo
        []string{fmt.Sprintf(`
numfiles=(/pfs/%s/*)
numfiles=${#numfiles[@]}
echo $numfiles > /pfs/out/file
`, globalRepo)},
        uint64(parallelism),
        []*ppsclient.PipelineInput{
            {
                Repo:   &pfsclient.Repo{Name: globalRepo},
                Method: client.GlobalMethod,
            },
        },
    ))
    content := "foo"
    commit, err := c.StartCommit(globalRepo, "", "")
    require.NoError(t, err)
    for i := 0; i < numfiles; i++ {
        _, err = c.PutFile(globalRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader(content))
        require.NoError(t, err)
    }
    require.NoError(t, c.FinishCommit(globalRepo, commit.ID))
    listCommitRequest := &pfsclient.ListCommitRequest{
        Repo:       []*pfsclient.Repo{{Name: pipelineName}},
        CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
        Block:      true,
    }
    listCommitResponse, err := c.PfsAPIClient.ListCommit(
        context.Background(),
        listCommitRequest,
    )
    require.NoError(t, err)
    outCommits := listCommitResponse.CommitInfo
    require.Equal(t, 1, len(outCommits))
    fileInfos, err := c.ListFile(pipelineName, outCommits[0].Commit.ID, "", "", nil, false)
    require.NoError(t, err)
    require.Equal(t, 1, len(fileInfos))
    var buffer bytes.Buffer
    require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
    lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
    require.Equal(t, parallelism, len(lines)) // each job outputs one line
    for _, line := range lines {
        // Every pod saw the whole repo, so each line is the full count.
        require.Equal(t, fmt.Sprintf("%d", numfiles), line)
    }
}
// TestPipelineWithPrevRepoAndIncrementalReduceMethod verifies that a
// pipeline using client.IncrementalReduceMethod receives its previous
// output under /pfs/prev, so each run appends the new input on top of
// the prior result.
//
// Fix over the original: both PutFile errors were assigned to err but
// never asserted on; they are now checked with require.NoError.
func TestPipelineWithPrevRepoAndIncrementalReduceMethod(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		// Copy the input file, then append the previous output if present.
		[]string{fmt.Sprintf(`
cat /pfs/%s/file >>/pfs/out/file
if [ -d "/pfs/prev" ]; then
cat /pfs/prev/file >>/pfs/out/file
fi
`, repo)},
		1,
		[]*ppsclient.PipelineInput{
			{
				Repo:   &pfsclient.Repo{Name: repo},
				Method: client.IncrementalReduceMethod,
			},
		},
	))
	commit1, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err) // was silently dropped in the original
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	// Wait for the first output commit.
	listCommitRequest := &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
	}
	listCommitResponse, err := c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits := listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer))
	lines := strings.Split(strings.TrimSpace(buffer.String()), "\n")
	require.Equal(t, 1, len(lines))
	require.Equal(t, "foo", lines[0])
	// Second input commit: this job should see the first run's output
	// mounted at /pfs/prev.
	commit2, err := c.StartCommit(repo, commit1.ID, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err) // was silently dropped in the original
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	listCommitRequest = &pfsclient.ListCommitRequest{
		Repo:       []*pfsclient.Repo{{pipelineName}},
		CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
		Block:      true,
		FromCommit: []*pfsclient.Commit{outCommits[0].Commit},
	}
	listCommitResponse, err = c.PfsAPIClient.ListCommit(
		context.Background(),
		listCommitRequest,
	)
	require.NoError(t, err)
	outCommits = listCommitResponse.CommitInfo
	require.Equal(t, 1, len(outCommits))
	var buffer2 bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, outCommits[0].Commit.ID, "file", 0, 0, "", nil, &buffer2))
	lines = strings.Split(strings.TrimSpace(buffer2.String()), "\n")
	// new inputs (foo+bar) followed by the previous output (foo)
	require.Equal(t, 3, len(lines))
	require.Equal(t, "foo", lines[0])
	require.Equal(t, "bar", lines[1])
	require.Equal(t, "foo", lines[2])
}
// TestPipelineThatUseNonexistentInputs checks that creating a pipeline
// whose input repo does not exist is rejected with an error.
func TestPipelineThatUseNonexistentInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	pachClient := getPachClient(t)
	name := uniqueString("pipeline")
	// The only thing that matters here is the bogus input repo name.
	inputs := []*ppsclient.PipelineInput{
		{Repo: &pfsclient.Repo{Name: "nonexistent"}},
	}
	err := pachClient.CreatePipeline(name, "", []string{"bash"}, []string{""}, 1, inputs)
	require.YesError(t, err)
}
// TestPipelineWhoseInputsGetDeleted checks that an input repo cannot be
// deleted while a pipeline still consumes it, and that deleting the
// pipeline and its output repo first makes the deletion succeed.
//
// Fix over the original: the stdin was built with fmt.Sprintf(``, repo),
// a Sprintf call with an argument but no formatting directives (flagged
// by `go vet`) that produced the garbage string "%!(EXTRA string=...)".
// The transform never runs in this test, so a plain empty stdin is used.
func TestPipelineWhoseInputsGetDeleted(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("repo")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		1,
		[]*ppsclient.PipelineInput{
			{
				Repo: &pfsclient.Repo{Name: repo},
			},
		},
	))
	// Shouldn't be able to delete the input repo because the pipeline
	// is still running
	require.YesError(t, c.DeleteRepo(repo))
	// The correct flow to delete the input repo
	require.NoError(t, c.DeletePipeline(pipelineName))
	require.NoError(t, c.DeleteRepo(pipelineName))
	require.NoError(t, c.DeleteRepo(repo))
}
// This test fails if you updated some static assets (such as doc/pipeline_spec.md)
// that are used in code but forgot to run:
// $ make assets
// TestAssets verifies that the embedded copies of static assets (built
// by `make assets`) match the source files on disk under $GOPATH.
//
// Fix over the original: the loop variable was named `path`, shadowing
// the imported `path` package inside the loop body; renamed to assetPath.
func TestAssets(t *testing.T) {
	assetPaths := []string{"doc/pipeline_spec.md"}
	for _, assetPath := range assetPaths {
		// The on-disk source of truth, resolved relative to GOPATH.
		doc, err := ioutil.ReadFile(filepath.Join(os.Getenv("GOPATH"), "src/github.com/pachyderm/pachyderm/", assetPath))
		if err != nil {
			t.Fatal(err)
		}
		// The generated, embedded copy.
		asset, err := pachyderm.Asset(assetPath)
		if err != nil {
			t.Fatal(err)
		}
		require.Equal(t, doc, asset)
	}
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction
// It looks like this:
// A
// | \
// v v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	aRepo := uniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B copies A's file verbatim.
	bPipeline := uniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(aRepo)}},
	))
	// C diffs A's file against B's copy; since B is a verbatim copy,
	// the diff (and hence C's output file) must always be empty.
	cPipeline := uniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		1,
		[]*ppsclient.PipelineInput{
			{Repo: client.NewRepo(aRepo)},
			{Repo: client.NewRepo(bPipeline)},
		},
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "", "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	// FlushCommit returns one downstream commit for B and one for C.
	commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commit2, err := c.StartCommit(aRepo, "", "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	commitInfos, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(aRepo, commit2.ID)}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// There should only be 2 commits on cRepo
	commitInfos, err = c.ListCommit([]string{cPipeline}, nil, pfsclient.CommitType_COMMIT_TYPE_READ, false, false, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		// C takes the diff of 2 files that should always be the same, so we
		// expect an empty file
		fileInfo, err := c.InspectFile(cPipeline, commitInfo.Commit.ID, "file", "", nil)
		require.NoError(t, err)
		require.Equal(t, uint64(0), fileInfo.SizeBytes)
	}
}
// TestDirectory checks that jobs can create and write into a directory
// under /pfs/out, and that a child job's output is layered on top of its
// parent's: job2 (ParentJob: job1) appends to dir/file and the result
// contains both jobs' lines.
func TestDirectory(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
	defer cancel() //cleanup resources
	// job1: each of the 3 shards writes one "foo" line into /pfs/out/dir/file.
	job1, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"mkdir /pfs/out/dir",
				"echo foo >> /pfs/out/dir/file",
			},
		},
		Parallelism: 3,
	})
	require.NoError(t, err)
	inspectJobRequest1 := &ppsclient.InspectJobRequest{
		Job:        job1,
		BlockState: true, // block until the job reaches a terminal state
	}
	jobInfo1, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest1)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo1.State)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo1.OutputCommit.Repo.Name, jobInfo1.OutputCommit.ID, "dir/file", 0, 0, "", nil, &buffer))
	// one "foo" line per shard (Parallelism: 3)
	require.Equal(t, "foo\nfoo\nfoo\n", buffer.String())
	// job2 builds on job1's output and appends one "bar" line per shard.
	job2, err := c.PpsAPIClient.CreateJob(context.Background(), &ppsclient.CreateJobRequest{
		Transform: &ppsclient.Transform{
			Cmd: []string{"sh"},
			Stdin: []string{
				"echo bar >> /pfs/out/dir/file",
			},
		},
		Parallelism: 3,
		ParentJob:   job1,
	})
	require.NoError(t, err)
	inspectJobRequest2 := &ppsclient.InspectJobRequest{
		Job:        job2,
		BlockState: true,
	}
	jobInfo2, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest2)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS, jobInfo2.State)
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(jobInfo2.OutputCommit.Repo.Name, jobInfo2.OutputCommit.ID, "dir/file", 0, 0, "", nil, &buffer))
	require.Equal(t, "foo\nfoo\nfoo\nbar\nbar\nbar\n", buffer.String())
}
// TestFailedJobReadData runs a job that writes output and then exits
// non-zero, and verifies the failed job's output commit is cancelled but
// its data is still readable.
func TestFailedJobReadData(t *testing.T) {
	// We want to enable users to be able to read data from cancelled commits for debugging purposes
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	shards := 0
	c := getPachClient(t)
	repo := uniqueString("TestJob_Foo")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	fileContent := "foo\n"
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fileContent))
	require.NoError(t, err)
	err = c.FinishCommit(repo, commit.ID)
	require.NoError(t, err)
	// The job writes a file, then deliberately fails via "exit 1".
	job, err := c.CreateJob(
		"",
		[]string{"bash"},
		[]string{
			"echo fubar > /pfs/out/file",
			"exit 1",
		},
		uint64(shards),
		[]*ppsclient.JobInput{
			{
				Commit: commit,
				Method: client.ReduceMethod,
			},
		},
		"",
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // wait for the job to reach a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_FAILURE.String(), jobInfo.State.String())
	require.True(t, jobInfo.Parallelism > 0)
	// Best-effort: dump job logs to stdout for debugging; the returned
	// error is deliberately ignored.
	c.GetLogs(jobInfo.Job.ID, os.Stdout)
	// The failed job's output commit should be finished but cancelled...
	commitInfo, err := c.InspectCommit(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID)
	require.NoError(t, err)
	require.Equal(t, pfsclient.CommitType_COMMIT_TYPE_READ, commitInfo.CommitType)
	require.Equal(t, true, commitInfo.Cancelled)
	// ...yet the data it wrote before failing must still be readable.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(jobInfo.OutputCommit.Repo.Name, jobInfo.OutputCommit.ID, "file", 0, 0, "", nil, &buffer))
	require.Equal(t, "fubar", strings.TrimSpace(buffer.String()))
}
// TestFlushCommit checks that FlushCommit on a source-repo commit blocks
// until every downstream pipeline stage has produced its output commit.
// TestFlushCommit builds a linear chain of pipelines (repo_0 -> repo_1
// -> ... -> repo_5) and checks that FlushCommit on a commit to the
// source repo blocks until all downstream stages have produced output,
// returning one commit info per stage. It runs once for an orphan
// commit and once for a commit with a parent.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline
	numStages := 5
	for i := 0; i < numStages; i++ {
		repo := makeRepoName(i)
		// Stage i+1 copies stage i's file verbatim.
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		))
	}
	// test commits to the source repo with the given parent and asserts
	// FlushCommit reports one downstream commit per stage.
	test := func(parent string) string {
		commit, err := c.StartCommit(sourceRepo, parent, "")
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
		commitInfos, err := c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, numStages, len(commitInfos))
		return commit.ID
	}
	// Run the test twice, once on a orphan commit and another on
	// a commit with a parent
	commit := test("")
	test(commit)
}
// TestFlushCommitWithFailure is similar to TestFlushCommit except that
// the pipeline is designed to fail
// TestFlushCommitWithFailure is similar to TestFlushCommit except that
// one pipeline stage is designed to fail, so FlushCommit must return an
// error.
//
// Fix over the original: err.Error() was printed *before* asserting the
// error is non-nil, which would panic with a nil pointer dereference if
// FlushCommit unexpectedly succeeded. The assertion now comes first.
func TestFlushCommitWithFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	prefix := uniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s_%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline; the third stage is designed to fail
	// because it copies a file that doesn't exist.
	numStages := 5
	for i := 0; i < numStages; i++ {
		fileName := "file"
		if i == 3 {
			fileName = "nonexistent"
		}
		repo := makeRepoName(i)
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, fileName), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		))
	}
	commit, err := c.StartCommit(sourceRepo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
	_, err = c.FlushCommit([]*pfsclient.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
	require.YesError(t, err)
	fmt.Println(err.Error()) // surface the failure message for debugging
}
// TestRecreatePipeline (re-creating a pipeline after deleting it) tracks #432
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	// createPipelineAndRunJob creates the copy pipeline, commits to its
	// input repo, and blocks until an output commit appears.
	createPipelineAndRunJob := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			1,
			[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
		))
		commit, err := c.StartCommit(repo, "", "")
		require.NoError(t, err)
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
		// Block (with a 30s deadline) until the pipeline's output repo
		// has a READ commit.
		listCommitRequest := &pfsclient.ListCommitRequest{
			Repo:       []*pfsclient.Repo{{pipeline}},
			CommitType: pfsclient.CommitType_COMMIT_TYPE_READ,
			Block:      true,
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel()
		listCommitResponse, err := c.PfsAPIClient.ListCommit(
			ctx,
			listCommitRequest,
		)
		require.NoError(t, err)
		outCommits := listCommitResponse.CommitInfo
		require.Equal(t, 1, len(outCommits))
	}
	// Do it twice. We expect jobs to be created on both runs.
	createPipelineAndRunJob()
	require.NoError(t, c.DeleteRepo(pipeline))
	require.NoError(t, c.DeletePipeline(pipeline))
	createPipelineAndRunJob()
}
// TestPipelineState checks pipeline state transitions: a healthy
// pipeline reports PIPELINE_RUNNING; after its output repo is deleted
// out from under it and a job is triggered, it must be observed in
// PIPELINE_RESTARTING at least once.
func TestPipelineState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	repo := uniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := uniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: client.NewRepo(repo)}},
	))
	time.Sleep(5 * time.Second) // wait for this pipeline to get picked up
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, ppsclient.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	// Now we introduce an error to the pipeline by removing its output repo
	// and starting a job
	require.NoError(t, c.DeleteRepo(pipeline))
	commit, err := c.StartCommit(repo, "", "")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	// So the state of the pipeline will alternate between running and
	// restarting. We just want to make sure that it has definitely restarted.
	// Sample the state once per second for 10 seconds.
	var states []interface{}
	for i := 0; i < 10; i++ {
		time.Sleep(1 * time.Second)
		pipelineInfo, err = c.InspectPipeline(pipeline)
		require.NoError(t, err)
		states = append(states, pipelineInfo.State)
	}
	require.EqualOneOf(t, states, ppsclient.PipelineState_PIPELINE_RESTARTING)
}
// TestClusterFunctioningAfterMembershipChange scales the pachd
// replication controller to a new replica count and then re-runs TestJob
// (defined elsewhere in this file) to confirm the cluster still works.
// Note: this test is deliberately not parallel — it mutates the shared
// cluster.
func TestClusterFunctioningAfterMembershipChange(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	k := getKubeClient(t)
	scalePachd(t, k)
	// Wait for the cluster to stablize... ideally we shouldn't have to
	// do that.
	time.Sleep(20 * time.Second)
	TestJob(t)
}
// scalePachd scales the number of pachd nodes to anywhere from 1 to
// twice the original number
// It's guaranteed that the new replica number will be different from
// the original
func scalePachd(t *testing.T, k *kube.Client) {
	controllers := k.ReplicationControllers(api.NamespaceDefault)
	pachdRc, err := controllers.Get("pachd")
	require.NoError(t, err)
	original := pachdRc.Spec.Replicas
	// Keep drawing from [1, 2*original] until we land on a count that
	// differs from the current one.
	newReplicas := original
	for newReplicas == original {
		newReplicas = int32(rand.Intn(int(original)*2) + 1)
	}
	pachdRc.Spec.Replicas = newReplicas
	fmt.Printf("scaling pachd to %d replicas\n", pachdRc.Spec.Replicas)
	_, err = controllers.Update(pachdRc)
	require.NoError(t, err)
}
// TestScrubbedErrors checks that API errors surfaced to the client carry
// clean, user-facing messages (no internal stack traces or gRPC noise).
//
// Fix over the original: the temp file returned by os.Create was never
// closed, leaking a file descriptor; it is now closed in the cleanup
// defer before the file is removed.
func TestScrubbedErrors(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	_, err := c.InspectPipeline("blah")
	require.Equal(t, "PipelineInfos blah not found", err.Error())
	err = c.CreatePipeline(
		"lskdjf$#%^ERTYC",
		"",
		[]string{},
		nil,
		1,
		[]*ppsclient.PipelineInput{{Repo: &pfsclient.Repo{Name: "test"}}},
	)
	require.Equal(t, "Repo test not found", err.Error())
	_, err = c.CreateJob("askjdfhgsdflkjh", []string{}, []string{}, 0, []*ppsclient.JobInput{client.NewJobInput("bogusRepo", "bogusCommit", client.DefaultMethod)}, "")
	require.Matches(t, "Repo job_.* not found", err.Error())
	_, err = c.InspectJob("blah", true)
	require.Equal(t, "JobInfos blah not found", err.Error())
	// GetLogs needs a writer; use a throwaway file in $HOME.
	home := os.Getenv("HOME")
	f, err := os.Create(filepath.Join(home, "/tmpfile"))
	require.NoError(t, err)
	defer func() {
		f.Close() // was missing: fd leaked in the original
		os.Remove(filepath.Join(home, "/tmpfile"))
	}()
	err = c.GetLogs("bogusJobId", f)
	require.Equal(t, "Job bogusJobId not found", err.Error())
}
// TestAcceptReturnCode verifies that a job whose command exits with a
// code listed in AcceptReturnCode is still reported as JOB_SUCCESS.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	t.Parallel()
	c := getPachClient(t)
	// The transform exits 1, but 1 is whitelisted via AcceptReturnCode.
	job, err := c.PpsAPIClient.CreateJob(
		context.Background(),
		&ppsclient.CreateJobRequest{
			Transform: &ppsclient.Transform{
				Cmd:              []string{"sh"},
				Stdin:            []string{"exit 1"},
				AcceptReturnCode: []int64{1},
			},
		},
	)
	require.NoError(t, err)
	inspectJobRequest := &ppsclient.InspectJobRequest{
		Job:        job,
		BlockState: true, // wait for the job to reach a terminal state
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	jobInfo, err := c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	require.Equal(t, ppsclient.JobState_JOB_SUCCESS.String(), jobInfo.State.String())
}
// getPachClient returns an API client connected to the local test
// cluster, failing the test if the connection cannot be set up.
//
// Fix over the original: the local variable was named `client`, which
// shadowed the imported `client` package; renamed to `c`.
func getPachClient(t *testing.T) *client.APIClient {
	c, err := client.NewFromAddress("0.0.0.0:30650")
	require.NoError(t, err)
	return c
}
// getKubeClient returns a Kubernetes client pointed at the local test
// cluster's API server, failing the test on error.
func getKubeClient(t *testing.T) *kube.Client {
	k, err := kube.New(&kube_client.Config{
		Host:     "0.0.0.0:8080",
		Insecure: false,
	})
	require.NoError(t, err)
	return k
}
// uniqueString returns the prefix followed by "_" and a random
// 12-character suffix, so repeated test runs don't collide on names.
func uniqueString(prefix string) string {
	suffix := uuid.NewWithoutDashes()[0:12]
	return prefix + "_" + suffix
}
|
package server
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"testing"
"time"
"golang.org/x/sync/errgroup"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/auth"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pps"
pfspretty "github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/backoff"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
"github.com/pachyderm/pachyderm/src/server/pkg/pretty"
tu "github.com/pachyderm/pachyderm/src/server/pkg/testutil"
"github.com/pachyderm/pachyderm/src/server/pkg/uuid"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppspretty "github.com/pachyderm/pachyderm/src/server/pps/pretty"
"github.com/pachyderm/pachyderm/src/server/pps/server/githook"
etcd "github.com/coreos/etcd/clientv3"
"github.com/gogo/protobuf/types"
prom_api "github.com/prometheus/client_golang/api"
prom_api_v1 "github.com/prometheus/client_golang/api/prometheus/v1"
prom_model "github.com/prometheus/common/model"
apps "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
	// InCloudEnv names the environment variable that, when set, indicates
	// the tests are being run in a real cluster in the cloud.
	InCloudEnv = "PACH_TEST_CLOUD"
)
// TestSimplePipeline is the smoke test for pipelines: commit one file,
// run a single-worker copy pipeline over it, and verify the file comes
// out of the output commit unchanged.
func TestSimplePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestSimplePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("TestSimplePipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// FlushCommit blocks until the pipeline has processed commit1.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
}
// TestPipelineWithParallelism runs a copy pipeline with Constant: 4
// parallelism over many input files and verifies every file reaches the
// output commit with its content intact.
//
// Fix over the original: the PutFile error inside the loop was assigned
// but never checked; it is now asserted on each iteration.
func TestPipelineWithParallelism(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithParallelism_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 1000
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
		require.NoError(t, err) // was silently dropped in the original
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Every input file must appear in the output with matching content.
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("%d", i), buf.String())
	}
}
// TestPipelineWithLargeFiles pushes files larger than the PFS chunk size
// (pfs.ChunkSize + i*MB each) through a copy pipeline and verifies both
// the reported sizes and the exact contents survive the round trip.
func TestPipelineWithLargeFiles(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithLargeFiles_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Fixed seed so the generated contents are reproducible.
	r := rand.New(rand.NewSource(99))
	numFiles := 10
	var fileContents []string
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		// Each file exceeds one chunk; sizes increase by 1MB per file.
		fileContent := workload.RandString(r, int(pfs.ChunkSize)+i*MB)
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i),
			strings.NewReader(fileContent))
		require.NoError(t, err)
		fileContents = append(fileContents, fileContent)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit := commitInfos[0].Commit
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		fileName := fmt.Sprintf("file-%d", i)
		fileInfo, err := c.InspectFile(commit.Repo.Name, commit.ID, fileName)
		require.NoError(t, err)
		require.Equal(t, int(pfs.ChunkSize)+i*MB, int(fileInfo.SizeBytes))
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, fileName, 0, 0, &buf))
		// we don't wanna use the `require` package here since it prints
		// the strings, which would clutter the output.
		if fileContents[i] != buf.String() {
			t.Fatalf("file content does not match")
		}
	}
}
// TestDatumDedup verifies datum deduplication: after a slow (10s/datum)
// pipeline processes a datum once, an identical commit must flush almost
// instantly because the datum is not reprocessed.
//
// Fix over the original: the PutFile error was assigned but never
// checked; it is now asserted.
func TestDatumDedup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestDatumDedup_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err) // was silently dropped in the original
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 10 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 10",
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// An empty commit leaves the datum unchanged.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Since we did not change the datum, the datum should not be processed
	// again, which means that the job should complete instantly.
	// The 5s deadline (far below the 10s sleep) enforces that.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	stream, err := c.PfsAPIClient.FlushCommit(
		ctx,
		&pfs.FlushCommitRequest{
			Commits: []*pfs.Commit{commit2},
		})
	require.NoError(t, err)
	_, err = stream.Recv()
	require.NoError(t, err)
}
// TestPipelineInputDataModification verifies that a pipeline tracks
// modifications of its input: replacing a file's content, then deleting
// it and adding a different file, each produce a correct output commit.
//
// Fix over the original: the first PutFile error was assigned but never
// checked; it is now asserted.
func TestPipelineInputDataModification(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineInputDataModification_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err) // was silently dropped in the original
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	// replace the contents of 'file' in dataRepo (from "foo" to "bar")
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(dataRepo, commit2.ID, "file"))
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	// Delete 'file' and add 'file2'; the output must reflect both changes.
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(dataRepo, commit3.ID, "file"))
	_, err = c.PutFile(dataRepo, commit3.ID, "file2", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// 'file' is gone from the output; 'file2' carries the new content.
	require.YesError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file2", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	// One output commit per input commit.
	commitInfos, err = c.ListCommit(pipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
}
// TestMultipleInputsFromTheSameBranch crosses two named atom inputs that
// both read from the same repo/branch (different glob directories), and
// checks that a new input commit touching either directory triggers a
// correct output commit.
//
// Fix over the original: two stdin lines were wrapped in fmt.Sprintf
// with no formatting directives and no arguments (staticcheck S1039);
// they are now plain string literals with identical content.
func TestMultipleInputsFromTheSameBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameBranch_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "dirA/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "dirB/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"cat /pfs/out/file",
			"cat /pfs/dirA/dirA/file >> /pfs/out/file",
			"cat /pfs/dirB/dirB/file >> /pfs/out/file",
		},
		nil,
		// Same repo twice, distinguished by input name and glob.
		client.NewCrossInput(
			client.NewAtomInputOpts("dirA", dataRepo, "", "/dirA/*", false),
			client.NewAtomInputOpts("dirB", dataRepo, "", "/dirB/*", false),
		),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nfoo\n", buf.String())
	// Update only dirA; the output reflects the new dirA content.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "dirA/file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nbar\nfoo\n", buf.String())
	// Update only dirB; the output reflects the new dirB content.
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "dirB/file", strings.NewReader("buzz\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nbar\nfoo\nbuzz\n", buf.String())
	// One output commit per input commit.
	commitInfos, err = c.ListCommit(pipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
}
// TestMultipleInputsFromTheSameRepoDifferentBranches crosses two inputs that
// read different branches of the same repo and verifies that a commit on each
// branch is visible under its own /pfs mount in the resulting job.
func TestMultipleInputsFromTheSameRepoDifferentBranches(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameRepoDifferentBranches_data")
	require.NoError(t, c.CreateRepo(dataRepo))

	branchA := "branchA"
	branchB := "branchB"

	pipeline := tu.UniqueString("pipeline")
	// Both inputs come from the same repo, so they are given distinct names
	// ("branch-a"/"branch-b") to get distinct /pfs mount points.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"cat /pfs/branch-a/file >> /pfs/out/file",
			"cat /pfs/branch-b/file >> /pfs/out/file",
		},
		nil,
		client.NewCrossInput(
			client.NewAtomInputOpts("branch-a", dataRepo, branchA, "/*", false),
			client.NewAtomInputOpts("branch-b", dataRepo, branchB, "/*", false),
		),
		"",
		false,
	))

	// Commit one file to each branch; all client errors are checked.
	commitA, err := c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commitA.ID))

	commitB, err := c.StartCommit(dataRepo, branchB)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commitB.ID))

	// Wait for the downstream job and check that the output concatenates
	// branch A's data before branch B's, matching the stdin script order.
	iter, err := c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil)
	require.NoError(t, err)
	commits := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "data A\ndata B\n", buffer.String())
}
// TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental runs an
// incremental pipeline over two branches of the same repo. The job writes
// marker files (prev-a/prev-b) only when output from a previous run is
// already present in /pfs/out, which is only the case once Incremental
// re-runs against prior output.
func TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental_data")
	require.NoError(t, c.CreateRepo(dataRepo))

	branchA := "branchA"
	branchB := "branchB"

	pipeline := tu.UniqueString("pipeline")
	// Inputs from the same repo must carry distinct names so their /pfs
	// mount points don't collide.
	req := &pps.CreatePipelineRequest{
		Pipeline: &pps.Pipeline{Name: pipeline},
		Transform: &pps.Transform{
			Cmd: []string{"bash"},
			Stdin: []string{
				"ls /pfs/out/file-a && echo true >> /pfs/out/prev-a",
				"ls /pfs/out/file-b && echo true >> /pfs/out/prev-b",
				"ls /pfs/branch-a/file && echo true >> /pfs/out/file-a",
				"ls /pfs/branch-b/file && echo true >> /pfs/out/file-b",
			},
		},
		Input: client.NewCrossInput(
			client.NewAtomInputOpts("branch-a", dataRepo, branchA, "/*", false),
			client.NewAtomInputOpts("branch-b", dataRepo, branchB, "/*", false),
		),
		Incremental: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	_, err := c.PpsAPIClient.CreatePipeline(ctx, req)
	require.NoError(t, err)

	// Make three commits: branchA, branchB, then branchA again. After the
	// first pair there is no previous output yet, so neither prev-a nor
	// prev-b should exist; after the third commit both should appear.
	commit, err := c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))

	commit, err = c.StartCommit(dataRepo, branchB)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data B\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))

	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commits := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer := bytes.Buffer{}
	require.YesError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-a", 0, 0, &buffer))
	buffer.Reset()
	require.YesError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-b", 0, 0, &buffer))

	// Third commit: the job now starts from the previous run's output, so
	// both marker files should be present.
	commit, err = c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))

	iter, err = c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commits = collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-a", 0, 0, &buffer))
	buffer.Reset()
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-b", 0, 0, &buffer))
}
// TestPipelineFailure creates a pipeline that cannot process its datum and
// verifies the job ends in JOB_FAILURE with a datum-related failure reason.
func TestPipelineFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestPipelineFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// "exit 1" is passed as the command itself, not via a shell, so the
	// worker presumably cannot exec it — either way the job must fail.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"exit 1"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Poll until exactly one job has been created for the pipeline.
	var jobInfos []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// BlockState makes InspectJob wait for a terminal job state.
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	})
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	require.True(t, strings.Contains(jobInfo.Reason, "datum"))
}
// TestEgressFailure verifies that a job configured with an invalid egress URL
// ends in JOB_FAILURE with a reason mentioning "egress".
func TestEgressFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestEgressFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))

	// This pipeline should fail because the egress URL is invalid
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			Input: client.NewAtomInput(dataRepo, "/"),
			// Keyed literal: `go vet`'s composites check flags unkeyed
			// fields in structs from other packages.
			Egress: &pps.Egress{URL: "invalid://blahblah"},
		})
	require.NoError(t, err)

	// Poll until exactly one job has been created for the pipeline.
	var jobInfos []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// BlockState makes InspectJob wait for a terminal job state.
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	})
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	require.True(t, strings.Contains(jobInfo.Reason, "egress"))
}
// TestLazyPipelinePropagation chains two pipelines that both declare lazy
// atom inputs, and verifies that the Lazy flag is preserved in the input
// spec recorded on each pipeline's jobs.
func TestLazyPipelinePropagation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestLazyPipelinePropagation_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// pipeline-A reads the data repo lazily (last NewAtomInputOpts arg).
	pipelineA := tu.UniqueString("pipeline-A")
	require.NoError(t, c.CreatePipeline(
		pipelineA,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInputOpts("", dataRepo, "", "/*", true),
		"",
		false,
	))
	// pipeline-B reads pipeline-A's output, also lazily.
	pipelineB := tu.UniqueString("pipeline-B")
	require.NoError(t, c.CreatePipeline(
		pipelineB,
		"",
		[]string{"cp", path.Join("/pfs", pipelineA, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInputOpts("", pipelineA, "", "/*", true),
		"",
		false,
	))

	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))

	// Wait for both downstream jobs to finish before inspecting them.
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)

	// Each pipeline's single job must carry Lazy=true in its atom input.
	jobInfos, err := c.ListJob(pipelineA, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.NotNil(t, jobInfos[0].Input.Atom)
	require.Equal(t, true, jobInfos[0].Input.Atom.Lazy)

	jobInfos, err = c.ListJob(pipelineB, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.NotNil(t, jobInfos[0].Input.Atom)
	require.Equal(t, true, jobInfos[0].Input.Atom.Lazy)
}
// TestLazyPipeline runs a pipeline over a lazy input and checks that a file
// that is read gets copied through while an untouched sibling file does not
// block job completion.
func TestLazyPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestLazyPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo: dataRepo,
					Glob: "/",
					// Lazy input: file contents are only materialized
					// when the job actually reads them.
					Lazy: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	// We put 2 files, 1 of which will never be touched by the pipeline code.
	// This is an important part of the correctness of this test because the
	// job-shim sets up a goro for each pipe, pipes that are never opened will
	// leak but that shouldn't prevent the job from completing.
	_, err = c.PutFile(dataRepo, "master", "file2", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))

	// Wait for the job and verify the copied file's contents.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestEmptyFiles runs a pipeline with EmptyFiles set on its input. The job
// script exits 1 if the mounted input file has any content (`[ -s ... ]`),
// then symlinks the input into the output; the test finally reads the
// original data back from the output commit.
func TestEmptyFiles(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestEmptyFiles_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// NOTE: bash requires a space before the closing "]";
					// without it the test command is a syntax error and the
					// -s (non-empty) check never actually runs.
					fmt.Sprintf("if [ -s /pfs/%s/file ]; then exit 1; fi", dataRepo),
					fmt.Sprintf("ln -s /pfs/%s/file /pfs/out/file", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:       dataRepo,
					Glob:       "/*",
					EmptyFiles: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))

	// Wait for the job; the symlinked output must contain the real data.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// There's an issue where if you use cp with certain flags, it might copy
// special files without reading from them. In our case, we use named pipes
// to simulate lazy files, so the pipes themselves might get copied into
// the output directory, blocking upload.
//
// We've updated the code such that we are able to detect if the files we
// are uploading are pipes, and make the job fail in that case.
// TestLazyPipelineCPPipes runs `cp -r` over a lazy input (which is exposed as
// named pipes) and verifies the job fails rather than uploading pipes.
func TestLazyPipelineCPPipes(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestLazyPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				// cp with the -r flag copies the named pipe itself into the
				// output instead of reading through it.
				Cmd: []string{"cp", "-r", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo: dataRepo,
					Glob: "/",
					Lazy: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// wait for job to spawn
	time.Sleep(15 * time.Second)
	var jobID string
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipeline, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("len(jobInfos) should be 1")
		}
		jobID = jobInfos[0].Job.ID
		// BlockState waits until the job reaches a terminal state.
		jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
			Job:        client.NewJob(jobID),
			BlockState: true,
		})
		if err != nil {
			return err
		}
		if jobInfo.State != pps.JobState_JOB_FAILURE {
			return fmt.Errorf("job did not fail, even though it tried to copy " +
				"pipes, which should be disallowed by Pachyderm")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction
// It looks like this:
// A
// | \
// v v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
// TestProvenance builds the A -> {B, C}, B -> C DAG described above and
// verifies that a commit on A produces exactly one commit on C.
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B simply copies A's file.
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/*"),
		"",
		false,
	))
	// C diffs A against B; since B is a copy of A, the diff output is empty.
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(aRepo, "/*"),
			client.NewAtomInput(bPipeline, "/*"),
		),
		"",
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	aCommit := commit2
	// Keyed struct literal: `go vet`'s composites check flags unkeyed ones.
	commitIter, err := c.FlushCommit([]*pfs.Commit{aCommit}, []*pfs.Repo{{Name: bPipeline}})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	bCommit := commitInfos[0].Commit
	commitIter, err = c.FlushCommit([]*pfs.Commit{aCommit, bCommit}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	cCommitInfo := commitInfos[0]
	// The diff output is empty, so C's output commit contains no data.
	require.Equal(t, uint64(0), cCommitInfo.SizeBytes)
	// We should only see two commits in aRepo
	commitInfos, err = c.ListCommit(aRepo, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// ...and two commits in each pipeline's output repo, one per input commit.
	commitInfos, err = c.ListCommit(bPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(cPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
}
// TestProvenance2 tests the following DAG:
// A
// / \
// B C
// \ /
// D
// TestProvenance2 builds the diamond DAG described above (A feeds B and C,
// which both feed D) and verifies each repo sees exactly one commit per
// input commit, with D's diff output always empty.
func TestProvenance2(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B copies only A's b* files.
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "bfile"), "/pfs/out/bfile"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/b*"),
		"",
		false,
	))
	// C copies only A's c* files.
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "cfile"), "/pfs/out/cfile"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/c*"),
		"",
		false,
	))
	// D diffs B's and C's outputs; both derive from identical data, so the
	// diff (and hence D's output file) is empty.
	dPipeline := tu.UniqueString("D")
	require.NoError(t, c.CreatePipeline(
		dPipeline,
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("diff /pfs/%s/bfile /pfs/%s/cfile >/pfs/out/file", bPipeline, cPipeline),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/*"),
			client.NewAtomInput(cPipeline, "/*"),
		),
		"",
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "bfile", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "cfile", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "bfile", strings.NewReader("bar\n"))
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "cfile", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	// Keyed struct literal: `go vet`'s composites check flags unkeyed ones.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{{Name: dPipeline}})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// We should only see two commits in each repo.
	commitInfos, err = c.ListCommit(bPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(cPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(dPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		commit := commitInfo.Commit
		buffer := bytes.Buffer{}
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, "file", 0, 0, &buffer))
		require.Equal(t, "", buffer.String())
	}
}
// TestFlushCommit
// TestFlushCommit builds a five-stage linear pipeline chain and verifies that
// FlushCommit/FlushJobAll on each source commit report one result per stage.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	prefix := tu.UniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s-%d", prefix, i)
	}

	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))

	// Create a five-stage pipeline
	numStages := 5
	for i := 0; i < numStages; i++ {
		// Stage i+1 copies the file from stage i's repo.
		repo := makeRepoName(i)
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(repo, "/*"),
			"",
			false,
		))
	}

	for i := 0; i < 10; i++ {
		commit, err := c.StartCommit(sourceRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))

		// Each source commit should trigger one commit and one job per stage.
		commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, numStages, len(commitInfos))

		jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, numStages, len(jobInfos))
	}
}
// TestFlushCommitFailures chains three pipelines where the middle one exits 1
// if file1 exists, and verifies FlushJobAll reports the downstream failures
// once the second commit introduces file1.
func TestFlushCommitFailures(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestFlushCommitFailures")
	require.NoError(t, c.CreateRepo(dataRepo))
	prefix := tu.UniqueString("TestFlushCommitFailures")
	pipelineName := func(i int) string { return prefix + fmt.Sprintf("%d", i) }

	// Pipeline 0: pass-through copy of the data repo.
	require.NoError(t, c.CreatePipeline(
		pipelineName(0),
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Pipeline 1: fails (exit 1) as soon as file1 appears upstream.
	require.NoError(t, c.CreatePipeline(
		pipelineName(1),
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("if [ -f /pfs/%s/file1 ]; then exit 1; fi", pipelineName(0)),
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(0)),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(pipelineName(0), "/*"),
		"",
		false,
	))
	// Pipeline 2: pass-through copy of pipeline 1's output.
	require.NoError(t, c.CreatePipeline(
		pipelineName(2),
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(1))},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(pipelineName(1), "/*"),
		"",
		false,
	))

	// Commit file0 then file1. The first iteration should succeed end to
	// end; the second (which adds file1) should fail everywhere downstream
	// of pipeline 0.
	for i := 0; i < 2; i++ {
		commit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
		jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, 3, len(jobInfos))
		if i == 0 {
			for _, ji := range jobInfos {
				require.Equal(t, pps.JobState_JOB_SUCCESS.String(), ji.State.String())
			}
		} else {
			for _, ji := range jobInfos {
				if ji.Pipeline.Name != pipelineName(0) {
					require.Equal(t, pps.JobState_JOB_FAILURE.String(), ji.State.String())
				}
			}
		}
	}
}
// TestFlushCommitAfterCreatePipeline verifies that FlushCommit on a branch
// head returns once a pipeline created after those commits has run over it.
func TestFlushCommitAfterCreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))

	// Make ten commits on an anonymous branch, remembering the last one.
	var (
		lastCommit *pfs.Commit
		err        error
	)
	for n := 0; n < 10; n++ {
		lastCommit, err = c.StartCommit(dataRepo, "")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, lastCommit.ID, "file", strings.NewReader(fmt.Sprintf("foo%d\n", n)))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, lastCommit.ID))
	}
	// Point master at the most recent commit, then create the pipeline.
	require.NoError(t, c.SetBranch(dataRepo, lastCommit.ID, "master"))

	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))

	// Flushing the branch head must block until the pipeline has processed it.
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
}
// TestRecreatePipeline tracks #432
// TestRecreatePipeline creates a pipeline, deletes it, and creates it again,
// checking that jobs run (and the input commit flushes) on both creations.
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// createPipeline also waits for the pre-existing commit to be processed.
	createPipeline := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(repo, "/*"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(collectCommitInfos(t, commitIter)))
	}

	// Do it twice. We expect jobs to be created on both runs.
	createPipeline()
	// NOTE(review): the sleeps presumably give the cluster time to settle
	// between delete and re-create — confirm whether they are still needed.
	time.Sleep(5 * time.Second)
	require.NoError(t, c.DeletePipeline(pipeline))
	time.Sleep(5 * time.Second)
	createPipeline()
}
// TestDeletePipeline deletes a running pipeline and verifies that both the
// pipeline and its jobs are gone afterwards.
func TestDeletePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, uuid.NewWithoutDashes(), strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// The pipeline just sleeps so it is still running when we delete it.
	createPipeline := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"sleep", "20"},
			nil,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(repo, "/*"),
			"",
			false,
		))
	}

	createPipeline()
	time.Sleep(10 * time.Second)
	// Wait for the pipeline to start running
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("no running pipeline")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	require.NoError(t, c.DeletePipeline(pipeline))
	time.Sleep(5 * time.Second)
	// Wait for the pipeline to disappear
	require.NoError(t, backoff.Retry(func() error {
		_, err := c.InspectPipeline(pipeline)
		if err == nil {
			return fmt.Errorf("expected pipeline to be missing, but it's still present")
		}
		return nil
	}, backoff.NewTestingBackOff()))

	// The job should be gone (expected value first, matching require.Equal's
	// convention used throughout this file).
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(jobs))
}
// TestPipelineState drives a pipeline through RUNNING -> PAUSED -> RUNNING
// via StopPipeline/StartPipeline and verifies each transition.
func TestPipelineState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))

	// Wait for pipeline to get picked up
	time.Sleep(15 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("pipeline should be in state running, not: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))

	// Stop pipeline and wait for the pipeline to pause
	require.NoError(t, c.StopPipeline(pipeline))
	time.Sleep(5 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_PAUSED {
			return fmt.Errorf("pipeline never paused, even though StopPipeline() was called, state: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))

	// Restart pipeline and wait for the pipeline to resume
	require.NoError(t, c.StartPipeline(pipeline))
	time.Sleep(15 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("pipeline never restarted, even though StartPipeline() was called, state: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineJobCounts runs one job through a pipeline and checks that the
// pipeline's JobCounts records exactly one successful job.
func TestPipelineJobCounts(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))

	// Trigger a job by creating a commit
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	jobInfos, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	// Block until the job reaches a terminal state (bounded by the context
	// timeout) before reading the pipeline's counters.
	inspectJobRequest := &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)

	// check that the job has been accounted for
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, int32(1), pipelineInfo.JobCounts[int32(pps.JobState_JOB_SUCCESS)])
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward
// TestDeleteAfterMembershipChange checks that a repo can still be deleted
// after the pachd cluster has been scaled up and, separately, scaled down.
func TestDeleteAfterMembershipChange(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// Run the same scenario once scaling up and once scaling down.
	for _, scaleUp := range []bool{true, false} {
		repo := tu.UniqueString("TestDeleteAfterMembershipChange")
		c := getPachClient(t)
		require.NoError(t, c.DeleteAll())
		require.NoError(t, c.CreateRepo(repo))
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
		// Change cluster membership, then delete through a fresh client.
		scalePachdRandom(t, scaleUp)
		c = getUsablePachClient(t)
		require.NoError(t, c.DeleteRepo(repo, false))
	}
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward
// TestPachdRestartResumesRunningJobs starts a slow job, restarts a pachd
// instance mid-run, and verifies the job still finishes successfully.
func TestPachdRestartResumesRunningJobs(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPachdRestartPickUpRunningJobs")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			// sleep long enough that the job is still running when pachd restarts
			"sleep 10",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	time.Sleep(5 * time.Second)

	// The job should be running when we restart pachd.
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.Equal(t, pps.JobState_JOB_RUNNING, jobInfos[0].State)

	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// InspectJob with block=true waits for the job to finish.
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestUpdatePipelineThatHasNoOutput tracks #1637
// A pipeline whose jobs always fail never produces an output commit; this
// test verifies that such a pipeline can nevertheless be updated.
func TestUpdatePipelineThatHasNoOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestUpdatePipelineThatHasNoOutput")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// "exit 1" makes every job fail, so the pipeline never gets output.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sh"},
		[]string{"exit 1"},
		nil,
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Wait for job to spawn
	var jobInfos []*pps.JobInfo
	time.Sleep(10 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		var err error
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) < 1 {
			return fmt.Errorf("job not spawned")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Block until the job finishes; it is expected to have failed.
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	// Now we update the pipeline (update=true); this must not error even
	// though the pipeline has no output commits.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sh"},
		[]string{"exit 1"},
		nil,
		client.NewAtomInput(dataRepo, "/"),
		"",
		true,
	))
}
// TestAcceptReturnCode verifies that a job whose user command exits with a
// code listed in Transform.AcceptReturnCode is treated as a success.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestAcceptReturnCode")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipelineName := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use the keyed constructor rather than the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which fails `go vet`
			// and breaks if pps.Pipeline ever gains a field.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd:   []string{"sh"},
				Stdin: []string{"exit 1"},
				// Exit code 1 is explicitly accepted as success.
				AcceptReturnCode: []int64{1},
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		},
	)
	require.NoError(t, err)
	// Wait for the output commit, then confirm the job "succeeded" despite
	// the non-zero exit code.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward
//
// TestRestartAll restarts every pachd pod and verifies that pipeline, repo
// and commit metadata all survive the restart.
func TestRestartAll(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestRestartAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Wait for the commit to be fully processed before restarting.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Wait a little for pipelines to restart
	time.Sleep(10 * time.Second)
	// Pipeline, repo and commit must all still exist after the restart.
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.Equal(t, pps.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward
//
// TestRestartOne restarts a single pachd pod and verifies that pipeline,
// repo and commit metadata are still accessible afterwards.
func TestRestartOne(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestRestartOne_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Wait for the commit to be fully processed before restarting.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Everything created before the restart must still be inspectable.
	_, err = c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestPrettyPrinting runs a simple pipeline to completion, then exercises the
// detailed pretty-printers for repos, commits, files, pipelines and jobs,
// checking only that none of them returns an error.
func TestPrettyPrinting(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPrettyPrinting_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use the keyed constructor rather than the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which fails `go vet`
			// and breaks if pps.Pipeline ever gains a field.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			// Resource requests are set so the pipeline pretty-printer has
			// resource fields to render.
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		})
	require.NoError(t, err)
	// Do a commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Each pretty-printer should handle real, fully-populated objects.
	repoInfo, err := c.InspectRepo(dataRepo)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedRepoInfo(repoInfo))
	for _, commitInfo := range commitInfos {
		require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	}
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "file")
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedFileInfo(fileInfo))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NoError(t, ppspretty.PrintDetailedPipelineInfo(pipelineInfo))
	jobInfos, err := c.ListJob("", nil, nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) > 0)
	require.NoError(t, ppspretty.PrintDetailedJobInfo(jobInfos[0]))
}
// TestDeleteAll builds a small DAG (one input repo, one copy pipeline, one
// processed commit) and then verifies that DeleteAll wipes every repo,
// pipeline and job from the cluster.
func TestDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it deletes everything
	pachClient := getPachClient(t)
	require.NoError(t, pachClient.DeleteAll())
	// Build the input repo and a pipeline that copies one file out of it.
	inputRepo := tu.UniqueString("TestDeleteAll_data")
	require.NoError(t, pachClient.CreateRepo(inputRepo))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, pachClient.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", inputRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(inputRepo, "/"),
		"",
		false,
	))
	// Commit a file and wait for the pipeline to process it.
	inputCommit, err := pachClient.StartCommit(inputRepo, "master")
	require.NoError(t, err)
	_, err = pachClient.PutFile(inputRepo, inputCommit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, pachClient.FinishCommit(inputRepo, inputCommit.ID))
	iter, err := pachClient.FlushCommit([]*pfs.Commit{inputCommit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(collectCommitInfos(t, iter)))
	// Wipe the cluster and confirm nothing is left behind.
	require.NoError(t, pachClient.DeleteAll())
	repoInfos, err := pachClient.ListRepo()
	require.NoError(t, err)
	require.Equal(t, 0, len(repoInfos))
	pipelineInfos, err := pachClient.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 0, len(pipelineInfos))
	jobInfos, err := pachClient.ListJob("", nil, nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(jobInfos))
}
// TestRecursiveCp runs a pipeline that recursively copies its entire input
// directory into the output and verifies the run produces one output commit.
func TestRecursiveCp(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	pc := getPachClient(t)
	require.NoError(t, pc.DeleteAll())
	// Input repo plus a pipeline doing `cp -r` over the whole input dir.
	inputRepo := tu.UniqueString("TestRecursiveCp_data")
	require.NoError(t, pc.CreateRepo(inputRepo))
	pipeline := tu.UniqueString("TestRecursiveCp")
	require.NoError(t, pc.CreatePipeline(
		pipeline,
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("cp -r /pfs/%s /pfs/out", inputRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(inputRepo, "/*"),
		"",
		false,
	))
	// Write 100 moderately sized files into a single input commit.
	commit, err := pc.StartCommit(inputRepo, "master")
	require.NoError(t, err)
	for fileIdx := 0; fileIdx < 100; fileIdx++ {
		_, err = pc.PutFile(
			inputRepo,
			commit.ID,
			fmt.Sprintf("file%d", fileIdx),
			strings.NewReader(strings.Repeat("foo\n", 10000)),
		)
		require.NoError(t, err)
	}
	require.NoError(t, pc.FinishCommit(inputRepo, commit.ID))
	// The pipeline should produce exactly one output commit.
	iter, err := pc.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(collectCommitInfos(t, iter)))
}
// TestPipelineUniqueness verifies that creating two pipelines with the same
// name fails with an "already exists" error on the second attempt.
func TestPipelineUniqueness(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	pc := getPachClient(t)
	require.NoError(t, pc.DeleteAll())
	inputRepo := tu.UniqueString("data")
	require.NoError(t, pc.CreateRepo(inputRepo))
	pipeline := tu.UniqueString("pipeline")
	// Both creation attempts use an identical spec; only the first may
	// succeed.
	createPipeline := func() error {
		return pc.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{""},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(inputRepo, "/"),
			"",
			false,
		)
	}
	require.NoError(t, createPipeline())
	err := createPipeline()
	require.YesError(t, err)
	require.Matches(t, "pipeline .*? already exists", err.Error())
}
// TestUpdatePipeline updates a pipeline twice: once with Update only (output
// changes for new commits), and once with Reprocess set (existing data is
// reprocessed, so the output changes without any new input commit).
func TestUpdatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	// Initial pipeline writes "foo" to its output file.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo foo >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Update the pipeline, this will not create a new pipeline as reprocess
	// isn't set to true.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo bar >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	// A new input commit is needed for the updated code to run.
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("2"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "bar\n", buffer.String())
	// Update the pipeline again, this time with Reprocess: true set. Now we
	// should see a different output file
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd:   []string{"bash"},
				Stdin: []string{"echo buzz >/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:     client.NewAtomInput(dataRepo, "/*"),
			Update:    true,
			Reprocess: true,
		})
	require.NoError(t, err)
	// No new input commit this time: reprocessing alone changes the output.
	iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "buzz\n", buffer.String())
}
// TestUpdatePipelineRunningJob updates a pipeline while one of its jobs is
// still running and verifies that the in-flight job is killed while the job
// started by the updated pipeline succeeds.
func TestUpdatePipelineRunningJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	// "sleep 1000" guarantees the first job is still running when we update.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"sleep 1000"},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	numFiles := 50
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(""))
		// Fix: the PutFile error was silently dropped in this loop.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit2.ID, fmt.Sprintf("file-%d", i+numFiles), strings.NewReader(""))
		// Fix: the PutFile error was silently dropped in this loop.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Wait until exactly one job exists and is running before updating.
	b := backoff.NewTestingBackOff()
	b.MaxElapsedTime = 30 * time.Second
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("wrong number of jobs")
		}
		if pps.JobState_JOB_RUNNING != jobInfos[0].State {
			return fmt.Errorf("wrong state: %v for %s", jobInfos[0].State, jobInfos[0].Job.ID)
		}
		return nil
	}, b))
	// Update the pipeline. This will not create a new pipeline as reprocess
	// isn't set to true.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"true"},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	// Currently, commits finish shortly before their respecive JobInfo documents
	// are updated (the pipeline master receives the commit update and then
	// updates the JobInfo document). Wait briefly for this to happen
	time.Sleep(10 * time.Second)
	// The new job (listed first) succeeded; the pre-update job was killed.
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jobInfos[0].State.String())
	require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfos[1].State.String())
}
// TestManyFilesSingleCommit writes enough files into a single commit that
// listing them requires more than one page of results, then verifies that
// ListFile returns every file.
func TestManyFilesSingleCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	pc := getPachClient(t)
	require.NoError(t, pc.DeleteAll())
	repo := tu.UniqueString("TestManyFilesSingleCommit_data")
	require.NoError(t, pc.CreateRepo(repo))
	// Request enough to require more than one page of results
	const fileCount = 20000
	_, err := pc.StartCommit(repo, "master")
	require.NoError(t, err)
	for i := 0; i < fileCount; i++ {
		_, err = pc.PutFile(repo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(""))
		require.NoError(t, err)
	}
	require.NoError(t, pc.FinishCommit(repo, "master"))
	// All files must come back, across however many pages that takes.
	listed, err := pc.ListFile(repo, "master", "")
	require.NoError(t, err)
	require.Equal(t, fileCount, len(listed))
}
// TestStopPipeline stops a pipeline, checks that a new input commit is NOT
// processed while stopped, then restarts the pipeline and verifies the
// pending commit gets processed.
func TestStopPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Stop the pipeline, so it doesn't process incoming commits
	require.NoError(t, c.StopPipeline(pipelineName))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// wait for 10 seconds and check that no commit has been outputted
	time.Sleep(10 * time.Second)
	commits, err := c.ListCommit(pipelineName, "master", "", 0)
	require.NoError(t, err)
	// Fix: require.Equal takes (t, expected, actual); the arguments were
	// reversed here, which yields a misleading failure message.
	require.Equal(t, 0, len(commits))
	// Restart pipeline, and make sure old commit is processed
	require.NoError(t, c.StartPipeline(pipelineName))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestStandby verifies pipeline standby mode: pipelines created with
// Standby: true should settle in PIPELINE_STANDBY when idle, wake briefly to
// process commits, and reuse a single pod across many commits.
func TestStandby(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	t.Run("ChainOf10", func(t *testing.T) {
		require.NoError(t, c.DeleteAll())
		dataRepo := tu.UniqueString("TestStandby_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		// Build a chain of 10 standby pipelines, each reading the previous.
		numPipelines := 10
		pipelines := make([]string, numPipelines)
		for i := 0; i < numPipelines; i++ {
			pipelines[i] = tu.UniqueString("TestStandby")
			input := dataRepo
			if i > 0 {
				input = pipelines[i-1]
			}
			_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
				&pps.CreatePipelineRequest{
					Pipeline: client.NewPipeline(pipelines[i]),
					Transform: &pps.Transform{
						Cmd: []string{"true"},
					},
					Input:   client.NewAtomInput(input, "/*"),
					Standby: true,
				},
			)
			require.NoError(t, err)
		}
		// All 10 pipelines should eventually settle into standby.
		require.NoErrorWithinTRetry(t, time.Second*30, func() error {
			pis, err := c.ListPipeline()
			require.NoError(t, err)
			var standby int
			for _, pi := range pis {
				if pi.State == pps.PipelineState_PIPELINE_STANDBY {
					standby++
				}
			}
			if standby != numPipelines {
				return fmt.Errorf("should have %d pipelines in standby, not %d", numPipelines, standby)
			}
			return nil
		})
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		var eg errgroup.Group
		// NOTE(review): finished is written by one goroutine and read by the
		// other without synchronization; this would be flagged by the race
		// detector — consider an atomic or channel if this test is revived.
		var finished bool
		eg.Go(func() error {
			commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
			require.NoError(t, err)
			collectCommitInfos(t, commitIter)
			finished = true
			return nil
		})
		eg.Go(func() error {
			// While the commit propagates down the chain, only a couple of
			// pipelines should ever be awake at once.
			for !finished {
				pis, err := c.ListPipeline()
				require.NoError(t, err)
				var active int
				for _, pi := range pis {
					if pi.State != pps.PipelineState_PIPELINE_STANDBY {
						active++
					}
				}
				// We tolerate having 2 pipelines out of standby because there's
				// latency associated with entering and exiting standby.
				require.True(t, active <= 2, "active: %d", active)
			}
			return nil
		})
		// Fix: the error returned by eg.Wait() was previously discarded.
		require.NoError(t, eg.Wait())
	})
	t.Run("ManyCommits", func(t *testing.T) {
		require.NoError(t, c.DeleteAll())
		dataRepo := tu.UniqueString("TestStandby_data")
		pipeline := tu.UniqueString("TestStandby")
		require.NoError(t, c.CreateRepo(dataRepo))
		// The pipeline records which pod processed each commit.
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd:   []string{"sh"},
					Stdin: []string{"echo $PPS_POD_NAME >/pfs/out/pod"},
				},
				Input:   client.NewAtomInput(dataRepo, "/"),
				Standby: true,
			},
		)
		require.NoError(t, err)
		numCommits := 100
		for i := 0; i < numCommits; i++ {
			_, err := c.StartCommit(dataRepo, "master")
			require.NoError(t, err)
			require.NoError(t, c.FinishCommit(dataRepo, "master"))
		}
		commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		// Every output commit must have been produced by the same pod.
		pod := ""
		cis, err := c.ListCommit(pipeline, "master", "", 0)
		require.NoError(t, err)
		for _, ci := range cis {
			var buffer bytes.Buffer
			require.NoError(t, c.GetFile(pipeline, ci.Commit.ID, "pod", 0, 0, &buffer))
			if pod == "" {
				pod = buffer.String()
			} else {
				require.True(t, pod == buffer.String(), "multiple pods were used to process commits")
			}
		}
		// After the burst of work the pipeline should be back in standby.
		pi, err := c.InspectPipeline(pipeline)
		require.NoError(t, err)
		require.Equal(t, pps.PipelineState_PIPELINE_STANDBY.String(), pi.State.String())
	})
}
// TestPipelineEnv verifies that a pipeline's container sees (1) a Kubernetes
// secret mounted at its MountPath, (2) an environment variable populated from
// that secret via EnvVar, and (3) an environment variable from the Env map.
func TestPipelineEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// make a secret to reference
	k := getKubeClient(t)
	secretName := tu.UniqueString("test-secret")
	_, err := k.CoreV1().Secrets(v1.NamespaceDefault).Create(
		&v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: secretName,
			},
			Data: map[string][]byte{
				"foo": []byte("foo\n"),
			},
		},
	)
	require.NoError(t, err)
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineEnv_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"sh"},
				// Dump the secret file and both env vars into output files
				// so they can be asserted on below.
				Stdin: []string{
					"ls /var/secret",
					"cat /var/secret/foo > /pfs/out/foo",
					"echo $bar> /pfs/out/bar",
					"echo $foo> /pfs/out/foo_env",
				},
				Env: map[string]string{"bar": "bar"},
				Secrets: []*pps.Secret{
					{
						Name:      secretName,
						Key:       "foo",
						MountPath: "/var/secret",
						EnvVar:    "foo",
					},
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// /pfs/out/foo holds the secret file's contents.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "foo", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// /pfs/out/foo_env holds the $foo env var populated from the secret.
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "foo_env", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// /pfs/out/bar holds the $bar env var from the Env map.
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "bar", 0, 0, &buffer))
	require.Equal(t, "bar\n", buffer.String())
}
// TestPipelineWithFullObjects runs a copy pipeline over two successive input
// commits and verifies that the second output commit's file contains the
// concatenation of both inputs ("foo\nbar\n").
func TestPipelineWithFullObjects(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// First output contains only the first input's data.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfoIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// Second output contains both inputs' data appended together.
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
}
// TestPipelineWithExistingInputCommits creates two input commits BEFORE the
// pipeline exists and verifies that the pipeline processes only the head
// commit, producing a single output commit covering both inputs.
func TestPipelineWithExistingInputCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// create pipeline — note both commits already exist at this point
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// The output file holds both commits' data appended together.
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
	// Check that one output commit is created (processing the inputs' head commits)
	commitInfos, err = c.ListCommit(pipelineName, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
}
// TestPipelineThatSymlinks runs a pipeline whose output files are symlinks —
// some pointing back into the input repo and one pointing outside /pfs — and
// verifies that (1) the output contents match, and (2) outputs symlinked from
// the input reuse the input's object refs (i.e. the re-upload was skipped).
func TestPipelineThatSymlinks(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			// Symlinks to input files
			fmt.Sprintf("ln -s /pfs/%s/foo /pfs/out/foo", dataRepo),
			fmt.Sprintf("ln -s /pfs/%s/dir1/bar /pfs/out/bar", dataRepo),
			"mkdir /pfs/out/dir",
			fmt.Sprintf("ln -s /pfs/%s/dir2 /pfs/out/dir/dir2", dataRepo),
			// Symlinks to external files
			"echo buzz > /tmp/buzz",
			"ln -s /tmp/buzz /pfs/out/buzz",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "dir1/bar", strings.NewReader("bar"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "dir2/foo", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// Check that the output files are identical to the input files.
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "foo", 0, 0, &buffer))
	require.Equal(t, "foo", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "bar", 0, 0, &buffer))
	require.Equal(t, "bar", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "dir/dir2/foo", 0, 0, &buffer))
	require.Equal(t, "foo", buffer.String())
	buffer.Reset()
	// The externally-symlinked file is a real upload, not a skipped one.
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "buzz", 0, 0, &buffer))
	require.Equal(t, "buzz\n", buffer.String())
	// Make sure that we skipped the upload by checking that the input file
	// and the output file have the same object refs.
	inputFooFileInfo, err := c.InspectFile(dataRepo, commit.ID, "foo")
	require.NoError(t, err)
	outputFooFileInfo, err := c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "foo")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
	inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir1/bar")
	require.NoError(t, err)
	outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "bar")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
	inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir2/foo")
	require.NoError(t, err)
	outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "dir/dir2/foo")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
}
// TestChainedPipelines tracks https://github.com/pachyderm/pachyderm/issues/797
// DAG: A -> B; then C takes the cross of B and D. The test verifies that C's
// single output commit contains data from both upstream sources.
func TestChainedPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	dRepo := tu.UniqueString("D")
	require.NoError(t, c.CreateRepo(dRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	dCommit, err := c.StartCommit(dRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dRepo, "master"))
	// B simply copies A's file through.
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/"),
		"",
		false,
	))
	// C crosses B's output with repo D and emits one file from each.
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/dFile", dRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/"),
			client.NewAtomInput(dRepo, "/"),
		),
		"",
		false,
	))
	// Flushing both root commits should yield exactly one commit, in C.
	resultIter, err := c.FlushCommit([]*pfs.Commit{aCommit, dCommit}, nil)
	require.NoError(t, err)
	results := collectCommitInfos(t, resultIter)
	require.Equal(t, 1, len(results))
	require.Equal(t, cPipeline, results[0].Commit.Repo.Name)
	// bFile came through A->B, dFile came straight from D.
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "bFile", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "dFile", 0, 0, &buf))
	require.Equal(t, "bar\n", buf.String())
}
// DAG:
//
// A
// |
// B   E
// |  /
// C
// |
// D
//
// TestChainedPipelinesNoDelay verifies that a second commit to input repo E
// re-triggers only the pipelines downstream of E: C and D each run one
// additional job (so D ends up with exactly 2 jobs).
func TestChainedPipelinesNoDelay(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Input repos A and E, each seeded with a single one-line file.
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	eRepo := tu.UniqueString("E")
	require.NoError(t, c.CreateRepo(eRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	eCommit, err := c.StartCommit(eRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(eRepo, "master"))
	// B: copies A's file verbatim.
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/"),
		"",
		false,
	))
	// C: crosses B's output with E and copies one file from each input.
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/eFile", eRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/"),
			client.NewAtomInput(eRepo, "/"),
		),
		"",
		false,
	))
	// D: copies both of C's output files.
	dPipeline := tu.UniqueString("D")
	require.NoError(t, c.CreatePipeline(
		dPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/bFile /pfs/out/bFile", cPipeline),
			fmt.Sprintf("cp /pfs/%s/eFile /pfs/out/eFile", cPipeline)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(cPipeline, "/"),
		"",
		false,
	))
	// First flush: expect an output commit from both C and D.
	resultsIter, err := c.FlushCommit([]*pfs.Commit{aCommit, eCommit}, nil)
	require.NoError(t, err)
	results := collectCommitInfos(t, resultsIter)
	require.Equal(t, 2, len(results))
	// Commit to E alone; the change should again flow through C and D.
	eCommit2, err := c.StartCommit(eRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(eRepo, "master"))
	resultsIter, err = c.FlushCommit([]*pfs.Commit{eCommit2}, nil)
	require.NoError(t, err)
	results = collectCommitInfos(t, resultsIter)
	require.Equal(t, 2, len(results))
	// Get number of jobs triggered in pipeline D
	jobInfos, err := c.ListJob(dPipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
}
// collectCommitInfos drains commitInfoIter and returns every CommitInfo it
// yields. The test fails immediately on any iteration error other than
// io.EOF, which signals normal exhaustion of the stream.
func collectCommitInfos(t testing.TB, commitInfoIter client.CommitInfoIterator) []*pfs.CommitInfo {
	var infos []*pfs.CommitInfo
	for {
		info, err := commitInfoIter.Next()
		if err != nil {
			if err == io.EOF {
				return infos
			}
			require.NoError(t, err)
		}
		infos = append(infos, info)
	}
}
// TestParallelismSpec verifies ppsutil.GetExpectedNumWorkers across all
// ParallelismSpec strategies: a fixed Constant, per-node Coefficients
// (including a sub-1 coefficient that must still yield at least one worker),
// and the defaults for zero-valued and nil specs (both 1).
func TestParallelismSpec(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	kubeclient := getKubeClient(t)
	nodes, err := kubeclient.CoreV1().Nodes().List(metav1.ListOptions{})
	// BUG FIX: this error was previously unchecked; on failure, 'nodes' would
	// be nil and the len() below would silently report 0 nodes.
	require.NoError(t, err)
	numNodes := len(nodes.Items)
	// Test Constant strategy
	parallelism, err := ppsutil.GetExpectedNumWorkers(getKubeClient(t), &pps.ParallelismSpec{
		Constant: 7,
	})
	require.NoError(t, err)
	require.Equal(t, 7, parallelism)
	// Coefficient == 1 (basic test)
	// TODO(msteffen): This test can fail when run against cloud providers, if the
	// remote cluster has more than one node (in which case "Coefficient: 1" will
	// cause more than 1 worker to start)
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 1,
	})
	require.NoError(t, err)
	require.Equal(t, numNodes, parallelism)
	// Coefficient > 1
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 2,
	})
	require.NoError(t, err)
	require.Equal(t, 2*numNodes, parallelism)
	// Make sure we start at least one worker
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 0.01,
	})
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
	// Test 0-initialized JobSpec
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{})
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
	// Test nil JobSpec
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, nil)
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
}
// TestPipelineJobDeletion creates a pipeline, runs a single job over one
// input commit, and verifies that the resulting job can be deleted.
func TestPipelineJobDeletion(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Input repo plus a trivial cp pipeline reading from it.
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// One commit triggers one job.
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Block until the job's output commit exists.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	_, err = commitIter.Next()
	require.NoError(t, err)
	// Exactly one job should exist; deleting it must succeed.
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.NoError(t, c.DeleteJob(jobInfos[0].Job.ID))
}
// TestStopJob creates two input commits (hence two sequential jobs of a
// slow pipeline), stops the first job midway, and asserts that the first
// job ends up KILLED while the second still completes SUCCESSfully.
func TestStopJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestStopJob")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline; "sleep 20" keeps each job running long enough to stop.
	pipelineName := tu.UniqueString("pipeline-stop-job")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sleep", "20"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Create two input commits to trigger two jobs.
	// We will stop the first job midway through, and assert that the
	// second job finishes.
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Poll until exactly one job exists and it's RUNNING (the second job
	// can't start until the first finishes, since jobs run serially here).
	var jobID string
	b := backoff.NewTestingBackOff()
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("len(jobInfos) should be 1")
		}
		jobID = jobInfos[0].Job.ID
		if pps.JobState_JOB_RUNNING != jobInfos[0].State {
			return fmt.Errorf("jobInfos[0] has the wrong state")
		}
		return nil
	}, b))
	// Now stop the first job
	err = c.StopJob(jobID)
	require.NoError(t, err)
	// InspectJob with block=true waits for the job's final state.
	jobInfo, err := c.InspectJob(jobID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_KILLED, jobInfo.State)
	b.Reset()
	// Check that the second job completes
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 2 {
			return fmt.Errorf("len(jobInfos) should be 2")
		}
		// ListJob returns newest first, so index 0 is the second job.
		jobID = jobInfos[0].Job.ID
		return nil
	}, b))
	jobInfo, err = c.InspectJob(jobID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestGetLogs runs the shared GetLogs test suite with stats disabled.
func TestGetLogs(t *testing.T) {
	testGetLogs(t, false)
}
// TestGetLogsWithStats runs the shared GetLogs test suite with stats enabled.
func TestGetLogsWithStats(t *testing.T) {
	testGetLogs(t, true)
}
// testGetLogs is the shared body of TestGetLogs and TestGetLogsWithStats.
// It runs a small pipeline, then exercises GetLogs with every supported
// filter: by pipeline, by job, by datum path / hex hash / base64 hash, with
// tailing and following, and with invalid arguments (which must error).
func testGetLogs(t *testing.T, enableStats bool) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// On an empty cluster, GetLogs should yield no logs and no error.
	iter := c.GetLogs("", "", nil, "", false, false, 0)
	for iter.Next() {
	}
	require.NoError(t, iter.Err())
	// create repos
	dataRepo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"sh"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
					"echo foo",
					"echo %s", // %s tests a formatting bug we had (#2729)
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: enableStats,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err)
	// Commit data to repo and flush commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	_, err = c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	// Get logs from pipeline, using pipeline
	iter = c.GetLogs(pipelineName, "", nil, "", false, false, 0)
	var numLogs int
	var loglines []string
	for iter.Next() {
		// Count only user (stdout) lines, not master/system messages.
		if !iter.Message().User {
			continue
		}
		numLogs++
		require.True(t, iter.Message().Message != "")
		loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n"))
		// "MISSING" would indicate the %s formatting bug (#2729) regressed.
		require.False(t, strings.Contains(iter.Message().Message, "MISSING"), iter.Message().Message)
	}
	require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n"))
	require.NoError(t, iter.Err())
	// Get logs from pipeline, using pipeline (tailing the last two log lines)
	iter = c.GetLogs(pipelineName, "", nil, "", false, false, 2)
	numLogs = 0
	loglines = []string{}
	for iter.Next() {
		numLogs++
		require.True(t, iter.Message().Message != "")
		loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n"))
	}
	require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n"))
	require.NoError(t, iter.Err())
	// Get logs from pipeline, using a pipeline that doesn't exist. There should
	// be an error
	iter = c.GetLogs("__DOES_NOT_EXIST__", "", nil, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	require.Matches(t, "could not get", iter.Err().Error())
	// Get logs from pipeline, using job
	// (1) Get job ID, from pipeline that just ran
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) == 1)
	// (2) Get logs using extracted job ID
	// wait for logs to be collected
	time.Sleep(10 * time.Second)
	iter = c.GetLogs("", jobInfos[0].Job.ID, nil, "", false, false, 0)
	numLogs = 0
	for iter.Next() {
		numLogs++
		require.True(t, iter.Message().Message != "")
	}
	// Make sure that we've seen some logs
	require.NoError(t, iter.Err())
	require.True(t, numLogs > 0)
	// Get logs for datums but don't specify pipeline or job. These should error
	iter = c.GetLogs("", "", []string{"/foo"}, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	resp, err := c.ListDatum(jobInfos[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.True(t, len(resp.DatumInfos) > 0)
	iter = c.GetLogs("", "", nil, resp.DatumInfos[0].Datum.ID, false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	// Get logs from pipeline, using a job that doesn't exist. There should
	// be an error
	iter = c.GetLogs("", "__DOES_NOT_EXIST__", nil, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	require.Matches(t, "could not get", iter.Err().Error())
	// Filter logs based on input (using file that exists). Get logs using file
	// path, hex hash, and base64 hash, and make sure you get the same log lines
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "/file")
	require.NoError(t, err)
	// TODO(msteffen) This code shouldn't be wrapped in a backoff, but for some
	// reason GetLogs is not yet 100% consistent. This reduces flakes in testing.
	require.NoError(t, backoff.Retry(func() error {
		pathLog := c.GetLogs("", jobInfos[0].Job.ID, []string{"/file"}, "", false, false, 0)
		// Hard-coded hashes of the committed content; the Equal calls below
		// sanity-check them against the actual file hash first.
		hexHash := "19fdf57bdf9eb5a9602bfa9c0e6dd7ed3835f8fd431d915003ea82747707be66"
		require.Equal(t, hexHash, hex.EncodeToString(fileInfo.Hash)) // sanity-check test
		hexLog := c.GetLogs("", jobInfos[0].Job.ID, []string{hexHash}, "", false, false, 0)
		base64Hash := "Gf31e9+etalgK/qcDm3X7Tg1+P1DHZFQA+qCdHcHvmY="
		require.Equal(t, base64Hash, base64.StdEncoding.EncodeToString(fileInfo.Hash))
		base64Log := c.GetLogs("", jobInfos[0].Job.ID, []string{base64Hash}, "", false, false, 0)
		numLogs = 0
		// Walk all three iterators in lockstep; they must yield identical
		// log streams of identical length.
		for {
			havePathLog, haveHexLog, haveBase64Log := pathLog.Next(), hexLog.Next(), base64Log.Next()
			if havePathLog != haveHexLog || haveHexLog != haveBase64Log {
				return fmt.Errorf("Unequal log lengths")
			}
			if !havePathLog {
				break
			}
			numLogs++
			if pathLog.Message().Message != hexLog.Message().Message ||
				hexLog.Message().Message != base64Log.Message().Message {
				return fmt.Errorf(
					"unequal logs, pathLogs: \"%s\" hexLog: \"%s\" base64Log: \"%s\"",
					pathLog.Message().Message,
					hexLog.Message().Message,
					base64Log.Message().Message)
			}
		}
		for _, logsiter := range []*client.LogsIter{pathLog, hexLog, base64Log} {
			if logsiter.Err() != nil {
				return logsiter.Err()
			}
		}
		if numLogs == 0 {
			return fmt.Errorf("no logs found")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Filter logs based on input (using file that doesn't exist). There should
	// be no logs
	iter = c.GetLogs("", jobInfos[0].Job.ID, []string{"__DOES_NOT_EXIST__"}, "", false, false, 0)
	require.False(t, iter.Next())
	require.NoError(t, iter.Err())
	// Read the log stream (bounded by a one-minute timeout): after a few
	// lines, commit more data so there are fresh logs to receive, then stop
	// once 16 lines have arrived.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	iter = c.WithCtx(ctx).GetLogs(pipelineName, "", nil, "", false, false, 0)
	numLogs = 0
	for iter.Next() {
		numLogs++
		if numLogs == 8 {
			// Do another commit so there's logs to receive with follow
			_, err = c.StartCommit(dataRepo, "master")
			require.NoError(t, err)
			_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("bar\n"))
			require.NoError(t, err)
			require.NoError(t, c.FinishCommit(dataRepo, "master"))
		}
		require.True(t, iter.Message().Message != "")
		if numLogs == 16 {
			break
		}
	}
	require.NoError(t, iter.Err())
}
// TestPfsPutFile exercises PutFileURL with pfs:// URLs: copying a single
// file from one repo to another, and recursively copying a whole commit
// (including nested directories), then verifying the copied contents.
func TestPfsPutFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	repo1 := tu.UniqueString("TestPfsPutFile1")
	require.NoError(t, c.CreateRepo(repo1))
	repo2 := tu.UniqueString("TestPfsPutFile2")
	require.NoError(t, c.CreateRepo(repo2))
	// Populate repo1 with two top-level files, one nested file, and 100
	// files in a deeper directory.
	commit1, err := c.StartCommit(repo1, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "file1", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "file2", strings.NewReader("bar\n"))
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "dir1/file3", strings.NewReader("fizz\n"))
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(repo1, commit1.ID, fmt.Sprintf("dir1/dir2/file%d", i), strings.NewReader(fmt.Sprintf("content%d\n", i)))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(repo1, commit1.ID))
	// Copy a single file into repo2 via a pfs:// URL (non-recursive).
	commit2, err := c.StartCommit(repo2, "")
	require.NoError(t, err)
	err = c.PutFileURL(repo2, commit2.ID, "file", fmt.Sprintf("pfs://0.0.0.0:650/%s/%s/file1", repo1, commit1.ID), false, false)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo2, commit2.ID))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(repo2, commit2.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	// Recursively copy the whole of commit1 into repo2.
	commit3, err := c.StartCommit(repo2, "")
	require.NoError(t, err)
	err = c.PutFileURL(repo2, commit3.ID, "", fmt.Sprintf("pfs://0.0.0.0:650/%s/%s", repo1, commit1.ID), true, false)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo2, commit3.ID))
	// Every file from commit1 must be present with identical content.
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "file1", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "file2", 0, 0, &buf))
	require.Equal(t, "bar\n", buf.String())
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "dir1/file3", 0, 0, &buf))
	require.Equal(t, "fizz\n", buf.String())
	for i := 0; i < 100; i++ {
		buf = bytes.Buffer{}
		require.NoError(t, c.GetFile(repo2, commit3.ID, fmt.Sprintf("dir1/dir2/file%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("content%d\n", i), buf.String())
	}
}
// TestAllDatumsAreProcessed crosses two repos of two files each (a 2x2
// cross product of datums) and verifies every datum was processed: each
// input file's "foo\n" line lands in the output twice, giving 8 lines.
func TestAllDatumsAreProcessed(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo1 := tu.UniqueString("TestAllDatumsAreProcessed_data1")
	require.NoError(t, c.CreateRepo(dataRepo1))
	dataRepo2 := tu.UniqueString("TestAllDatumsAreProcessed_data2")
	require.NoError(t, c.CreateRepo(dataRepo2))
	// Seed each repo with two identical one-line files.
	seed := func(repo string) *pfs.Commit {
		commit, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		for _, name := range []string{"file1", "file2"} {
			_, err = c.PutFile(repo, "master", name, strings.NewReader("foo\n"))
			require.NoError(t, err)
		}
		require.NoError(t, c.FinishCommit(repo, "master"))
		return commit
	}
	commit1 := seed(dataRepo1)
	commit2 := seed(dataRepo2)
	// The pipeline concatenates every file from both inputs per datum.
	require.NoError(t, c.CreatePipeline(
		tu.UniqueString("TestAllDatumsAreProcessed_pipelines"),
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%s/* /pfs/%s/* > /pfs/out/file", dataRepo1, dataRepo2),
		},
		nil,
		client.NewCrossInput(
			client.NewAtomInput(dataRepo1, "/*"),
			client.NewAtomInput(dataRepo2, "/*"),
		),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1, commit2}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	// should be 8 because each file gets copied twice due to cross product
	require.Equal(t, strings.Repeat("foo\n", 8), buf.String())
}
// TestDatumStatusRestart verifies that RestartDatum actually restarts a
// running datum: after a restart, the worker's reported datum start time
// must be strictly later than before.
func TestDatumStatusRestart(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestDatumDedup_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 20 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 20",
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	var jobID string
	var datumStarted time.Time
	// checkStatus waits for 'pipeline' to start and makes sure that each time
	// it's called, the datum being processes was started at a new and later time
	// (than the last time checkStatus was called)
	checkStatus := func() {
		require.NoError(t, backoff.Retry(func() error {
			// get the most recent job for the pipeline
			jobs, err := c.ListJob(pipeline, nil, nil)
			require.NoError(t, err)
			if len(jobs) == 0 {
				return fmt.Errorf("no jobs found")
			}
			jobID = jobs[0].Job.ID
			jobInfo, err := c.InspectJob(jobs[0].Job.ID, false)
			require.NoError(t, err)
			if len(jobInfo.WorkerStatus) == 0 {
				return fmt.Errorf("no worker statuses")
			}
			if jobInfo.WorkerStatus[0].JobID == jobInfo.Job.ID {
				// The first time this function is called, datumStarted is zero
				// so `Before` is true for any non-zero time.
				_datumStarted, err := types.TimestampFromProto(jobInfo.WorkerStatus[0].Started)
				require.NoError(t, err)
				require.True(t, datumStarted.Before(_datumStarted))
				datumStarted = _datumStarted
				return nil
			}
			return fmt.Errorf("worker status from wrong job")
		}, backoff.RetryEvery(time.Second).For(30*time.Second)))
	}
	checkStatus()
	// Restart the datum; its next reported start time must be later.
	require.NoError(t, c.RestartDatum(jobID, []string{"/file"}))
	checkStatus()
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
}
// TestUseMultipleWorkers creates a pipeline with Constant parallelism of 2
// over 20 slow datums and polls until both workers report a status for the
// current job, proving the work is actually spread across workers.
func TestUseMultipleWorkers(t *testing.T) {
	t.Skip("flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestUseMultipleWorkers_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// 20 files -> 20 datums (glob "/*"), plenty to keep 2 workers busy.
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 20; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 10 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 10",
		},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Get job info 2x/sec for 20s until we confirm two workers for the current job
	require.NoError(t, backoff.Retry(func() error {
		jobs, err := c.ListJob(pipeline, nil, nil)
		if err != nil {
			return fmt.Errorf("could not list job: %s", err.Error())
		}
		if len(jobs) == 0 {
			return fmt.Errorf("failed to find job")
		}
		jobInfo, err := c.InspectJob(jobs[0].Job.ID, false)
		if err != nil {
			return fmt.Errorf("could not inspect job: %s", err.Error())
		}
		if len(jobInfo.WorkerStatus) != 2 {
			return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus))
		}
		return nil
	}, backoff.RetryEvery(500*time.Millisecond).For(20*time.Second)))
}
// TestSystemResourceRequest doesn't create any jobs or pipelines, it
// just makes sure that when pachyderm is deployed, we give rethinkdb, pachd,
// and etcd default resource requests. This prevents them from overloading
// nodes and getting evicted, which can slow down or break a cluster.
func TestSystemResourceRequests(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	kubeClient := getKubeClient(t)
	// Expected resource requests for pachyderm system pods:
	// local deployments get small requests, cloud deployments larger ones;
	// either value is accepted below.
	defaultLocalMem := map[string]string{
		"pachd": "512M",
		"etcd":  "256M",
	}
	defaultLocalCPU := map[string]string{
		"pachd": "250m",
		"etcd":  "250m",
	}
	defaultCloudMem := map[string]string{
		"pachd": "3G",
		"etcd":  "2G",
	}
	defaultCloudCPU := map[string]string{
		"pachd": "1",
		"etcd":  "1",
	}
	// Get Pod info for 'app' from k8s
	var c v1.Container
	for _, app := range []string{"pachd", "etcd"} {
		// Retry because the pod may not be scheduled yet.
		err := backoff.Retry(func() error {
			podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(
				metav1.ListOptions{
					LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
						map[string]string{"app": app, "suite": "pachyderm"},
					)),
				})
			if err != nil {
				return err
			}
			if len(podList.Items) < 1 {
				return fmt.Errorf("could not find pod for %s", app) // retry
			}
			c = podList.Items[0].Spec.Containers[0]
			return nil
		}, backoff.NewTestingBackOff())
		require.NoError(t, err)
		// Make sure the pod's container has resource requests
		cpu, ok := c.Resources.Requests[v1.ResourceCPU]
		require.True(t, ok, "could not get CPU request for "+app)
		require.True(t, cpu.String() == defaultLocalCPU[app] ||
			cpu.String() == defaultCloudCPU[app])
		mem, ok := c.Resources.Requests[v1.ResourceMemory]
		require.True(t, ok, "could not get memory request for "+app)
		require.True(t, mem.String() == defaultLocalMem[app] ||
			mem.String() == defaultCloudMem[app])
	}
}
// TestPipelineResourceRequest creates a pipeline with a resource request, and
// makes sure that's passed to k8s (by inspecting the pipeline's pods)
func TestPipelineResourceRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceRequest")
	pipelineName := tu.UniqueString("TestPipelineResourceRequest_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Was an unkeyed composite literal (&pps.Pipeline{pipelineName}),
			// which go vet flags and which breaks if pps.Pipeline gains
			// fields; use the helper the rest of the file already uses.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	// Retry because the worker pod may not be scheduled yet.
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(
			metav1.ListOptions{
				LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
					map[string]string{"app": rcName},
				)),
			})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// Make sure a CPU and Memory request are both set
	cpu, ok := container.Resources.Requests[v1.ResourceCPU]
	require.True(t, ok)
	require.Equal(t, "500m", cpu.String())
	mem, ok := container.Resources.Requests[v1.ResourceMemory]
	require.True(t, ok)
	require.Equal(t, "100M", mem.String())
	// No GPU was requested, so none should appear in the requests.
	_, ok = container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelineResourceLimit creates a pipeline with a resource limit and
// makes sure it's passed to k8s (by inspecting the pipeline pod's container
// limits).
func TestPipelineResourceLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceLimit")
	pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Was an unkeyed composite literal (&pps.Pipeline{pipelineName}),
			// which go vet flags; use the helper the rest of the file uses.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			ResourceLimits: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	// Retry because the worker pod may not be scheduled yet.
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{
			LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
				map[string]string{"app": rcName, "suite": "pachyderm"},
			)),
		})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// Make sure a CPU and Memory limit are both set
	cpu, ok := container.Resources.Limits[v1.ResourceCPU]
	require.True(t, ok)
	require.Equal(t, "500m", cpu.String())
	mem, ok := container.Resources.Limits[v1.ResourceMemory]
	require.True(t, ok)
	require.Equal(t, "100M", mem.String())
	// No GPU was requested, so none should appear in the requests.
	_, ok = container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelineResourceLimitDefaults creates a pipeline with no explicit
// resource spec and verifies that no GPU resource is requested by default.
func TestPipelineResourceLimitDefaults(t *testing.T) {
	// We need to make sure GPU is set to 0 for k8s 1.8
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceLimit")
	pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Was an unkeyed composite literal (&pps.Pipeline{pipelineName}),
			// which go vet flags; use the helper the rest of the file uses.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	// Retry because the worker pod may not be scheduled yet.
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{
			LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
				map[string]string{"app": rcName, "suite": "pachyderm"},
			)),
		})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// With no ResourceSpec in the request, no GPU should be requested.
	_, ok := container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelinePartialResourceRequest creates three pipelines whose resource
// requests are progressively less specified (CPU+memory, memory only, empty
// spec) and verifies that all of them still reach the RUNNING state.
func TestPipelinePartialResourceRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelinePartialResourceRequest")
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	// NOTE: the pipeline literals below were unkeyed composite literals
	// (&pps.Pipeline{...}), which go vet flags; use client.NewPipeline.
	// Pipeline 0: both CPU and memory requested.
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 0)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ResourceRequests: &pps.ResourceSpec{
				Cpu:    0.5,
				Memory: "100M",
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Pipeline 1: only memory requested.
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 1)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Pipeline 2: an empty resource spec.
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 2)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ResourceRequests: &pps.ResourceSpec{},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// All three pipelines must eventually be RUNNING.
	require.NoError(t, backoff.Retry(func() error {
		for i := 0; i < 3; i++ {
			pipelineInfo, err := c.InspectPipeline(fmt.Sprintf("%s-%d", pipelineName, i))
			require.NoError(t, err)
			if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
				return fmt.Errorf("pipeline not in running state")
			}
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineLargeOutput creates 100 empty input files and a 4-way-parallel
// pipeline that writes 100 randomly named files per datum, then verifies the
// resulting output commit flushes successfully.
func TestPipelineLargeOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineInputDataModification_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 100
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(""))
		// The original discarded this error; check it so setup failures
		// surface here rather than as a confusing flush failure later.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"for i in `seq 1 100`; do touch /pfs/out/$RANDOM; done",
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
}
// TestUnionInput exercises union inputs in several shapes: a plain union of
// four atom inputs, unions of crosses, crosses of unions, and name-aliased
// variants (including the invalid alias combinations, which must be
// rejected). Each subtest creates a pipeline over the same four input repos
// and asserts on output file sizes, which reflect how many datums wrote to
// each path.
func TestUnionInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	var repos []string
	for i := 0; i < 4; i++ {
		repos = append(repos, tu.UniqueString("TestUnionInput"))
		require.NoError(t, c.CreateRepo(repos[i]))
	}
	numFiles := 2
	var commits []*pfs.Commit
	for _, repo := range repos {
		commit, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		commits = append(commits, commit)
		for i := 0; i < numFiles; i++ {
			// Each repo gets file-0 and file-1, each holding a single byte.
			_, err = c.PutFile(repo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
			// The original discarded this error; check it so setup failures
			// don't masquerade as assertion failures in the subtests below.
			require.NoError(t, err)
		}
		require.NoError(t, c.FinishCommit(repo, "master"))
	}
	t.Run("union all", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp /pfs/*/* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewAtomInput(repos[0], "/*"),
				client.NewAtomInput(repos[1], "/*"),
				client.NewAtomInput(repos[2], "/*"),
				client.NewAtomInput(repos[3], "/*"),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "")
		require.NoError(t, err)
		require.Equal(t, 2, len(fileInfos))
		for _, fi := range fileInfos {
			// 1 byte per repo
			require.Equal(t, uint64(len(repos)), fi.SizeBytes)
		}
	})
	t.Run("union crosses", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/TestUnionInput* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInput(repos[0], "/*"),
					client.NewAtomInput(repos[1], "/*"),
				),
				client.NewCrossInput(
					client.NewAtomInput(repos[2], "/*"),
					client.NewAtomInput(repos[3], "/*"),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, repo := range repos {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, repo)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(2), fi.SizeBytes)
			}
		}
	})
	t.Run("cross unions", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/TestUnionInput* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInput(repos[0], "/*"),
					client.NewAtomInput(repos[1], "/*"),
				),
				client.NewUnionInput(
					client.NewAtomInput(repos[2], "/*"),
					client.NewAtomInput(repos[3], "/*"),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, repo := range repos {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, repo)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(4), fi.SizeBytes)
			}
		}
	})
	t.Run("union alias", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			// All four atoms share the alias "in", so they appear under one
			// path inside the job.
			client.NewUnionInput(
				client.NewAtomInputOpts("in", repos[0], "", "/*", false),
				client.NewAtomInputOpts("in", repos[1], "", "/*", false),
				client.NewAtomInputOpts("in", repos[2], "", "/*", false),
				client.NewAtomInputOpts("in", repos[3], "", "/*", false),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "in")
		require.NoError(t, err)
		require.Equal(t, 2, len(fileInfos))
		for _, fi := range fileInfos {
			require.Equal(t, uint64(4), fi.SizeBytes)
		}
	})
	t.Run("union cross alias", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		// Duplicate names within a single cross are invalid.
		require.YesError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in1", repos[1], "", "/*", false),
				),
				client.NewCrossInput(
					client.NewAtomInputOpts("in2", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[1], "", "/*", false),
				),
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, dir := range []string{"in1", "in2"} {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(4), fi.SizeBytes)
			}
		}
	})
	t.Run("cross union alias", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		// Different names within a single union are invalid.
		require.YesError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[1], "", "/*", false),
				),
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in1", repos[1], "", "/*", false),
				),
				client.NewUnionInput(
					client.NewAtomInputOpts("in2", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, dir := range []string{"in1", "in2"} {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(8), fi.SizeBytes)
			}
		}
	})
}
// TestIncrementalOverwritePipeline runs an incremental pipeline that keeps a
// running sum in /pfs/out/sum while the input file "data" is deleted and
// re-written on every commit, then checks the final sum equals the sum of
// 0..150.
func TestIncrementalOverwritePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestIncrementalOverwritePipeline_data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				// Accumulate the input value into the previous output sum.
				Stdin: []string{
					"touch /pfs/out/sum",
					fmt.Sprintf("SUM=`cat /pfs/%s/data /pfs/out/sum | awk '{sum+=$1} END {print sum}'`", repo),
					"echo $SUM > /pfs/out/sum",
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(repo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	wantSum := 0
	for i := 0; i <= 150; i++ {
		// Overwrite "data" with the next value in its own commit.
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.DeleteFile(repo, "master", "data"))
		_, err = c.PutFile(repo, "master", "data", strings.NewReader(fmt.Sprintf("%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
		wantSum += i
	}
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	var out bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "sum", 0, 0, &out))
	require.Equal(t, fmt.Sprintf("%d\n", wantSum), out.String())
}
// TestIncrementalAppendPipeline is the append-mode twin of the overwrite
// test: each commit appends one line to the split file "data" via a
// PutFileSplitWriter, and the incremental pipeline keeps a running sum of
// every line written so far.
func TestIncrementalAppendPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestIncrementalAppendPipeline_data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				// "data" is a split file, so its pieces live under data/*.
				Stdin: []string{
					"touch /pfs/out/sum",
					fmt.Sprintf("SUM=`cat /pfs/%s/data/* /pfs/out/sum | awk '{sum+=$1} END {print sum}'`", repo),
					"echo $SUM > /pfs/out/sum",
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(repo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	wantSum := 0
	for i := 0; i <= 150; i++ {
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		// Append one line to the split file in this commit.
		w, err := c.PutFileSplitWriter(repo, "master", "data", pfs.Delimiter_LINE, 0, 0, false)
		require.NoError(t, err)
		_, err = w.Write([]byte(fmt.Sprintf("%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, w.Close())
		require.NoError(t, c.FinishCommit(repo, "master"))
		wantSum += i
	}
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	var out bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "sum", 0, 0, &out))
	require.Equal(t, fmt.Sprintf("%d\n", wantSum), out.String())
}
// TestIncrementalOneFile verifies incremental processing when the input glob
// matches a single nested file: two commits write "foo" then "bar" to
// /dir/file, and the pipeline's output ends up with both lines appended.
func TestIncrementalOneFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestIncrementalOneFile")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"find /pfs",
					fmt.Sprintf("cp /pfs/%s/dir/file /pfs/out/file", repo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(repo, "/dir/file"),
			Incremental: true,
		})
	require.NoError(t, err)
	// Two commits, each rewriting the same nested file.
	for _, content := range []string{"foo\n", "bar\n"} {
		_, err = c.StartCommit(repo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(repo, "master", "/dir/file", strings.NewReader(content))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
	}
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	var out bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "file", 0, 0, &out))
	require.Equal(t, "foo\nbar\n", out.String())
}
// TestIncrementalFailure checks that an incremental pipeline recovers after a
// failed job: the first (empty) commit makes the cp fail because /file does
// not exist, and the second commit, which adds /file, still produces the
// expected output.
func TestIncrementalFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("TestIncrementalFailure")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	// First commit is empty, so the job for it fails (no /file to cp).
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// (Removed a duplicated require.NoError(t, err) here that re-checked the
	// already-checked StartCommit error.)
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "file", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
}
// TestGarbageCollection exercises the GarbageCollect RPC end to end. It
// establishes a baseline of zero objects/tags, creates an input repo and a
// pipeline, then verifies that: GC fails while the pipeline runs, GC with a
// stopped pipeline removes nothing, deleting the pipeline frees exactly the
// expected objects/tags, a full DeleteAll+GC returns to the baseline, and a
// re-created pipeline serves correct data (regression test for a stale-cache
// bug).
//
// NOTE(review): the exact counts asserted below (1 tag, 3 objects) encode
// the precise sequence of operations in this test; changing any step will
// likely change them.
func TestGarbageCollection(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	// Delete everything, then run garbage collection and finally check that
	// we're at a baseline of 0 tags and 0 objects.
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.GarbageCollect())
	originalObjects := getAllObjects(t, c)
	originalTags := getAllTags(t, c)
	require.Equal(t, 0, len(originalObjects))
	require.Equal(t, 0, len(originalTags))
	dataRepo := tu.UniqueString("TestGarbageCollection")
	pipeline := tu.UniqueString("TestGarbageCollectionPipeline")
	var commit *pfs.Commit
	var err error
	// Helper closure so the same repo+pipeline setup can be repeated after
	// the full teardown near the end of the test.
	createInputAndPipeline := func() {
		require.NoError(t, c.CreateRepo(dataRepo))
		commit, err = c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo"))
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, "bar", strings.NewReader("bar"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		// This pipeline copies foo and modifies bar
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cp /pfs/%s/foo /pfs/out/foo", dataRepo),
				fmt.Sprintf("cp /pfs/%s/bar /pfs/out/bar", dataRepo),
				"echo bar >> /pfs/out/bar",
			},
			nil,
			client.NewAtomInput(dataRepo, "/"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
	}
	createInputAndPipeline()
	objectsBefore := getAllObjects(t, c)
	tagsBefore := getAllTags(t, c)
	// Try to GC without stopping the pipeline.
	require.YesError(t, c.GarbageCollect())
	// Now stop the pipeline and GC
	require.NoError(t, c.StopPipeline(pipeline))
	// Retry: the pipeline may take a moment to actually stop.
	require.NoError(t, backoff.Retry(c.GarbageCollect, backoff.NewTestingBackOff()))
	// Check that data still exists in the input repo
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	pis, err := c.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 1, len(pis))
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf))
	require.Equal(t, "barbar\n", buf.String())
	// Check that no objects or tags have been removed, since we just ran GC
	// without deleting anything.
	objectsAfter := getAllObjects(t, c)
	tagsAfter := getAllTags(t, c)
	require.Equal(t, len(tagsBefore), len(tagsAfter))
	require.Equal(t, len(objectsBefore), len(objectsAfter))
	objectsBefore = objectsAfter
	tagsBefore = tagsAfter
	// Now delete the pipeline and GC
	require.NoError(t, c.DeletePipeline(pipeline))
	require.NoError(t, c.GarbageCollect())
	// We should've deleted one tag since the pipeline has only processed
	// one datum.
	// We should've deleted 3 objects: the object referenced by
	// the tag, the modified "bar" file and the pipeline's spec.
	objectsAfter = getAllObjects(t, c)
	tagsAfter = getAllTags(t, c)
	require.Equal(t, 1, len(tagsBefore)-len(tagsAfter))
	require.Equal(t, 3, len(objectsBefore)-len(objectsAfter))
	// Now we delete everything.
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.GarbageCollect())
	// Since we've now deleted everything that we created in this test,
	// the tag count and object count should be back to the originals.
	objectsAfter = getAllObjects(t, c)
	tagsAfter = getAllTags(t, c)
	require.Equal(t, 0, len(tagsAfter))
	require.Equal(t, 0, len(objectsAfter))
	// Now we create the pipeline again and check that all data is
	// accessible. This is important because there used to be a bug
	// where we failed to invalidate the cache such that the objects in
	// the cache were referencing blocks that had been GC-ed.
	createInputAndPipeline()
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf))
	require.Equal(t, "barbar\n", buf.String())
}
// TestPipelineWithStats creates a stats-enabled pipeline over 500 files and
// verifies ListDatum works before and after job completion (including
// pagination) and that InspectDatum reports SUCCESS.
func TestPipelineWithStats(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStats_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 500
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// The original discarded this error; check it.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// The original never checked the CreatePipeline error (err was silently
	// overwritten by FlushCommit below).
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Check we can list datums before job completion
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	require.Equal(t, 1, len(resp.DatumInfos[0].Data))
	// Check we can list datums before job completion w pagination
	resp, err = c.ListDatum(jobs[0].Job.ID, 100, 0)
	require.NoError(t, err)
	require.Equal(t, 100, len(resp.DatumInfos))
	require.Equal(t, int64(numFiles/100), resp.TotalPages)
	require.Equal(t, int64(0), resp.Page)
	// Block on the job being complete before we call ListDatum again so we're
	// sure the datums have actually been processed.
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	require.Equal(t, 1, len(resp.DatumInfos[0].Data))
	for _, datum := range resp.DatumInfos {
		// (Removed a stale require.NoError(t, err) here; err was already
		// checked above and never changes inside this loop.)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	}
	// Make sure inspect-datum works
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
}
// TestPipelineWithStatsFailedDatums runs a stats-enabled pipeline whose
// command fails randomly (~half the datums) and verifies ListDatum sorts
// failed datums first and successful datums last, and that InspectDatum
// reports the FAILED state.
func TestPipelineWithStatsFailedDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsFailedDatums_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 200
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// The original discarded this error; check it.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				// $RANDOM is uniform over 0..32767, so roughly half the
				// datums exit 1.
				Stdin: []string{
					"if [ $RANDOM -gt 15000 ]; then exit 1; fi",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// The original never checked the CreatePipeline error (err was silently
	// overwritten by FlushCommit below).
	require.NoError(t, err)
	_, err = c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	// Without this sleep, I get no results from list-job
	// See issue: https://github.com/pachyderm/pachyderm/issues/2181
	time.Sleep(15 * time.Second)
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	// First entry should be failed
	require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State)
	// Last entry should be success
	require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State)
	// Make sure inspect-datum works for failed state
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_FAILED, datum.State)
}
// TestPipelineWithStatsPaginated verifies ListDatum pagination on a
// stats-enabled pipeline: page size, TotalPages, Page, state-sorted results
// (failed first, success last), and an error for an out-of-range page.
func TestPipelineWithStatsPaginated(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsPaginated_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numPages := int64(2)
	pageSize := int64(100)
	numFiles := int(numPages * pageSize)
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// The original discarded this error; check it.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				// Fail roughly half the datums so both FAILED and SUCCESS
				// states appear in the listing.
				Stdin: []string{
					"if [ $RANDOM -gt 15000 ]; then exit 1; fi",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// The original never checked the CreatePipeline error (err was silently
	// overwritten by FlushCommit below).
	require.NoError(t, err)
	_, err = c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	var jobs []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobs, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobs) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobs))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, pageSize, 0)
	require.NoError(t, err)
	require.Equal(t, pageSize, int64(len(resp.DatumInfos)))
	require.Equal(t, int64(numFiles)/pageSize, resp.TotalPages)
	// First entry should be failed
	require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State)
	// numPages is already int64; the original's int64(numPages-1) conversion
	// was redundant.
	resp, err = c.ListDatum(jobs[0].Job.ID, pageSize, numPages-1)
	require.NoError(t, err)
	require.Equal(t, pageSize, int64(len(resp.DatumInfos)))
	require.Equal(t, numPages-1, resp.Page)
	// Last entry should be success
	require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State)
	// Make sure we get error when requesting pages too high
	_, err = c.ListDatum(jobs[0].Job.ID, pageSize, numPages)
	require.YesError(t, err)
}
// TestPipelineWithStatsAcrossJobs runs two jobs on a stats-enabled pipeline
// and verifies the second job's ListDatum includes both the newly processed
// datums (SUCCESS) and the first job's datums (SKIPPED), sorted by state.
func TestPipelineWithStatsAcrossJobs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsAcrossJobs_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 500
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("foo-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// The original discarded this error; check it.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("StatsAcrossJobs")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// The original never checked the CreatePipeline error (err was silently
	// overwritten by FlushCommit below).
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit2.ID, fmt.Sprintf("bar-%d", i), strings.NewReader(strings.Repeat("bar\n", 100)))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err = c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	// we should see all the datums from the first job (which should be skipped)
	// in addition to all the new datums processed in this job
	require.Equal(t, numFiles*2, len(resp.DatumInfos))
	datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	// Test datums marked as skipped correctly
	// (also tests list datums are sorted by state)
	datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[numFiles].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SKIPPED, datum.State)
}
// TestPipelineWithStatsSkippedEdgeCase covers a delete/re-add sequence:
// add a file in commit1, delete it in commit2, add it again in commit3.
// All datums of the final job must report SKIPPED (the re-added file is
// content-identical to the one already processed).
func TestPipelineWithStatsSkippedEdgeCase(t *testing.T) {
	// If I add a file in commit1, delete it in commit2, add it again in commit 3 ...
	// the datum will be marked as success on the 3rd job, even though it should be marked as skipped
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsSkippedEdgeCase_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 10
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// The original discarded this error; check it.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("StatsEdgeCase")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// The original never checked the CreatePipeline error (err was silently
	// overwritten by FlushCommit below).
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	for _, datum := range resp.DatumInfos {
		// (Removed a stale require.NoError(t, err) here; err was already
		// checked above and never changes inside this loop.)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	}
	// Make sure inspect-datum works
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	// Create a second commit that deletes a file in commit1
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	err = c.DeleteFile(dataRepo, commit2.ID, "file-0")
	// The original discarded the DeleteFile error; check it.
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Create a third commit that re-adds the file removed in commit2
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "file-0", strings.NewReader(strings.Repeat("foo\n", 100)))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err = c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	// (Removed the dead `states` slice the original accumulated but never
	// read.)
	for _, datum := range resp.DatumInfos {
		require.Equal(t, pps.DatumState_SKIPPED, datum.State)
	}
}
// TestIncrementalSharedProvenance verifies that incremental pipelines with
// cross inputs that share provenance are rejected at creation time.
// pipeline2 crosses dataRepo with pipeline1 (itself derived from dataRepo),
// and pipeline4 crosses pipeline1 with pipeline3 (both derived from
// dataRepo); both incremental CreatePipeline calls must fail.
func TestIncrementalSharedProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalSharedProvenance_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// pipeline1 consumes dataRepo directly; it is valid and non-incremental.
	pipeline1 := tu.UniqueString("pipeline1")
	require.NoError(t, c.CreatePipeline(
		pipeline1,
		"",
		[]string{"true"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// pipeline2 crosses dataRepo with pipeline1 — the two inputs share
	// provenance (dataRepo), so an incremental pipeline must be rejected.
	pipeline2 := tu.UniqueString("pipeline2")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline2),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewCrossInput(
				client.NewAtomInput(dataRepo, "/"),
				client.NewAtomInput(pipeline1, "/"),
			),
			Incremental: true,
		})
	require.YesError(t, err)
	// pipeline3 is another valid pipeline built from dataRepo.
	pipeline3 := tu.UniqueString("pipeline3")
	require.NoError(t, c.CreatePipeline(
		pipeline3,
		"",
		[]string{"true"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// pipeline4 crosses pipeline1 with pipeline3 — both trace back to
	// dataRepo, so this incremental pipeline must also be rejected.
	pipeline4 := tu.UniqueString("pipeline4")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline4),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewCrossInput(
				client.NewAtomInput(pipeline1, "/"),
				client.NewAtomInput(pipeline3, "/"),
			),
			Incremental: true,
		})
	require.YesError(t, err)
}
// TestSkippedDatums runs a stats-enabled copy pipeline over two successive
// commits and checks each commit flows through to an output commit.
// The datum-level verification at the end is currently commented out.
func TestSkippedDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	// require.NoError(t, c.CreatePipeline(
	_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// The output of the first commit must contain the copied file.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfoIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// NOTE(review): the datum-skipping assertions below are disabled;
	// presumably they were flaky or the ListDatum signature changed — confirm
	// before re-enabling.
	/*
		jobs, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		require.Equal(t, 2, len(jobs))
		datums, err := c.ListDatum(jobs[1].Job.ID)
		fmt.Printf("got datums: %v\n", datums)
		require.NoError(t, err)
		require.Equal(t, 2, len(datums))
		datum, err := c.InspectDatum(jobs[1].Job.ID, datums[0].ID)
		require.NoError(t, err)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	*/
}
// TestOpencvDemo runs the two-stage OpenCV demo (edges + montage) from the
// example pipeline specs in doc/examples/opencv and waits for both output
// commits to appear.
//
// Fix: the local variable was named `bytes`, shadowing the imported "bytes"
// package for the rest of the function; renamed to pipelineJSON.
func TestOpencvDemo(t *testing.T) {
	t.Skip("flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.CreateRepo("images"))
	commit, err := c.StartCommit("images", "master")
	require.NoError(t, err)
	require.NoError(t, c.PutFileURL("images", "master", "46Q8nDz.jpg", "http://imgur.com/46Q8nDz.jpg", false, false))
	require.NoError(t, c.FinishCommit("images", "master"))
	// First stage: edge detection, loaded from the example spec.
	pipelineJSON, err := ioutil.ReadFile("../../doc/examples/opencv/edges.json")
	require.NoError(t, err)
	createPipelineRequest := &pps.CreatePipelineRequest{}
	require.NoError(t, json.Unmarshal(pipelineJSON, createPipelineRequest))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest)
	require.NoError(t, err)
	// Second stage: montage of the edge-detected images.
	pipelineJSON, err = ioutil.ReadFile("../../doc/examples/opencv/montage.json")
	require.NoError(t, err)
	createPipelineRequest = &pps.CreatePipelineRequest{}
	require.NoError(t, json.Unmarshal(pipelineJSON, createPipelineRequest))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest)
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	// One output commit per pipeline stage.
	require.Equal(t, 2, len(commitInfos))
}
// TestCronPipeline exercises cron inputs: a simple cron-driven pipeline
// chain, a cross of a cron input with a regular atom input, and incremental
// cron pipelines (including one whose user code fails randomly).
//
// Fixes: the PutFile error in CronAtomCross and the BlockCommit error in
// CronIncrementalFailures were silently dropped; both are now checked,
// matching the sibling subtests.
func TestCronPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	t.Run("SimpleCron", func(t *testing.T) {
		// pipeline1 copies the cron tick file; pipeline2 chains off pipeline1.
		pipeline1 := tu.UniqueString("cron1-")
		require.NoError(t, c.CreatePipeline(
			pipeline1,
			"",
			[]string{"cp", "/pfs/time/time", "/pfs/out/time"},
			nil,
			nil,
			client.NewCronInput("time", "@every 20s"),
			"",
			false,
		))
		pipeline2 := tu.UniqueString("cron2-")
		require.NoError(t, c.CreatePipeline(
			pipeline2,
			"",
			[]string{"cp", fmt.Sprintf("/pfs/%s/time", pipeline1), "/pfs/out/time"},
			nil,
			nil,
			client.NewAtomInput(pipeline1, "/*"),
			"",
			false,
		))
		// Subscribe to the pipeline1 cron repo and wait for inputs.
		repo := fmt.Sprintf("%s_%s", pipeline1, "time")
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() // cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		commitInfo, err := iter.Next()
		require.NoError(t, err)
		// Both downstream pipelines should produce an output commit.
		commitIter, err := c.FlushCommit([]*pfs.Commit{commitInfo.Commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 2, len(commitInfos))
	})
	// Create a non-cron input repo, and test a pipeline with a cross of cron and
	// non-cron inputs
	t.Run("CronAtomCross", func(t *testing.T) {
		dataRepo := tu.UniqueString("TestCronPipeline_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		pipeline3 := tu.UniqueString("cron3-")
		require.NoError(t, c.CreatePipeline(
			pipeline3,
			"",
			[]string{"bash"},
			[]string{
				"cp /pfs/time/time /pfs/out/time",
				fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
			},
			nil,
			client.NewCrossInput(
				client.NewCronInput("time", "@every 20s"),
				client.NewAtomInput(dataRepo, "/"),
			),
			"",
			false,
		))
		dataCommit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("file"))
		require.NoError(t, err) // was previously dropped
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		repo := fmt.Sprintf("%s_%s", pipeline3, "time")
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel() // cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		commitInfo, err := iter.Next()
		require.NoError(t, err)
		commitIter, err := c.FlushCommit([]*pfs.Commit{dataCommit, commitInfo.Commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
	})
	t.Run("CronIncremental", func(t *testing.T) {
		pipeline := tu.UniqueString("CronIncremental-")
		req := &pps.CreatePipelineRequest{
			Pipeline: &pps.Pipeline{Name: pipeline},
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"cat /pfs/time/time >> /pfs/out/time",
					"echo \"\" >> /pfs/out/time",
				},
			},
			Input:       client.NewCronInput("time", "@every 10s"),
			Incremental: true,
		}
		_, err := c.PpsAPIClient.CreatePipeline(c.Ctx(), req)
		require.NoError(t, err)
		// Subscribe to the pipeline output and wait for five incremental runs;
		// each run appends one more line to /pfs/out/time.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() // cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(pipeline, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		for i := 0; i < 5; i++ {
			commitInfo, err := iter.Next()
			require.NoError(t, err)
			_, err = c.BlockCommit(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID)
			require.NoError(t, err)
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "time", 0, 0, &buf))
			require.Equal(t, i+2, len(strings.Split(buf.String(), "\n")))
		}
	})
	t.Run("CronIncrementalFailures", func(t *testing.T) {
		pipeline := tu.UniqueString("CronIncremental-")
		// The user code exits 1 roughly half the time to exercise recovery of
		// incremental cron pipelines from failed runs.
		req := &pps.CreatePipelineRequest{
			Pipeline: &pps.Pipeline{Name: pipeline},
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"FLIP=$(($(($RANDOM%10))%2))",
					"if [ $FLIP -eq 0 ]; then exit 1; fi",
					"cat /pfs/time/time >> /pfs/out/time",
					"echo \"\" >> /pfs/out/time",
				},
			},
			Input:       client.NewCronInput("time", "@every 10s"),
			Incremental: true,
		}
		_, err := c.PpsAPIClient.CreatePipeline(c.Ctx(), req)
		require.NoError(t, err)
		// Subscribe to the pipeline output and wait for five runs.
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() // cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(pipeline, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		for i := 0; i < 5; i++ {
			commitInfo, err := iter.Next()
			require.NoError(t, err)
			_, err = c.BlockCommit(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID)
			require.NoError(t, err) // was previously dropped
		}
	})
}
// TestSelfReferentialPipeline checks that a pipeline whose input is its own
// output repo is rejected at creation time.
func TestSelfReferentialPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	name := tu.UniqueString("pipeline")
	// A pipeline reading from its own output would loop forever, so the
	// create call must fail.
	err := c.CreatePipeline(
		name,
		"",
		[]string{"true"},
		nil,
		nil,
		client.NewAtomInput(name, "/"),
		"",
		false,
	)
	require.YesError(t, err)
}
// TestPipelineBadImage creates two pipelines with unpullable Docker images
// and verifies both eventually enter PIPELINE_FAILURE with a non-empty
// failure reason. Creation itself succeeds; the failure is asynchronous.
func TestPipelineBadImage(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// "BadImage" is not a valid image reference.
	pipeline1 := tu.UniqueString("bad_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline1,
		"BadImage",
		[]string{"true"},
		nil,
		nil,
		client.NewCronInput("time", "@every 20s"),
		"",
		false,
	))
	// Well-formed reference, but the repository/tag does not exist.
	pipeline2 := tu.UniqueString("bad_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline2,
		"bs/badimage:vcrap",
		[]string{"true"},
		nil,
		nil,
		client.NewCronInput("time", "@every 20s"),
		"",
		false,
	))
	// Poll until both pipelines report failure with a reason.
	require.NoError(t, backoff.Retry(func() error {
		for _, pipeline := range []string{pipeline1, pipeline2} {
			pipelineInfo, err := c.InspectPipeline(pipeline)
			if err != nil {
				return err
			}
			if pipelineInfo.State != pps.PipelineState_PIPELINE_FAILURE {
				return fmt.Errorf("pipeline %s should have failed", pipeline)
			}
			require.True(t, pipelineInfo.Reason != "")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestFixPipeline creates a pipeline whose user code always fails, waits for
// the failed job, then updates the pipeline with working code (update=true)
// and verifies a second, successful job runs over the same input.
func TestFixPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestFixPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	pipelineName := tu.UniqueString("TestFixPipeline_pipeline")
	// "exit 1" is not a real binary, so every datum fails.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"exit 1"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Wait for exactly one job, and for it to fail.
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
		require.NoError(t, err)
		require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
		return nil
	}, backoff.NewTestingBackOff()))
	// Update the pipeline, this will not create a new pipeline as reprocess
	// isn't set to true.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo bar >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	// The update triggers a second job, which should now succeed.
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 2 {
			return fmt.Errorf("expected 2 jobs, got %d", len(jobInfos))
		}
		jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
		require.NoError(t, err)
		require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestListJobOutput verifies that ListJob can filter by output commit, both
// by an explicit commit and by a branch-head ("master") commit reference.
func TestListJobOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestListJobOutput_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Job listing is eventually consistent with the flushed commit, so retry.
	require.NoError(t, backoff.Retry(func() error {
		// Filter by the concrete output commit.
		jobInfos, err := c.ListJob("", nil, commitInfos[0].Commit)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 job")
		}
		// Filter by the branch head; should resolve to the same job.
		jobInfos, err = c.ListJob("", nil, client.NewCommit(pipeline, "master"))
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 job")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineEnvVarAlias verifies that the worker exposes an environment
// variable named after the input repo whose value is the datum's path, by
// copying "$<repo>" to /pfs/out and checking each output file's contents.
//
// Fix: the PutFile error inside the setup loop was never checked.
func TestPipelineEnvVarAlias(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineEnvVarAlias_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 10
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
		require.NoError(t, err) // was previously dropped
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"env",
			// $<dataRepo> is the env-var alias for the datum's file path.
			fmt.Sprintf("cp $%s /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Every input file should appear in the output with its original content.
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("%d", i), buf.String())
	}
}
// TestMaxQueueSize runs a slow pipeline with MaxQueueSize=1 and repeatedly
// samples worker status to assert no worker's queue ever exceeds one datum.
func TestMaxQueueSize(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMaxQueueSize_input")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	// 20 input files so the job stays busy long enough to sample.
	for i := 0; i < 20; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("TestMaxQueueSize_output")
	// This pipeline sleeps for 10 secs per datum
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"sleep 5",
				},
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 2,
			},
			// The property under test: at most one datum queued per worker.
			MaxQueueSize: 1,
			ChunkSpec: &pps.ChunkSpec{
				Number: 10,
			},
		})
	require.NoError(t, err)
	var jobInfo *pps.JobInfo
	// Sample worker status 10 times while the job runs.
	for i := 0; i < 10; i++ {
		require.NoError(t, backoff.Retry(func() error {
			jobs, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return fmt.Errorf("could not list job: %s", err.Error())
			}
			if len(jobs) == 0 {
				return fmt.Errorf("failed to find job")
			}
			jobInfo, err = c.InspectJob(jobs[0].Job.ID, false)
			if err != nil {
				return fmt.Errorf("could not inspect job: %s", err.Error())
			}
			if len(jobInfo.WorkerStatus) != 2 {
				return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus))
			}
			return nil
		}, backoff.RetryEvery(500*time.Millisecond).For(60*time.Second)))
		for _, status := range jobInfo.WorkerStatus {
			if status.QueueSize > 1 {
				t.Fatalf("queue size too big: %d", status.QueueSize)
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// TestHTTPAuth exercises pachd's HTTP auth endpoints: login sets the auth
// cookie, logout clears it, and unknown routes return 404.
//
// Fixes: the SplitHostPort error was unchecked before its result was used;
// req was dereferenced before its NewRequest error was checked; the final
// response body was never closed.
func TestHTTPAuth(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // was previously dropped
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	// Try to login
	token := "abbazabbadoo"
	form := url.Values{}
	form.Add("Token", token)
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/login", httpAPIAddr), strings.NewReader(form.Encode()))
	require.NoError(t, err) // check before touching req
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	// Login should set exactly one cookie carrying the token.
	require.Equal(t, 1, len(resp.Cookies()))
	require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name)
	require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin"))
	require.Equal(t, token, resp.Cookies()[0].Value)
	// Try to logout
	req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logout", httpAPIAddr), nil)
	require.NoError(t, err)
	resp, err = httpClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, 1, len(resp.Cookies()))
	require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name)
	require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin"))
	// The cookie should be unset now
	require.Equal(t, "", resp.Cookies()[0].Value)
	// Make sure we get 404s for non existent routes
	req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logoutzz", httpAPIAddr), nil)
	require.NoError(t, err)
	resp, err = httpClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close() // was previously leaked
	require.Equal(t, 404, resp.StatusCode)
}
// TestHTTPGetFile exercises pachd's HTTP file endpoint: raw content fetch,
// download mode (Content-Disposition: attachment), and MIME type detection.
//
// Fixes: the first PutFile error and the SplitHostPort error were unchecked.
func TestHTTPGetFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestHTTPGetFile_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err) // was previously dropped
	f, err := os.Open("../../etc/testing/artifacts/giphy.gif")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "giphy.gif", f)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // was previously dropped
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	// Try to get raw contents
	resp, err := http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contents, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, "foo", string(contents))
	// Raw fetches must not set Content-Disposition.
	contentDisposition := resp.Header.Get("Content-Disposition")
	require.Equal(t, "", contentDisposition)
	// Try to get file for downloading
	resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file?download=true", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contents, err = ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, "foo", string(contents))
	contentDisposition = resp.Header.Get("Content-Disposition")
	require.Equal(t, "attachment; filename=\"file\"", contentDisposition)
	// Make sure MIME type is set
	resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/giphy.gif", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contentDisposition = resp.Header.Get("Content-Type")
	require.Equal(t, "image/gif", contentDisposition)
}
// TestService creates a service pipeline running a simple HTTP server over
// /pfs, then fetches files through the service directly, through pachd's
// HTTP proxy, and again after a second input commit.
//
// Fixes: both StartCommit errors and the SplitHostPort error were unchecked,
// and HTTP response bodies in the retry closures were never closed (leaking
// connections across retries).
func TestService(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestService_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err) // was previously dropped
	_, err = c.PutFile(dataRepo, commit1.ID, "file1", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipelineservice")
	// Serve /pfs over HTTP on container port 8000, exposed via NodePort 31800.
	require.NoError(t, c.CreatePipelineService(
		pipeline,
		"trinitronx/python-simplehttpserver",
		[]string{"sh"},
		[]string{
			"cd /pfs",
			"exec python -m SimpleHTTPServer 8000",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		false,
		8000,
		31800,
	))
	time.Sleep(10 * time.Second)
	// Lookup the address for 'pipelineservice' (different inside vs outside k8s)
	serviceAddr := func() string {
		// Hack: detect if running inside the cluster by looking for this env var
		if _, ok := os.LookupEnv("KUBERNETES_PORT"); !ok {
			// Outside cluster: Re-use external IP and external port defined above
			clientAddr := c.GetAddress()
			host, _, err := net.SplitHostPort(clientAddr)
			require.NoError(t, err)
			return net.JoinHostPort(host, "31800")
		}
		// Get k8s service corresponding to pachyderm service above--must access
		// via internal cluster IP, but we don't know what that is
		var address string
		kubeClient := getKubeClient(t)
		backoff.Retry(func() error {
			svcs, err := kubeClient.CoreV1().Services("default").List(metav1.ListOptions{})
			require.NoError(t, err)
			for _, svc := range svcs.Items {
				// Pachyderm actually generates two services for pipelineservice: one
				// for pachyderm (a ClusterIP service) and one for the user container
				// (a NodePort service, which is the one we want)
				rightName := strings.Contains(svc.Name, "pipelineservice")
				rightType := svc.Spec.Type == v1.ServiceTypeNodePort
				if !rightName || !rightType {
					continue
				}
				host := svc.Spec.ClusterIP
				port := fmt.Sprintf("%d", svc.Spec.Ports[0].Port)
				address = net.JoinHostPort(host, port)
				return nil
			}
			return fmt.Errorf("no matching k8s service found")
		}, backoff.NewTestingBackOff())
		require.NotEqual(t, "", address)
		return address
	}()
	// Fetch file1 directly from the service.
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(fmt.Sprintf("http://%s/%s/file1", serviceAddr, dataRepo))
		if err != nil {
			return err
		}
		defer resp.Body.Close() // was previously leaked
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "foo" {
			return fmt.Errorf("wrong content for file1: expected foo, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Fetch file1 again, this time through pachd's HTTP service proxy.
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // was previously dropped
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	url := fmt.Sprintf("http://%s/v1/pps/services/%s/%s/file1", httpAPIAddr, pipeline, dataRepo)
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		defer resp.Body.Close() // was previously leaked
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "foo" {
			return fmt.Errorf("wrong content for file1: expected foo, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Add a second file and verify the service picks it up.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err) // was previously dropped
	_, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(fmt.Sprintf("http://%s/%s/file2", serviceAddr, dataRepo))
		if err != nil {
			return err
		}
		defer resp.Body.Close() // was previously leaked
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "bar" {
			return fmt.Errorf("wrong content for file2: expected bar, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestChunkSpec runs the same copy pipeline under ChunkSpec.Number and
// ChunkSpec.SizeBytes and verifies all input files reach the output either
// way.
//
// Fix: the CreatePipeline error was silently discarded in both subtests.
func TestChunkSpec(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestChunkSpec_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	numFiles := 101
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	t.Run("number", func(t *testing.T) {
		pipeline := tu.UniqueString("TestChunkSpec")
		// Chunk datums one at a time.
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd: []string{"bash"},
					Stdin: []string{
						fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
					},
				},
				Input:     client.NewAtomInput(dataRepo, "/*"),
				ChunkSpec: &pps.ChunkSpec{Number: 1},
			})
		require.NoError(t, err) // was previously discarded
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		for i := 0; i < numFiles; i++ {
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf))
			require.Equal(t, "foo", buf.String())
		}
	})
	t.Run("size", func(t *testing.T) {
		pipeline := tu.UniqueString("TestChunkSpec")
		// Chunk datums by total size instead of count.
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd: []string{"bash"},
					Stdin: []string{
						fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
					},
				},
				Input:     client.NewAtomInput(dataRepo, "/*"),
				ChunkSpec: &pps.ChunkSpec{SizeBytes: 5},
			})
		require.NoError(t, err) // was previously discarded
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		for i := 0; i < numFiles; i++ {
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf))
			require.Equal(t, "foo", buf.String())
		}
	})
}
// TestLongDatums runs a pipeline whose user code sleeps a full minute per
// datum, then verifies every input file still reaches the output commit.
func TestLongDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	input := tu.UniqueString("TestLongDatums_data")
	require.NoError(t, c.CreateRepo(input))
	commit, err := c.StartCommit(input, "master")
	require.NoError(t, err)
	const fileCount = 8
	for i := 0; i < fileCount; i++ {
		_, err = c.PutFile(input, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(input, commit.ID))
	// Four workers each copy their (slow) datums to the output.
	pipeline := tu.UniqueString("TestLongDatums")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 1m",
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", input),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(input, "/*"),
		"",
		false,
	))
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	outputs := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(outputs))
	// Every input file must be present in the output, unchanged.
	out := outputs[0].Commit
	for i := 0; i < fileCount; i++ {
		var got bytes.Buffer
		require.NoError(t, c.GetFile(out.Repo.Name, out.ID, fmt.Sprintf("file%d", i), 0, 0, &got))
		require.Equal(t, "foo", got.String())
	}
}
// TestPipelineWithGitInputInvalidURLs verifies that git inputs reject the
// git://, ssh (git@), and malformed https URL forms — only the standard
// https clone URL is accepted.
func TestPipelineWithGitInputInvalidURLs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	// Of the common git URL types (listed below), only the 'clone' url is supported RN
	// (for several reasons, one of which is that we can't assume we have SSH / an ssh env setup on the user container)
	//git_url: "git://github.com/sjezewski/testgithook.git",
	//ssh_url: "git@github.com:sjezewski/testgithook.git",
	//svn_url: "https://github.com/sjezewski/testgithook",
	//clone_url: "https://github.com/sjezewski/testgithook.git",
	// git:// protocol URL — must be rejected.
	require.YesError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "git://github.com/pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// SSH-style URL — must be rejected.
	require.YesError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "git@github.com:pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// Malformed https URL (colon instead of slash, no .git) — must be rejected.
	require.YesError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "https://github.com:pachyderm/test-artifacts",
			},
		},
		"",
		false,
	))
}
// TestPipelineWithGitInputPrivateGHRepo verifies behavior for a git input
// pointing at a private GitHub repo: the input repo is created empty, and a
// simulated webhook push leaves no commit and marks the pipeline failed with
// a "unable to clone private github repo" reason.
func TestPipelineWithGitInputPrivateGHRepo(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	// pachyderm-dummy is a private repo, so the clone inside the worker fails.
	repoName := "pachyderm-dummy"
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: fmt.Sprintf("https://github.com/pachyderm/%v.git", repoName),
			},
		},
		"",
		false,
	))
	// There should be a pachyderm repo created w no commits:
	repos, err := c.ListRepo()
	require.NoError(t, err)
	found := false
	for _, repo := range repos {
		if repo.Repo.Name == repoName {
			found = true
		}
	}
	require.Equal(t, true, found)
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/private.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should NOT be a new commit on the pachyderm repo
	commits, err := c.ListCommit(repoName, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// We should see that the pipeline has failed
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, pps.PipelineState_PIPELINE_FAILURE, pipelineInfo.State)
	require.Equal(t, fmt.Sprintf("unable to clone private github repo (https://github.com/pachyderm/%v.git)", repoName), pipelineInfo.Reason)
}
// TestPipelineWithGitInputDuplicateNames verifies input-name validation for
// git inputs in a cross: two inputs with the same explicit Name are rejected,
// two inputs with the same URL (and thus the same derived name) are rejected,
// and the same URL with distinct names is accepted. Only CreatePipeline's
// success/failure is asserted; the pipeline is never triggered.
func TestPipelineWithGitInputDuplicateNames(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	//Test same name on one pipeline
	require.YesError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Cross: []*pps.Input{
				&pps.Input{
					Git: &pps.GitInput{
						URL:  "https://github.com/pachyderm/test-artifacts.git",
						Name: "foo",
					},
				},
				&pps.Input{
					Git: &pps.GitInput{
						URL:  "https://github.com/pachyderm/test-artifacts.git",
						Name: "foo",
					},
				},
			},
		},
		"",
		false,
	))
	//Test same URL on one pipeline
	require.YesError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Cross: []*pps.Input{
				&pps.Input{
					Git: &pps.GitInput{
						URL: "https://github.com/pachyderm/test-artifacts.git",
					},
				},
				&pps.Input{
					Git: &pps.GitInput{
						URL: "https://github.com/pachyderm/test-artifacts.git",
					},
				},
			},
		},
		"",
		false,
	))
	// Test same URL but different names
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			// The command references /pfs/pachyderm, which none of the inputs
			// provide — harmless here because the pipeline never runs.
			fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Cross: []*pps.Input{
				&pps.Input{
					Git: &pps.GitInput{
						URL:  "https://github.com/pachyderm/test-artifacts.git",
						Name: "foo",
					},
				},
				&pps.Input{
					Git: &pps.GitInput{
						URL: "https://github.com/pachyderm/test-artifacts.git",
					},
				},
			},
		},
		"",
		false,
	))
}
// TestPipelineWithGitInput is the happy-path git-input test: create a
// pipeline reading from a public GitHub repo, simulate a push webhook, and
// verify that a commit lands on the input repo's master branch and that the
// pipeline's output contains the expected git HEAD SHA.
func TestPipelineWithGitInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "https://github.com/pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// There should be a pachyderm repo created w no commits:
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	commits, err := c.ListCommit("test-artifacts", "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err := c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit := branches[0].Head
	// Now wait for the pipeline complete as normal
	outputRepo := &pfs.Repo{Name: pipeline}
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit = commitInfos[0].Commit
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
	// SHA must match the commit recorded in the master.json webhook payload.
	require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
}
// TestPipelineWithGitInputSequentialPushes simulates two consecutive git
// pushes and verifies that each produces a new head commit on the input
// branch and a pipeline output containing the corresponding HEAD SHA.
func TestPipelineWithGitInputSequentialPushes(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "https://github.com/pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// There should be a pachyderm repo created w no commits:
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	commits, err := c.ListCommit("test-artifacts", "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err := c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit := branches[0].Head
	// Now wait for the pipeline complete as normal
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit = commitInfos[0].Commit
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
	// SHA from the first payload (master.json).
	require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master-2.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err = c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit = branches[0].Head
	// Now wait for the pipeline complete as normal
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit = commitInfos[0].Commit
	buf.Reset()
	require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
	// SHA from the second payload (master-2.json).
	require.Equal(t, "162963b4adf00cd378488abdedc085ba08e21674", strings.TrimSpace(buf.String()))
}
// TestPipelineWithGitInputCustomName verifies that a git input with an
// explicit Name creates the input repo under that name (instead of the name
// derived from the URL) and that the pipeline runs against it end to end.
func TestPipelineWithGitInputCustomName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	// Custom name: the repo is "foo" even though the URL says test-artifacts.
	repoName := "foo"
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL:  "https://github.com/pachyderm/test-artifacts.git",
				Name: repoName,
			},
		},
		"",
		false,
	))
	// There should be a pachyderm repo created w no commits:
	_, err := c.InspectRepo(repoName)
	require.NoError(t, err)
	commits, err := c.ListCommit(repoName, "", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err := c.ListBranch(repoName)
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit := branches[0].Head
	// Now wait for the pipeline complete as normal
	outputRepo := &pfs.Repo{Name: pipeline}
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit = commitInfos[0].Commit
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
	require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
}
// TestPipelineWithGitInputMultiPipelineSeparateInputs creates two pipelines
// whose git inputs share the same URL but use different custom names, so each
// pipeline gets its own separately-named input repo. One simulated push must
// land a commit in both input repos and both pipelines must produce the same
// HEAD SHA.
func TestPipelineWithGitInputMultiPipelineSeparateInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	repos := []string{"pachyderm", "foo"}
	pipelines := []string{
		tu.UniqueString("github_pipeline_a_"),
		tu.UniqueString("github_pipeline_b_"),
	}
	for i, repoName := range repos {
		require.NoError(t, c.CreatePipeline(
			pipelines[i],
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
			},
			nil,
			&pps.Input{
				Git: &pps.GitInput{
					URL:  "https://github.com/pachyderm/test-artifacts.git",
					Name: repoName,
				},
			},
			"",
			false,
		))
		// There should be a pachyderm repo created w no commits:
		// NOTE: `repos` here shadows the outer []string; harmless because the
		// range header above already captured the outer slice.
		repos, err := c.ListRepo()
		require.NoError(t, err)
		found := false
		for _, repo := range repos {
			if repo.Repo.Name == repoName {
				found = true
			}
		}
		require.Equal(t, true, found)
		commits, err := c.ListCommit(repoName, "", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	for i, repoName := range repos {
		// Now there should be a new commit on the pachyderm repo / master branch
		branches, err := c.ListBranch(repoName)
		require.NoError(t, err)
		require.Equal(t, 1, len(branches))
		require.Equal(t, "master", branches[0].Name)
		commit := branches[0].Head
		// Now wait for the pipeline complete as normal
		outputRepo := &pfs.Repo{Name: pipelines[i]}
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		commit = commitInfos[0].Commit
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
		require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
	}
}
// TestPipelineWithGitInputMultiPipelineSameInput creates two pipelines that
// both read the SAME git input repo (no custom names, same URL). One
// simulated push must trigger both pipelines, and FlushCommit should report
// two output commits, each containing the pushed HEAD SHA.
func TestPipelineWithGitInputMultiPipelineSameInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	// Both entries are the same repo name on purpose — the pipelines share one input.
	repos := []string{"test-artifacts", "test-artifacts"}
	pipelines := []string{
		tu.UniqueString("github_pipeline_a_"),
		tu.UniqueString("github_pipeline_b_"),
	}
	for i, repoName := range repos {
		require.NoError(t, c.CreatePipeline(
			pipelines[i],
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
			},
			nil,
			&pps.Input{
				Git: &pps.GitInput{
					URL: "https://github.com/pachyderm/test-artifacts.git",
				},
			},
			"",
			false,
		))
		// There should be a pachyderm repo created w no commits:
		// NOTE: `repos` here shadows the outer []string; harmless because the
		// range header above already captured the outer slice.
		repos, err := c.ListRepo()
		require.NoError(t, err)
		found := false
		for _, repo := range repos {
			if repo.Repo.Name == repoName {
				found = true
			}
		}
		require.Equal(t, true, found)
		commits, err := c.ListCommit(repoName, "", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err := c.ListBranch(repos[0])
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit := branches[0].Head
	// Now wait for the pipeline complete as normal
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	// One output commit per pipeline.
	require.Equal(t, 2, len(commitInfos))
	commit = commitInfos[0].Commit
	for _, commitInfo := range commitInfos {
		commit = commitInfo.Commit
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
		require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
	}
}
// TestPipelineWithGitInputAndBranch verifies branch filtering on git inputs:
// a pipeline configured to watch branch "foo" must ignore a push to master
// (no master branch is created) and must run when a push to "foo" arrives,
// producing that branch's HEAD SHA.
func TestPipelineWithGitInputAndBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	branchName := "foo"
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL:    "https://github.com/pachyderm/test-artifacts.git",
				Branch: branchName,
			},
		},
		"",
		false,
	))
	// There should be a pachyderm repo created w no commits:
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	// Make sure a push to master does NOT trigger this pipeline
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(5 * time.Second)
	// The master push must have been filtered out, so no master branch exists.
	_, err = c.InspectBranch("test-artifacts", "master")
	require.YesError(t, err)
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/branch.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(5 * time.Second)
	// Now there should be a new commit on the pachyderm repo / watched branch
	branches, err := c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, branchName, branches[0].Name)
	commit := branches[0].Head
	require.NotNil(t, commit)
	// Now wait for the pipeline complete as normal
	outputRepo := &pfs.Repo{Name: pipeline}
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit = commitInfos[0].Commit
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
	// SHA from the branch.json webhook payload.
	require.Equal(t, "81269575dcfc6ac2e2a463ad8016163f79c97f5c", strings.TrimSpace(buf.String()))
}
// TestPipelineWithDatumTimeout verifies DatumTimeout enforcement: a pipeline
// whose user code loops forever must have its single datum killed after the
// configured timeout, the job must end in FAILURE, the datum must be marked
// FAILED, and the datum's recorded process time must equal the timeout.
func TestPipelineWithDatumTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithDatumTimeout_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file",
		strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// Infinite loop: guarantees the datum hits the timeout.
					"while true; do sleep 1; date; done",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:        client.NewAtomInput(dataRepo, "/*"),
			EnableStats:  true,
			DatumTimeout: types.DurationProto(duration),
		},
	)
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	// Now validate the datum timed out properly
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(resp.DatumInfos))
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_FAILED, datum.State)
	// ProcessTime looks like "20 seconds"
	tokens := strings.Split(pretty.Duration(datum.Stats.ProcessTime), " ")
	require.Equal(t, 2, len(tokens))
	seconds, err := strconv.Atoi(tokens[0])
	require.NoError(t, err)
	require.Equal(t, timeout, seconds)
}
// TestPipelineWithDatumTimeoutControl is the control for
// TestPipelineWithDatumTimeout: the user code sleeps for LESS than
// DatumTimeout (timeout-10 seconds), so the job must SUCCEED.
func TestPipelineWithDatumTimeoutControl(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithDatumTimeoutControl_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file",
		strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// Sleeps 10s, comfortably under the 20s timeout.
					fmt.Sprintf("sleep %v", timeout-10),
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:        client.NewAtomInput(dataRepo, "/*"),
			DatumTimeout: types.DurationProto(duration),
		},
	)
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestPipelineWithJobTimeout verifies JobTimeout enforcement: with 2 datums
// each sleeping for the full timeout, total execution time exceeds the job
// timeout, so the job must be KILLED, and its recorded wall time (Finished -
// Started) must be within 1 second of the configured timeout.
func TestPipelineWithJobTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithDatumTimeout_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	numFiles := 2
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%v", i),
			strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("sleep %v", timeout), // we have 2 datums, so the total exec time will more than double the timeout value
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			JobTimeout:  types.DurationProto(duration),
		},
	)
	require.NoError(t, err)
	// Wait for the job to get scheduled / appear in listjob
	// A sleep of 15s is insufficient
	time.Sleep(25 * time.Second)
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfo.State.String())
	started, err := types.TimestampFromProto(jobInfo.Started)
	require.NoError(t, err)
	finished, err := types.TimestampFromProto(jobInfo.Finished)
	require.NoError(t, err)
	// The job's wall time should be the 20s timeout, +/- 1s of slack.
	require.True(t, math.Abs((finished.Sub(started)-(time.Second*20)).Seconds()) <= 1.0)
}
// TestCommitDescription verifies commit descriptions: one can be set via
// StartCommit, set via FinishCommit, and a FinishCommit description
// overwrites one supplied earlier by StartCommit.
func TestCommitDescription(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	dataRepo := tu.UniqueString("TestCommitDescription")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Test putting a message in StartCommit
	commit, err := c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{
		Branch:      "master",
		Parent:      client.NewCommit(dataRepo, ""),
		Description: "test commit description in start-commit",
	})
	require.NoError(t, err)
	// Was previously ignored; a failed finish would invalidate the
	// InspectCommit assertions below.
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfo, err := c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in start-commit", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	// Test putting a message in FinishCommit
	commit, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in finish-commit",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in finish-commit", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	// Test overwriting a commit message
	commit, err = c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{
		Branch:      "master",
		Parent:      client.NewCommit(dataRepo, ""),
		Description: "test commit description in start-commit",
	})
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in finish-commit that overwrites",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in finish-commit that overwrites", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
}
// TestGetFileWithEmptyCommits verifies that after an Empty finish of a new
// head commit, GetFile on the branch reports "not found" even though an
// earlier commit on the branch contained the file.
func TestGetFileWithEmptyCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	repoName := tu.UniqueString("TestGetFileWithEmptyCommits")
	require.NoError(t, c.CreateRepo(repoName))
	// Create a real commit in repoName/master
	commit, err := c.StartCommit(repoName, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repoName, commit.ID, "/file", strings.NewReader("data contents"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repoName, commit.ID))
	// Create an empty commit in repoName/master
	commit, err = c.StartCommit(repoName, "master")
	// err was previously assigned here but never checked.
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit: commit,
		Empty:  true,
	})
	// A failed Empty finish would leave the old head visible and make the
	// "not found" assertion below pass for the wrong reason.
	require.NoError(t, err)
	// We get a "file not found" error when we try to get a file from repoName/master
	buf := bytes.Buffer{}
	err = c.GetFile(repoName, "master", "/file", 0, 0, &buf)
	require.YesError(t, err)
	require.True(t, strings.Contains(err.Error(), "not found"))
}
// TestPipelineDescription checks that the description supplied at pipeline
// creation time is stored and returned by InspectPipeline.
func TestPipelineDescription(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineDescription_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	description := "pipeline description"
	pipeline := tu.UniqueString("TestPipelineDescription")
	// Build the request separately so the call site stays readable.
	req := &pps.CreatePipelineRequest{
		Pipeline:    client.NewPipeline(pipeline),
		Transform:   &pps.Transform{Cmd: []string{"true"}},
		Description: description,
		Input:       client.NewAtomInput(dataRepo, "/"),
	}
	_, err := c.PpsAPIClient.CreatePipeline(context.Background(), req)
	require.NoError(t, err)
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, description, pipelineInfo.Description)
}
// TestListJobInputCommits verifies ListJob's input-commit filtering for a
// cross pipeline over two repos. Three input commits to repo a (a1, a2) and
// repo b (b1, b2) yield three jobs (a1+b1, a2+b1, a2+b2); filtering by each
// commit alone and by pairs must return the jobs whose input set contains
// all the given commits.
func TestListJobInputCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("TestListJobInputCommits_data_a")
	require.NoError(t, c.CreateRepo(aRepo))
	bRepo := tu.UniqueString("TestListJobInputCommits_data_b")
	require.NoError(t, c.CreateRepo(bRepo))
	pipeline := tu.UniqueString("TestListJobInputCommits")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", aRepo),
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", bRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(aRepo, "/*"),
			client.NewAtomInput(bRepo, "/*"),
		),
		"",
		false,
	))
	commita1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	commitb1, err := c.StartCommit(bRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(bRepo, "master", "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(bRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commita1, commitb1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commita2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb1}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commitb2, err := c.StartCommit(bRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(bRepo, "master", "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(bRepo, "master"))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobInfos, err := c.ListJob("", []*pfs.Commit{commita1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a1 + nil and a1 + b1
	jobInfos, err = c.ListJob("", []*pfs.Commit{commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a1 + b1 and a2 + b1
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a2 + b1 and a2 + b2
	jobInfos, err = c.ListJob("", []*pfs.Commit{commitb2}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos)) // a2 + b2
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita1, commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb2}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfos, err = c.ListJob("", []*pfs.Commit{client.NewCommit(aRepo, "master"), client.NewCommit(bRepo, "master")}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
}
// TestManyJobs is a scale/stress test: 10 pipelines x 5000 empty commits.
// It is unconditionally skipped (the t.Skip below also makes the
// testing.Short() check unreachable — intentional, the test is too slow for CI).
func TestManyJobs(t *testing.T) {
	t.Skip("This test is too long to be run as part of CI")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestManyJobs_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numPipelines := 10
	for i := 0; i < numPipelines; i++ {
		pipeline := tu.UniqueString("TestManyJobs")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"true"},
			// Bulky no-op stdin, just to fatten the pipeline spec.
			[]string{strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30)},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(dataRepo, "/*"),
			"",
			false,
		))
	}
	numCommits := 5000
	for i := 0; i < numCommits; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
	}
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	_, err = c.ListJob("", nil, nil)
	require.NoError(t, err)
}
// TestExtractRestore verifies backup/restore: build a 3-deep pipeline chain
// over ~80MB of input data, ExtractAll the cluster state, DeleteAll, Restore,
// and check that flushing the input again yields one output commit per
// pipeline, as before the wipe.
func TestExtractRestore(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestExtractRestore_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	nCommits := 2
	// Fixed seed keeps the generated content deterministic across runs.
	r := rand.New(rand.NewSource(45))
	fileContent := workload.RandString(r, 40*MB)
	for i := 0; i < nCommits; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
	}
	numPipelines := 3
	input := dataRepo
	for i := 0; i < numPipelines; i++ {
		pipeline := tu.UniqueString(fmt.Sprintf("TestExtractRestore%d", i))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				// BUG FIX: was `dataRepo`, but only the first pipeline has
				// /pfs/<dataRepo> mounted — later pipelines read from the
				// previous pipeline in the chain, i.e. /pfs/<input>.
				fmt.Sprintf("cp /pfs/%s/* /pfs/out/", input),
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(input, "/*"),
			"",
			false,
		))
		// Chain: the next pipeline consumes this one's output.
		input = pipeline
	}
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, numPipelines, len(commitInfos))
	ops, err := c.ExtractAll(false)
	require.NoError(t, err)
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.Restore(ops))
	commitIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, numPipelines, len(commitInfos))
}
// TestCancelJob creates a long-running job and then kills it, testing that the
// user process is killed. It then verifies the pipeline still processes a
// subsequent commit normally after the cancellation.
func TestCancelJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestCancelJob")
	require.NoError(t, c.CreateRepo(repo))
	// Create an input commit
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	// /time controls how long the job sleeps: 600s makes the first job
	// effectively run forever so we can cancel it mid-flight.
	_, err = c.PutFile(repo, commit.ID, "/time", strings.NewReader("600"))
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "/data", strings.NewReader("commit data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	// Create sleep + copy pipeline
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep `cat /pfs/*/time`",
			"cp /pfs/*/data /pfs/out/",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Wait until PPS has started processing commit
	var jobInfo *pps.JobInfo
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			jobInfo = jobInfos[0]
			return nil
		}, backoff.NewTestingBackOff())
	})
	// stop the job
	require.NoError(t, c.StopJob(jobInfo.Job.ID))
	// Wait until the job is cancelled
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false)
			if err != nil {
				return err
			}
			if updatedJobInfo.State != pps.JobState_JOB_KILLED {
				return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID)
			}
			return nil
		}, backoff.NewTestingBackOff())
	})
	// Create one more commit to make sure the pipeline can still process input
	// commits
	commit2, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	// Replace /time with "1" so the second job finishes quickly.
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time"))
	_, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data"))
	_, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Flush commit2, and make sure the output is as expected
	iter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	buf := bytes.Buffer{}
	err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 2 data", buf.String())
}
// TestCancelManyJobs creates many jobs to test that the handling of many
// incoming job events is correct. Each job comes up (which tests that that
// cancelling job 'a' does not cancel subsequent job 'b'), must be the only job
// running (which tests that only one job can run at a time), and then is
// cancelled.
func TestCancelManyJobs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestCancelManyJobs")
	require.NoError(t, c.CreateRepo(repo))
	// Create sleep pipeline (600s sleep keeps each job running until we cancel it)
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sleep", "600"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Create 10 input commits, to spawn 10 jobs
	var commits [10]*pfs.Commit
	var err error
	for i := 0; i < 10; i++ {
		commits[i], err = c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commits[i].ID))
	}
	// For each expected job: watch to make sure the input job comes up, make
	// sure that it's the only job running, then cancel it
	for _, commit := range commits {
		// Wait until PPS has started processing commit
		var jobInfo *pps.JobInfo
		require.NoErrorWithinT(t, 30*time.Second, func() error {
			return backoff.Retry(func() error {
				jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil)
				if err != nil {
					return err
				}
				if len(jobInfos) != 1 {
					return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
				}
				jobInfo = jobInfos[0]
				return nil
			}, backoff.NewTestingBackOff())
		})
		// Stop the job
		require.NoError(t, c.StopJob(jobInfo.Job.ID))
		// Check that the job is now killed
		require.NoErrorWithinT(t, 30*time.Second, func() error {
			return backoff.Retry(func() error {
				// TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is
				// submitted, change ListJob here to filter on commit1 as the input commit,
				// rather than inspecting the input in the test
				updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false)
				if err != nil {
					return err
				}
				if updatedJobInfo.State != pps.JobState_JOB_KILLED {
					return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID)
				}
				return nil
			}, backoff.NewTestingBackOff())
		})
	}
}
// TestDeleteCommitPropagation deletes an input commit and makes sure all
// downstream commits are also deleted.
// DAG in this test: repo -> pipeline[0] -> pipeline[1]
func TestDeleteCommitPropagation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestDeleteCommitPropagation")
	require.NoError(t, c.CreateRepo(repo))
	// Create two copy pipelines chained in a line: repo feeds pipeline[0],
	// which feeds pipeline[1]
	numPipelines, numCommits := 2, 2
	pipeline := make([]string, numPipelines)
	for i := 0; i < numPipelines; i++ {
		pipeline[i] = tu.UniqueString(fmt.Sprintf("pipeline%d_", i))
		// pipeline[0] reads from repo, pipeline[1] reads from pipeline[0]
		input := []string{repo, pipeline[0]}[i]
		require.NoError(t, c.CreatePipeline(
			pipeline[i],
			"",
			[]string{"bash"},
			[]string{"cp /pfs/*/* /pfs/out/"},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(input, "/*"),
			"",
			false,
		))
	}
	// Commit twice to the input repo, creating 4 jobs and 4 output commits
	commit := make([]*pfs.Commit, numCommits)
	var err error
	for i := 0; i < numCommits; i++ {
		commit[i], err = c.StartCommit(repo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(repo, commit[i].ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit[i].ID))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit[i]}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 2, len(commitInfos)) // one output commit per pipeline
	}
	// Delete the first commit in the input repo (not master, but its parent)
	// Make sure that 'repo' and all downstream repos only have one commit now.
	// This ensures that commits' parents are updated
	commits, err := c.ListCommit(repo, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commits))
	require.NoError(t, c.DeleteCommit(repo, commit[0].ID))
	for _, r := range []string{repo, pipeline[0], pipeline[1]} {
		commits, err := c.ListCommit(r, "master", "", 0)
		require.NoError(t, err)
		require.Equal(t, 1, len(commits))
		require.Nil(t, commits[0].ParentCommit)
	}
	jis, err := c.ListJob(pipeline[0], nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	// Delete the second commit in the input repo (master)
	// Make sure that 'repo' and all downstream repos have no commits. This
	// ensures that branches are updated.
	require.NoError(t, c.DeleteCommit(repo, "master"))
	for _, r := range []string{repo, pipeline[0], pipeline[1]} {
		commits, err := c.ListCommit(r, "master", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// Make one more input commit, to be sure that the branches are still
	// connected properly
	finalCommit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, finalCommit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, finalCommit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{finalCommit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 2, len(commitInfos))
}
// TestDeleteCommitRunsJob creates an input repo, commits several times, and then
// creates a pipeline. Creating the pipeline will spawn a job and while that
// job is running, this test deletes the HEAD commit of the input branch, which
// deletes the job's output commit and cancels the job. This should start
// another pipeline that processes the original input HEAD commit's parent.
func TestDeleteCommitRunsJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestDeleteCommitRunsJob")
	require.NoError(t, c.CreateRepo(repo))
	// Create two input commits. The input commit has two files: 'time' which
	// determines how long the processing job runs for, and 'data' which
	// determines the job's output. This ensures that the first job (processing
	// the second commit) runs for a long time, making it easy to cancel, while
	// the second job runs quickly, ensuring that the test finishes quickly
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "/time", strings.NewReader("1"))
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "/data", strings.NewReader("commit 1 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commit2, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time"))
	_, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("600"))
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data"))
	_, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Create sleep + copy pipeline
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep `cat /pfs/*/time`",
			"cp /pfs/*/data /pfs/out/",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Wait until PPS has started processing commit2
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			// TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is
			// submitted, change ListJob here to filter on commit1 as the input commit,
			// rather than inspecting the input in the test
			jobInfos, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			// The closure sets 'err' (returned below) if the job's input is wrong
			pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) {
				if input.Atom == nil {
					err = fmt.Errorf("expected a single atom input, but got: %v", jobInfos[0].Input)
					return
				}
				if input.Atom.Commit != commit2.ID {
					err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit2.ID, jobInfos[0].Input)
					return
				}
			})
			return err
		}, backoff.NewTestingBackOff())
	})
	// Delete the first commit in the input repo
	require.NoError(t, c.DeleteCommit(repo, commit2.ID))
	// Wait until PPS has started processing commit1
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			// TODO(msteffen): as above, change ListJob here to filter on commit2 as
			// the input, rather than inspecting the input in the test
			jobInfos, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) {
				if input.Atom == nil {
					err = fmt.Errorf("expected a single atom input, but got: %v", jobInfos[0].Input)
					return
				}
				if input.Atom.Commit != commit1.ID {
					err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit1.ID, jobInfos[0].Input)
					return
				}
			})
			return err
		}, backoff.NewTestingBackOff())
	})
	iter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	// Check that the job processed the right data
	buf := bytes.Buffer{}
	err = c.GetFile(repo, "master", "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 1 data", buf.String())
	// Create one more commit to make sure the pipeline can still process input
	// commits
	commit3, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit3.ID, "/data"))
	_, err = c.PutFile(repo, commit3.ID, "/data", strings.NewReader("commit 3 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit3.ID))
	// Flush commit3, and make sure the output is as expected
	iter, err = c.FlushCommit([]*pfs.Commit{commit3}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 3 data", buf.String())
}
// TestEntryPoint runs a pipeline with no cmd/stdin, relying entirely on the
// pachyderm_entrypoint image's own entrypoint to produce the output.
func TestEntryPoint(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	// Set up an input repo with a single file.
	dataRepo := tu.UniqueString("TestEntryPoint_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))

	// Create the pipeline: nil cmd and nil stdin, input explicitly named "in".
	pipeline := tu.UniqueString("TestSimplePipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"pachyderm_entrypoint",
		nil,
		nil,
		&pps.ParallelismSpec{Constant: 1},
		&pps.Input{
			Atom: &pps.AtomInput{
				Name: "in",
				Repo: dataRepo,
				Glob: "/*",
			},
		},
		"",
		false,
	))

	// Wait for the output commit and verify the file made it through.
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	outInfos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(outInfos))
	var out bytes.Buffer
	require.NoError(t, c.GetFile(outInfos[0].Commit.Repo.Name, outInfos[0].Commit.ID, "file", 0, 0, &out))
	require.Equal(t, "foo", out.String())
}
// TestDeleteSpecRepo verifies that the PPS spec repo cannot be deleted while
// a pipeline exists.
func TestDeleteSpecRepo(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestDeleteSpecRepo_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	require.NoError(t, c.CreatePipeline(
		tu.UniqueString("TestSimplePipeline"),
		"pachyderm_entrypoint",
		[]string{"echo", "foo"},
		nil,
		&pps.ParallelismSpec{Constant: 1},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Deleting the spec repo must fail.
	require.YesError(t, c.DeleteRepo(ppsconsts.SpecRepo, false))
}
// TestUserWorkingDir checks that a pipeline's Transform.User and
// Transform.WorkingDir settings take effect inside the user container, by
// asserting on the output of `whoami` and `pwd`.
func TestUserWorkingDir(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	// Bug fix: the previous `defer require.NoError(t, c.DeleteAll())` evaluated
	// c.DeleteAll() immediately (defer only delays the outer function call), so
	// the cluster was wiped at the START of the test instead of at the end.
	// Wrapping in a closure defers the whole call.
	defer func() {
		require.NoError(t, c.DeleteAll())
	}()
	dataRepo := tu.UniqueString("TestUserWorkingDir_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("TestSimplePipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Image: "pachyderm_entrypoint",
				Cmd:   []string{"bash"},
				Stdin: []string{
					"ls -lh /pfs",
					"whoami >/pfs/out/whoami",
					"pwd >/pfs/out/pwd",
					fmt.Sprintf("cat /pfs/%s/file >/pfs/out/file", dataRepo),
				},
				User:       "test",
				WorkingDir: "/home/test",
			},
			Input: client.NewAtomInput(dataRepo, "/"),
		})
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// The job ran as user "test" ...
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "whoami", 0, 0, &buf))
	require.Equal(t, "test\n", buf.String())
	// ... with /home/test as its working directory.
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "pwd", 0, 0, &buf))
	require.Equal(t, "/home/test\n", buf.String())
}
// TestDontReadStdin verifies that a job whose command never consumes stdin
// still succeeds even though stdin lines were supplied.
func TestDontReadStdin(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestDontReadStdin_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("TestDontReadStdin")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"true"},
		[]string{"stdin that will never be read"},
		&pps.ParallelismSpec{Constant: 1},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Spawn 20 jobs and check every one succeeds.
	const numCommits = 20
	for i := 0; i < numCommits; i++ {
		commit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		jobInfos, err := c.FlushJobAll([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(jobInfos))
		require.Equal(t, jobInfos[0].State.String(), pps.JobState_JOB_SUCCESS.String())
	}
}
// TestStatsDeleteAll verifies that DeleteAll cleanly removes a stats-enabled
// pipeline and its data, and that an equivalent pipeline can be re-created and
// run successfully afterwards.
func TestStatsDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStats_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"},
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			EnableStats: true,
		})
	// Bug fix: this error was previously unchecked (silently overwritten by the
	// FlushJobAll call below), so a failed CreatePipeline went unnoticed.
	require.NoError(t, err)
	jis, err := c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String())
	// Wipe everything, then rebuild the repo and pipeline from scratch.
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
		})
	// Bug fix: this error was also previously unchecked.
	require.NoError(t, err)
	jis, err = c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String())
	require.NoError(t, c.DeleteAll())
}
// TestCorruption repeatedly runs a simple pipeline while randomly deleting
// etcd keys, checking that the cluster keeps working despite metadata loss.
func TestCorruption(t *testing.T) {
	t.Skip("This test takes too long to run on CI.")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	etcdClient := getEtcdClient(t)
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Fixed seed so the sequence of deleted keys is reproducible
	r := rand.New(rand.NewSource(128))
	for i := 0; i < 100; i++ {
		dataRepo := tu.UniqueString("TestSimplePipeline_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		commit1, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
		pipeline := tu.UniqueString("TestSimplePipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(dataRepo, "/*"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		// Enumerate all of pachyderm's etcd keys (keys only, values not needed)
		resp, err := etcdClient.Get(context.Background(), col.DefaultPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
		require.NoError(t, err)
		for _, kv := range resp.Kvs {
			// Delete 1 in 10 keys
			if r.Intn(10) == 0 {
				_, err := etcdClient.Delete(context.Background(), string(kv.Key))
				require.NoError(t, err)
			}
		}
		require.NoError(t, c.DeleteAll())
	}
}
// TestPachdPrometheusStats queries a Prometheus server (address taken from the
// PROM_PORT env var) and checks that pachd's PFS/PPS API metrics and cache
// gauges are being reported.
func TestPachdPrometheusStats(t *testing.T) {
	t.Skip("flake")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	port := os.Getenv("PROM_PORT")
	promClient, err := prom_api.NewClient(prom_api.Config{
		Address: fmt.Sprintf("http://127.0.0.1:%v", port),
	})
	require.NoError(t, err)
	promAPI := prom_api_v1.NewAPI(promClient)
	// countQuery runs a query that must return exactly one sample and returns
	// its value
	countQuery := func(t *testing.T, query string) float64 {
		result, err := promAPI.Query(context.Background(), query, time.Now())
		require.NoError(t, err)
		resultVec := result.(prom_model.Vector)
		require.Equal(t, 1, len(resultVec))
		return float64(resultVec[0].Value)
	}
	// avgQuery runs sum/count (an average) and asserts the expected number of
	// result series
	avgQuery := func(t *testing.T, sumQuery string, countQuery string, expected int) {
		query := "(" + sumQuery + ")/(" + countQuery + ")"
		result, err := promAPI.Query(context.Background(), query, time.Now())
		require.NoError(t, err)
		resultVec := result.(prom_model.Vector)
		require.Equal(t, expected, len(resultVec))
	}
	// Check stats reported on pachd pod
	pod := "app=\"pachd\""
	without := "(instance)"
	// Check PFS API is reported
	t.Run("GetFileAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 2) // 2 results ... one for finished, one for errored
	})
	t.Run("PutFileAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 1)
	})
	t.Run("GetFileSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_get_file_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query) // Just check query has a result
	})
	t.Run("PutFileSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_put_file_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query) // Just check query has a result
	})
	// Check PPS API is reported
	t.Run("ListJobSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_list_job_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query)
	})
	t.Run("ListJobAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 1)
	})
	// Check that each cache's load gauge is reported
	caches := []string{"object", "tag", "object_info"}
	for _, cache := range caches {
		t.Run(fmt.Sprintf("cache_%v", cache), func(t *testing.T) {
			query := fmt.Sprintf("pachyderm_pachd_cache_%v_loads_gauge", cache)
			countQuery(t, query)
		})
	}
}
// getAllObjects drains the ListObjects stream and returns every object in it.
func getAllObjects(t testing.TB, c *client.APIClient) []*pfs.Object {
	stream, err := c.ListObjects(context.Background(), &pfs.ListObjectsRequest{})
	require.NoError(t, err)
	var result []*pfs.Object
	for {
		object, err := stream.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		result = append(result, object)
	}
	return result
}
// getAllTags drains the ListTags stream and returns every tag name in it.
func getAllTags(t testing.TB, c *client.APIClient) []string {
	stream, err := c.ListTags(context.Background(), &pfs.ListTagsRequest{})
	require.NoError(t, err)
	var names []string
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		names = append(names, resp.Tag.Name)
	}
	return names
}
// restartAll force-deletes every pod in the pachyderm suite and waits for the
// cluster to become ready again.
func restartAll(t *testing.T) {
	pods := getKubeClient(t).CoreV1().Pods(v1.NamespaceDefault)
	podList, err := pods.List(
		metav1.ListOptions{
			LabelSelector: "suite=pachyderm",
		})
	require.NoError(t, err)
	for _, pod := range podList.Items {
		// GracePeriodSeconds of 0 kills the pod immediately
		err := pods.Delete(pod.Name, &metav1.DeleteOptions{
			GracePeriodSeconds: new(int64),
		})
		require.NoError(t, err)
	}
	waitForReadiness(t)
}
// restartOne force-deletes a single random pachd pod and waits for the
// cluster to become ready again.
func restartOne(t *testing.T) {
	pods := getKubeClient(t).CoreV1().Pods(v1.NamespaceDefault)
	podList, err := pods.List(
		metav1.ListOptions{
			LabelSelector: "app=pachd",
		})
	require.NoError(t, err)
	victim := podList.Items[rand.Intn(len(podList.Items))].Name
	require.NoError(t, pods.Delete(
		victim,
		&metav1.DeleteOptions{GracePeriodSeconds: new(int64)}))
	waitForReadiness(t)
}
const (
	// retries is how many times getUsablePachClient attempts to connect
	retries = 10
)

// getUsablePachClient is like getPachClient except it blocks until it gets a
// connection that actually works
func getUsablePachClient(t *testing.T) *client.APIClient {
	for i := 0; i < retries; i++ {
		// Renamed from 'client' to 'c': the old name shadowed the imported
		// 'client' package inside the loop body.
		c := getPachClient(t)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		_, err := c.PfsAPIClient.ListRepo(ctx, &pfs.ListRepoRequest{})
		// Bug fix: this was `defer cancel()`, which accumulated one pending
		// timer per attempt until the function returned (defers in a loop run
		// at function exit, not per iteration). Cancel eagerly instead.
		cancel()
		if err == nil {
			return c
		}
	}
	t.Fatalf("failed to connect after %d tries", retries)
	return nil
}
// podRunningAndReady reports whether the pod in a watch event is in the
// Running phase. It returns an error if the event is a DELETE or if the
// event's object is not a pod.
func podRunningAndReady(e watch.Event) (bool, error) {
	if e.Type == watch.Deleted {
		return false, errors.New("received DELETE while watching pods")
	}
	pod, ok := e.Object.(*v1.Pod)
	if !ok {
		// Bug fix: this case was previously an empty if-body, so a non-pod
		// object fell through and nil-dereferenced 'pod' on the next line.
		return false, errors.New("watch event object is not a *v1.Pod")
	}
	return pod.Status.Phase == v1.PodRunning, nil
}
// waitForReadiness blocks until the pachd deployment has converged on its
// desired replica count and every pachd pod is in the Running phase.
func waitForReadiness(t testing.TB) {
	k := getKubeClient(t)
	deployment := pachdDeployment(t)
	// First wait for the deployment controller to observe the current spec and
	// report the full replica count.
	for {
		newDeployment, err := k.Apps().Deployments(v1.NamespaceDefault).Get(deployment.Name, metav1.GetOptions{})
		require.NoError(t, err)
		if newDeployment.Status.ObservedGeneration >= deployment.Generation && newDeployment.Status.Replicas == *newDeployment.Spec.Replicas {
			break
		}
		time.Sleep(time.Second * 5)
	}
	// Then watch pods until one Running pod per replica has been seen.
	// (Renamed from 'watch' to 'podWatch': the old name shadowed the imported
	// 'watch' package.)
	podWatch, err := k.CoreV1().Pods(v1.NamespaceDefault).Watch(metav1.ListOptions{
		LabelSelector: "app=pachd",
	})
	// Bug fix: the error must be checked BEFORE deferring Stop — the original
	// deferred watch.Stop() first, which would panic on a nil watch at function
	// exit if Watch had failed.
	require.NoError(t, err)
	defer podWatch.Stop()
	readyPods := make(map[string]bool)
	for event := range podWatch.ResultChan() {
		ready, err := podRunningAndReady(event)
		require.NoError(t, err)
		if ready {
			pod, ok := event.Object.(*v1.Pod)
			if !ok {
				t.Fatal("event.Object should be an object")
			}
			readyPods[pod.Name] = true
			if len(readyPods) == int(*deployment.Spec.Replicas) {
				break
			}
		}
	}
}
// simulateGitPush replays a recorded GitHub push-webhook payload (read from
// pathToPayload) against the local githook endpoint and asserts that the
// request is accepted with HTTP 200.
func simulateGitPush(t *testing.T, pathToPayload string) {
	payload, err := ioutil.ReadFile(pathToPayload)
	require.NoError(t, err)
	req, err := http.NewRequest(
		"POST",
		fmt.Sprintf("http://127.0.0.1:%v/v1/handle/push", githook.GitHookPort+30000),
		bytes.NewBuffer(payload),
	)
	// Bug fix: this error was previously unchecked; a nil req would panic on
	// Header.Set below.
	require.NoError(t, err)
	// Headers copied from a real GitHub webhook delivery
	req.Header.Set("X-Github-Delivery", "2984f5d0-c032-11e7-82d7-ed3ee54be25d")
	req.Header.Set("User-Agent", "GitHub-Hookshot/c1d08eb")
	req.Header.Set("X-Github-Event", "push")
	req.Header.Set("Content-Type", "application/json")
	// Inlined the client to avoid shadowing the imported 'client' package.
	resp, err := (&http.Client{}).Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, 200, resp.StatusCode)
}
// pipelineRc fetches the k8s replication controller backing the given pipeline.
func pipelineRc(t testing.TB, pipelineInfo *pps.PipelineInfo) (*v1.ReplicationController, error) {
	name := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	rcs := getKubeClient(t).CoreV1().ReplicationControllers(v1.NamespaceDefault)
	return rcs.Get(name, metav1.GetOptions{})
}
// pachdDeployment fetches the "pachd" Deployment object from kubernetes,
// failing the test on error.
func pachdDeployment(t testing.TB) *apps.Deployment {
	d, err := getKubeClient(t).Apps().Deployments(v1.NamespaceDefault).Get("pachd", metav1.GetOptions{})
	require.NoError(t, err)
	return d
}
// scalePachd scales the number of pachd nodes up or down.
// If up is true, then the number of nodes will be within (n, 2n]
// If up is false, then the number of nodes will be within [1, n)
// NOTE(review): if the current replica count is 1 and up is false,
// rand.Intn(int(originalReplicas)-1) is rand.Intn(0), which panics — confirm
// callers never scale down from a single replica.
func scalePachdRandom(t testing.TB, up bool) {
	pachdRc := pachdDeployment(t)
	originalReplicas := *pachdRc.Spec.Replicas
	// Re-roll until the chosen replica count actually differs from the current one
	for {
		if up {
			*pachdRc.Spec.Replicas = originalReplicas + int32(rand.Intn(int(originalReplicas))+1)
		} else {
			*pachdRc.Spec.Replicas = int32(rand.Intn(int(originalReplicas)-1) + 1)
		}
		if *pachdRc.Spec.Replicas != originalReplicas {
			break
		}
	}
	scalePachdN(t, int(*pachdRc.Spec.Replicas))
}
// scalePachdN scales the number of pachd nodes to N
func scalePachdN(t testing.TB, n int) {
	k := getKubeClient(t)
	d := pachdDeployment(t)
	// Modify the type metadata of the Deployment spec we read from k8s, so that
	// k8s will accept it if we're talking to a 1.7 cluster
	d.TypeMeta.APIVersion = "apps/v1beta1"
	*d.Spec.Replicas = int32(n)
	_, err := k.Apps().Deployments(v1.NamespaceDefault).Update(d)
	require.NoError(t, err)
	waitForReadiness(t)
	// Unfortunately, even when all pods are ready, the cluster membership
	// protocol might still be running, thus PFS API calls might fail. So
	// we wait a little bit for membership to stablize.
	time.Sleep(15 * time.Second)
}
// scalePachd reads the number of pachd nodes from an env variable and
// scales pachd accordingly. It is a no-op when PACHD is unset.
func scalePachd(t testing.TB) {
	if nStr := os.Getenv("PACHD"); nStr != "" {
		n, err := strconv.Atoi(nStr)
		require.NoError(t, err)
		scalePachdN(t, n)
	}
}
// getKubeClient returns a kubernetes clientset. Inside a cluster it uses the
// in-cluster config; otherwise it shells out to kubectl/jq to reconstruct the
// current context's credentials, falling back to localhost:8080 when kubectl
// has no current context.
func getKubeClient(t testing.TB) *kube.Clientset {
	var config *rest.Config
	host := os.Getenv("KUBERNETES_SERVICE_HOST")
	if host != "" {
		var err error
		config, err = rest.InClusterConfig()
		require.NoError(t, err)
	} else {
		// Use kubectl binary to parse .kube/config and get address of current
		// cluster. Hopefully, once we upgrade to k8s.io/client-go, we will be able
		// to do this in-process with a library
		// First, figure out if we're talking to minikube or localhost
		cmd := exec.Command("kubectl", "config", "current-context")
		if context, err := cmd.Output(); err == nil {
			context = bytes.TrimSpace(context)
			// kubectl has a context -- not talking to localhost
			// Get cluster and user name from kubectl
			buf := &bytes.Buffer{}
			cmd := tu.BashCmd(strings.Join([]string{
				`kubectl config get-contexts "{{.context}}" | tail -n+2 | awk '{print $3}'`,
				`kubectl config get-contexts "{{.context}}" | tail -n+2 | awk '{print $4}'`,
			}, "\n"),
				"context", string(context))
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes context info")
			// The two commands above each print one line; line order matters below
			lines := strings.Split(buf.String(), "\n")
			clustername, username := lines[0], lines[1]
			// Get user info (client cert and key paths, extracted with jq)
			buf.Reset()
			cmd = tu.BashCmd(strings.Join([]string{
				`cluster="$(kubectl config view -o json | jq -r '.users[] | select(.name == "{{.user}}") | .user' )"`,
				`echo "${cluster}" | jq -r '.["client-certificate"]'`,
				`echo "${cluster}" | jq -r '.["client-key"]'`,
			}, "\n"),
				"user", username)
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes user info")
			lines = strings.Split(buf.String(), "\n")
			clientCert, clientKey := lines[0], lines[1]
			// Get cluster info (API server address and CA cert path)
			buf.Reset()
			cmd = tu.BashCmd(strings.Join([]string{
				`cluster="$(kubectl config view -o json | jq -r '.clusters[] | select(.name == "{{.cluster}}") | .cluster')"`,
				`echo "${cluster}" | jq -r .server`,
				`echo "${cluster}" | jq -r '.["certificate-authority"]'`,
			}, "\n"),
				"cluster", clustername)
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes cluster info: %s", buf.String())
			lines = strings.Split(buf.String(), "\n")
			address, CAKey := lines[0], lines[1]
			// Generate config
			config = &rest.Config{
				Host: address,
				TLSClientConfig: rest.TLSClientConfig{
					CertFile: clientCert,
					KeyFile:  clientKey,
					CAFile:   CAKey,
				},
			}
		} else {
			// no context -- talking to localhost
			config = &rest.Config{
				Host: "http://0.0.0.0:8080",
				TLSClientConfig: rest.TLSClientConfig{
					Insecure: false,
				},
			}
		}
	}
	k, err := kube.NewForConfig(config)
	require.NoError(t, err)
	return k
}
// pachClient is the process-wide pachyderm client, created exactly once by
// getPachClient.
var pachClient *client.APIClient
var getPachClientOnce sync.Once

// getPachClient lazily creates (once per test binary) and returns a pachyderm
// client, connecting in-cluster when PACHD_PORT_650_TCP_ADDR is set and to the
// user's machine otherwise.
func getPachClient(t testing.TB) *client.APIClient {
	getPachClientOnce.Do(func() {
		var err error
		if addr := os.Getenv("PACHD_PORT_650_TCP_ADDR"); addr != "" {
			pachClient, err = client.NewInCluster()
		} else {
			pachClient, err = client.NewOnUserMachine(false, "user")
		}
		require.NoError(t, err)
	})
	return pachClient
}
// etcdClient is the process-wide etcd client, created exactly once by
// getEtcdClient.
var etcdClient *etcd.Client
var getEtcdClientOnce sync.Once

const (
	etcdAddress = "localhost:32379" // etcd must already be serving at this address
)

// getEtcdClient lazily creates (once per test binary) and returns an etcd
// client connected to etcdAddress.
func getEtcdClient(t testing.TB) *etcd.Client {
	getEtcdClientOnce.Do(func() {
		var err error
		etcdClient, err = etcd.New(etcd.Config{
			Endpoints:   []string{etcdAddress},
			DialOptions: client.EtcdDialOptions(),
		})
		require.NoError(t, err)
	})
	return etcdClient
}
Extend the delete-pipeline test.
package server
import (
"bytes"
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"testing"
"time"
"golang.org/x/sync/errgroup"
"github.com/pachyderm/pachyderm/src/client"
"github.com/pachyderm/pachyderm/src/client/auth"
"github.com/pachyderm/pachyderm/src/client/pfs"
"github.com/pachyderm/pachyderm/src/client/pkg/require"
"github.com/pachyderm/pachyderm/src/client/pps"
pfspretty "github.com/pachyderm/pachyderm/src/server/pfs/pretty"
"github.com/pachyderm/pachyderm/src/server/pkg/backoff"
col "github.com/pachyderm/pachyderm/src/server/pkg/collection"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts"
"github.com/pachyderm/pachyderm/src/server/pkg/ppsutil"
"github.com/pachyderm/pachyderm/src/server/pkg/pretty"
tu "github.com/pachyderm/pachyderm/src/server/pkg/testutil"
"github.com/pachyderm/pachyderm/src/server/pkg/uuid"
"github.com/pachyderm/pachyderm/src/server/pkg/workload"
ppspretty "github.com/pachyderm/pachyderm/src/server/pps/pretty"
"github.com/pachyderm/pachyderm/src/server/pps/server/githook"
etcd "github.com/coreos/etcd/clientv3"
"github.com/gogo/protobuf/types"
prom_api "github.com/prometheus/client_golang/api"
prom_api_v1 "github.com/prometheus/client_golang/api/prometheus/v1"
prom_model "github.com/prometheus/common/model"
apps "k8s.io/api/apps/v1beta2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/watch"
kube "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
	// InCloudEnv names the environment variable that, when set, indicates
	// the tests are being run against a real cluster in the cloud (rather
	// than a local dev cluster).
	InCloudEnv = "PACH_TEST_CLOUD"
)
// TestSimplePipeline commits one file to an input repo, runs a single-worker
// copy pipeline over it, and verifies the pipeline's output commit contains
// the original content.
func TestSimplePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	// Write "foo" into a fresh input repo.
	dataRepo := tu.UniqueString("TestSimplePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))

	// A one-worker pipeline that copies its input verbatim to /pfs/out.
	pipeline := tu.UniqueString("TestSimplePipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))

	// Wait for the downstream commit, then check the copied content.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var out bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &out))
	require.Equal(t, "foo", out.String())
}
// TestPipelineWithParallelism runs a copy pipeline with 4 parallel workers
// over 1000 small input files and verifies every file arrives in the output
// commit unchanged.
func TestPipelineWithParallelism(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithParallelism_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 1000
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
		// Fix: the original silently discarded this error, so a failed
		// PutFile would only surface as a confusing mismatch later.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Every input file must appear in the output with identical content.
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("%d", i), buf.String())
	}
}
// TestPipelineWithLargeFiles pushes files that are each larger than one PFS
// chunk (ChunkSize + i MB) through a copy pipeline and verifies both the
// reported sizes and the exact contents of the outputs.
func TestPipelineWithLargeFiles(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithLargeFiles_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Fixed seed so the random file contents are reproducible across runs.
	r := rand.New(rand.NewSource(99))
	numFiles := 10
	var fileContents []string
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		// Each file is slightly larger than a chunk so the pipeline must
		// handle multi-chunk files.
		fileContent := workload.RandString(r, int(pfs.ChunkSize)+i*MB)
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i),
			strings.NewReader(fileContent))
		require.NoError(t, err)
		fileContents = append(fileContents, fileContent)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit := commitInfos[0].Commit
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		fileName := fmt.Sprintf("file-%d", i)
		// Check the reported size before fetching the content.
		fileInfo, err := c.InspectFile(commit.Repo.Name, commit.ID, fileName)
		require.NoError(t, err)
		require.Equal(t, int(pfs.ChunkSize)+i*MB, int(fileInfo.SizeBytes))
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, fileName, 0, 0, &buf))
		// we don't wanna use the `require` package here since it prints
		// the strings, which would clutter the output.
		if fileContents[i] != buf.String() {
			t.Fatalf("file content does not match")
		}
	}
}
// TestDatumDedup verifies datum deduplication: an unchanged datum is not
// reprocessed, so a second (empty) commit flushes essentially instantly even
// though the pipeline sleeps 10s per datum.
func TestDatumDedup(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestDatumDedup_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	// Fix: the original dropped the PutFile error on the floor.
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 10 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 10",
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Since we did not change the datum, the datum should not be processed
	// again, which means that the job should complete instantly.
	// The 5s timeout is well under the pipeline's 10s sleep, so a
	// reprocessed datum would make this flush fail.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
	defer cancel()
	stream, err := c.PfsAPIClient.FlushCommit(
		ctx,
		&pfs.FlushCommitRequest{
			Commits: []*pfs.Commit{commit2},
		})
	require.NoError(t, err)
	_, err = stream.Recv()
	require.NoError(t, err)
}
// TestPipelineInputDataModification verifies that a copy pipeline tracks
// modifications to its input: replacing a file's content, then deleting it
// and adding a different file, each yield a matching output commit.
func TestPipelineInputDataModification(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineInputDataModification_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	// Fix: the original dropped this PutFile error.
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	// replace the contents of 'file' in dataRepo (from "foo" to "bar")
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(dataRepo, commit2.ID, "file"))
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	// Delete 'file' and add 'file2' instead; the old output file must
	// disappear from the new output commit.
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(dataRepo, commit3.ID, "file"))
	_, err = c.PutFile(dataRepo, commit3.ID, "file2", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// 'file' was deleted upstream, so fetching it must fail now.
	require.YesError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file2", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	// One output commit per input commit.
	commitInfos, err = c.ListCommit(pipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
}
// TestMultipleInputsFromTheSameBranch crosses two globbed views (dirA, dirB)
// of the same repo/branch as separately-named inputs, and verifies the
// pipeline re-runs with the right combination whenever either side changes.
func TestMultipleInputsFromTheSameBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameBranch_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "dirA/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "dirB/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		// Fix: these constant lines were needlessly wrapped in fmt.Sprintf
		// with no format verbs or arguments.
		[]string{
			"cat /pfs/out/file",
			"cat /pfs/dirA/dirA/file >> /pfs/out/file",
			"cat /pfs/dirB/dirB/file >> /pfs/out/file",
		},
		nil,
		client.NewCrossInput(
			client.NewAtomInputOpts("dirA", dataRepo, "", "/dirA/*", false),
			client.NewAtomInputOpts("dirB", dataRepo, "", "/dirB/*", false),
		),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nfoo\n", buf.String())
	// Update only dirA; dirB's contribution must be preserved.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "dirA/file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nbar\nfoo\n", buf.String())
	// Update only dirB.
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "dirB/file", strings.NewReader("buzz\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\nbar\nfoo\nbuzz\n", buf.String())
	commitInfos, err = c.ListCommit(pipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 3, len(commitInfos))
}
// TestMultipleInputsFromTheSameRepoDifferentBranches crosses two branches of
// the same repo (under distinct input names) and verifies the pipeline sees
// both branches' data.
func TestMultipleInputsFromTheSameRepoDifferentBranches(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameRepoDifferentBranches_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	branchA := "branchA"
	branchB := "branchB"
	pipeline := tu.UniqueString("pipeline")
	// Fix (stale comment): both inputs come from the same repo, but they
	// are given distinct names ("branch-a"/"branch-b"), so creating the
	// pipeline succeeds.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"cat /pfs/branch-a/file >> /pfs/out/file",
			"cat /pfs/branch-b/file >> /pfs/out/file",
		},
		nil,
		client.NewCrossInput(
			client.NewAtomInputOpts("branch-a", dataRepo, branchA, "/*", false),
			client.NewAtomInputOpts("branch-b", dataRepo, branchB, "/*", false),
		),
		"",
		false,
	))
	commitA, err := c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	// Fix: check the PutFile/FinishCommit errors the original ignored.
	_, err = c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commitA.ID))
	commitB, err := c.StartCommit(dataRepo, branchB)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commitB.ID))
	iter, err := c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil)
	require.NoError(t, err)
	commits := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "data A\ndata B\n", buffer.String())
}
// TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental crosses two
// branches of the same repo with Incremental set, using marker files
// ("prev-a"/"prev-b") written only when a previous output exists to observe
// incremental behavior.
func TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameRepoDifferentBranchesIncremental_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	branchA := "branchA"
	branchB := "branchB"
	pipeline := tu.UniqueString("pipeline")
	// Fix (stale comment): the two inputs share a repo but have distinct
	// names, so creating the pipeline succeeds.
	req := &pps.CreatePipelineRequest{
		Pipeline: &pps.Pipeline{Name: pipeline},
		Transform: &pps.Transform{
			Cmd: []string{"bash"},
			Stdin: []string{
				"ls /pfs/out/file-a && echo true >> /pfs/out/prev-a",
				"ls /pfs/out/file-b && echo true >> /pfs/out/prev-b",
				"ls /pfs/branch-a/file && echo true >> /pfs/out/file-a",
				"ls /pfs/branch-b/file && echo true >> /pfs/out/file-b",
			},
		},
		Input: client.NewCrossInput(
			client.NewAtomInputOpts("branch-a", dataRepo, branchA, "/*", false),
			client.NewAtomInputOpts("branch-b", dataRepo, branchB, "/*", false),
		),
		Incremental: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	_, err := c.PpsAPIClient.CreatePipeline(ctx, req)
	require.NoError(t, err)
	// Make three commits: branchA, branchB, branchA. After the first run
	// no "prev-*" markers exist; after the third commit the previous
	// output is present, so both markers should appear.
	commit, err := c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	// Fix: check the PutFile/FinishCommit errors the original ignored.
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commit, err = c.StartCommit(dataRepo, branchB)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data B\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commits := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer := bytes.Buffer{}
	require.YesError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-a", 0, 0, &buffer))
	buffer.Reset()
	require.YesError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-b", 0, 0, &buffer))
	commit, err = c.StartCommit(dataRepo, branchA)
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "/file", strings.NewReader("data A\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	iter, err = c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commits = collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commits))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-a", 0, 0, &buffer))
	buffer.Reset()
	require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "prev-b", 0, 0, &buffer))
}
// TestPipelineFailure creates a pipeline whose command cannot run and
// verifies the resulting job ends in JOB_FAILURE with a datum-related reason.
func TestPipelineFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// "exit 1" is passed as the command itself (not via a shell), so
	// processing is expected to fail — that failure is the point of the test.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"exit 1"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Poll until the job for the pipeline shows up.
	var jobInfos []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Block until the job reaches a terminal state, then check it failed
	// and that the reason mentions the failing datum.
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	})
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	require.True(t, strings.Contains(jobInfo.Reason, "datum"))
}
// TestEgressFailure verifies that a pipeline with an invalid egress URL
// produces a job that fails with an egress-related reason.
func TestEgressFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestEgressFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// This pipeline should fail because the egress URL is invalid
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			Input: client.NewAtomInput(dataRepo, "/"),
			// Fix: use a keyed field instead of an unkeyed composite
			// literal (flagged by `go vet`'s composites check).
			Egress: &pps.Egress{URL: "invalid://blahblah"},
		})
	require.NoError(t, err)
	// Poll until the job appears, then block for its terminal state.
	var jobInfos []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	})
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	require.True(t, strings.Contains(jobInfo.Reason, "egress"))
}
// TestLazyPipelinePropagation chains two pipelines whose atom inputs are both
// marked lazy and verifies, via ListJob, that the Lazy flag is recorded on
// each job's input spec.
func TestLazyPipelinePropagation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestLazyPipelinePropagation_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Pipeline A reads the input repo lazily (last arg of NewAtomInputOpts).
	pipelineA := tu.UniqueString("pipeline-A")
	require.NoError(t, c.CreatePipeline(
		pipelineA,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInputOpts("", dataRepo, "", "/*", true),
		"",
		false,
	))
	// Pipeline B consumes pipeline A's output, also lazily.
	pipelineB := tu.UniqueString("pipeline-B")
	require.NoError(t, c.CreatePipeline(
		pipelineB,
		"",
		[]string{"cp", path.Join("/pfs", pipelineA, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInputOpts("", pipelineA, "", "/*", true),
		"",
		false,
	))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	// Both jobs should have recorded Lazy=true on their atom inputs.
	jobInfos, err := c.ListJob(pipelineA, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.NotNil(t, jobInfos[0].Input.Atom)
	require.Equal(t, true, jobInfos[0].Input.Atom.Lazy)
	jobInfos, err = c.ListJob(pipelineB, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.NotNil(t, jobInfos[0].Input.Atom)
	require.Equal(t, true, jobInfos[0].Input.Atom.Lazy)
}
// TestLazyPipeline runs a pipeline over a lazy input and verifies the output,
// including the case of a lazy file the pipeline never opens.
func TestLazyPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestLazyPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo: dataRepo,
					Glob: "/",
					// Lazy inputs are materialized on read rather than
					// downloaded up front.
					Lazy: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	// We put 2 files, 1 of which will never be touched by the pipeline code.
	// This is an important part of the correctness of this test because the
	// job-shim sets up a goro for each pipe, pipes that are never opened will
	// leak but that shouldn't prevent the job from completing.
	_, err = c.PutFile(dataRepo, "master", "file2", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestEmptyFiles runs a pipeline with EmptyFiles set: input files appear in
// /pfs with their names but no content, so the script verifies the file is
// empty and symlinks it to the output (the content is restored on upload).
func TestEmptyFiles(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestShufflePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// Fix: the original read `[ -s ...file]` — without a
					// space before `]`, `[` reports a syntax error instead
					// of actually testing that the file is empty.
					fmt.Sprintf("if [ -s /pfs/%s/file ]; then exit 1; fi", dataRepo),
					fmt.Sprintf("ln -s /pfs/%s/file /pfs/out/file", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:       dataRepo,
					Glob:       "/*",
					EmptyFiles: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// The symlinked (empty) file should come back with the original content.
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// There's an issue where if you use cp with certain flags, it might copy
// special files without reading from them. In our case, we use named pipes
// to simulate lazy files, so the pipes themselves might get copied into
// the output directory, blocking upload.
//
// We've updated the code such that we are able to detect if the files we
// are uploading are pipes, and make the job fail in that case.
// TestLazyPipelineCPPipes verifies that a job which tries to copy the named
// pipes backing lazy files (rather than reading through them) is detected and
// fails, instead of blocking upload forever.
func TestLazyPipelineCPPipes(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestLazyPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				// With -r, cp copies the named pipe itself instead of
				// reading from it, which Pachyderm should detect and reject.
				Cmd: []string{"cp", "-r", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo: dataRepo,
					Glob: "/",
					Lazy: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// wait for job to spawn
	time.Sleep(15 * time.Second)
	var jobID string
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipeline, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("len(jobInfos) should be 1")
		}
		jobID = jobInfos[0].Job.ID
		// Block until the job reaches a terminal state; it must be FAILURE.
		jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
			Job:        client.NewJob(jobID),
			BlockState: true,
		})
		if err != nil {
			return err
		}
		if jobInfo.State != pps.JobState_JOB_FAILURE {
			return fmt.Errorf("job did not fail, even though it tried to copy " +
				"pipes, which should be disallowed by Pachyderm")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestProvenance creates a pipeline DAG that's not a transitive reduction
// It looks like this:
// A
// | \
// v v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
// TestProvenance builds the non-transitively-reduced DAG described in the
// comment above (A feeds both B and C; B also feeds C) and verifies that each
// commit to A yields exactly one commit on C, with identical data flowing
// down both paths (so C's diff output is empty).
func TestProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/*"),
		"",
		false,
	))
	// C diffs A's file against B's copy of it; diff writes nothing when
	// they match, so C's output should be empty.
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("diff %s %s >/pfs/out/file",
			path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(aRepo, "/*"),
			client.NewAtomInput(bPipeline, "/*"),
		),
		"",
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	aCommit := commit2
	// Fix: keyed composite literal for *pfs.Repo (go vet composites check).
	commitIter, err := c.FlushCommit([]*pfs.Commit{aCommit}, []*pfs.Repo{{Name: bPipeline}})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	bCommit := commitInfos[0].Commit
	commitIter, err = c.FlushCommit([]*pfs.Commit{aCommit, bCommit}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	cCommitInfo := commitInfos[0]
	require.Equal(t, uint64(0), cCommitInfo.SizeBytes)
	// We should only see two commits in aRepo
	commitInfos, err = c.ListCommit(aRepo, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	// Fix (stale comment): each pipeline repo has two commits, one per
	// input commit — matching the assertions below.
	commitInfos, err = c.ListCommit(bPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(cPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
}
// TestProvenance2 tests the following DAG:
// A
// / \
// B C
// \ /
// D
// TestProvenance2 builds the diamond DAG described in the comment above
// (A feeds B and C, which both feed D) and verifies each commit to A yields
// exactly one commit on D whose diff output is empty.
func TestProvenance2(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	// B copies only A's b* files; C copies only A's c* files.
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "bfile"), "/pfs/out/bfile"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/b*"),
		"",
		false,
	))
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "cfile"), "/pfs/out/cfile"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/c*"),
		"",
		false,
	))
	// D diffs B's and C's outputs; since both derive from the same data,
	// the diff (and thus D's output file) should be empty.
	dPipeline := tu.UniqueString("D")
	require.NoError(t, c.CreatePipeline(
		dPipeline,
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("diff /pfs/%s/bfile /pfs/%s/cfile >/pfs/out/file", bPipeline, cPipeline),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/*"),
			client.NewAtomInput(cPipeline, "/*"),
		),
		"",
		false,
	))
	// commit to aRepo
	commit1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "bfile", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit1.ID, "cfile", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit1.ID))
	commit2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "bfile", strings.NewReader("bar\n"))
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, commit2.ID, "cfile", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, commit2.ID))
	// Fix: keyed composite literal for *pfs.Repo (go vet composites check).
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{{Name: dPipeline}})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// We should only see two commits in each repo.
	commitInfos, err = c.ListCommit(bPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(cPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	commitInfos, err = c.ListCommit(dPipeline, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commitInfos))
	for _, commitInfo := range commitInfos {
		commit := commitInfo.Commit
		buffer := bytes.Buffer{}
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, "file", 0, 0, &buffer))
		require.Equal(t, "", buffer.String())
	}
}
// TestFlushCommit builds a five-stage linear pipeline chain and checks that
// FlushCommit and FlushJobAll on each of ten input commits return exactly one
// result per stage.
func TestFlushCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	prefix := tu.UniqueString("repo")
	makeRepoName := func(i int) string {
		return fmt.Sprintf("%s-%d", prefix, i)
	}
	sourceRepo := makeRepoName(0)
	require.NoError(t, c.CreateRepo(sourceRepo))
	// Create a five-stage pipeline
	// Stage i+1 copies "file" from stage i's repo.
	numStages := 5
	for i := 0; i < numStages; i++ {
		repo := makeRepoName(i)
		require.NoError(t, c.CreatePipeline(
			makeRepoName(i+1),
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(repo, "/*"),
			"",
			false,
		))
	}
	for i := 0; i < 10; i++ {
		commit, err := c.StartCommit(sourceRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(sourceRepo, commit.ID))
		// Each input commit should flush to one commit and one job per stage.
		commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, numStages, len(commitInfos))
		jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, numStages, len(jobInfos))
	}
}
// TestFlushCommitFailures builds a three-stage chain where the middle stage
// deliberately fails once /pfs/<stage0>/file1 exists (i.e. on the second
// input commit), then checks that FlushJobAll still returns all three jobs
// and that the downstream jobs report JOB_FAILURE on the failing run.
func TestFlushCommitFailures(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestFlushCommitFailures")
	require.NoError(t, c.CreateRepo(dataRepo))
	prefix := tu.UniqueString("TestFlushCommitFailures")
	pipelineName := func(i int) string { return prefix + fmt.Sprintf("%d", i) }
	// Stage 0: pass-through copy of the input repo.
	require.NoError(t, c.CreatePipeline(
		pipelineName(0),
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Stage 1: fails (exit 1) as soon as file1 appears upstream.
	require.NoError(t, c.CreatePipeline(
		pipelineName(1),
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("if [ -f /pfs/%s/file1 ]; then exit 1; fi", pipelineName(0)),
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(0)),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(pipelineName(0), "/*"),
		"",
		false,
	))
	// Stage 2: pass-through of stage 1; inherits its failure.
	require.NoError(t, c.CreatePipeline(
		pipelineName(2),
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(1))},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(pipelineName(1), "/*"),
		"",
		false,
	))
	// First iteration writes file0 (all jobs succeed); second writes file1,
	// which triggers the stage-1 failure.
	for i := 0; i < 2; i++ {
		commit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\n"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
		jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, commit.ID)}, nil)
		require.NoError(t, err)
		require.Equal(t, 3, len(jobInfos))
		if i == 0 {
			for _, ji := range jobInfos {
				require.Equal(t, pps.JobState_JOB_SUCCESS.String(), ji.State.String())
			}
		} else {
			// Stage 0 still succeeds; stages 1 and 2 must fail.
			for _, ji := range jobInfos {
				if ji.Pipeline.Name != pipelineName(0) {
					require.Equal(t, pps.JobState_JOB_FAILURE.String(), ji.State.String())
				}
			}
		}
	}
}
// TestFlushCommitAfterCreatePipeline makes several commits on an anonymous
// branch, points "master" at the last one, then creates a pipeline and
// checks that FlushCommit on master completes (the pipeline only has to
// process the commit that existed when it was created).
func TestFlushCommitAfterCreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	var commit *pfs.Commit
	var err error
	// Commits are made with an empty branch name; "master" doesn't exist yet.
	for i := 0; i < 10; i++ {
		commit, err = c.StartCommit(repo, "")
		require.NoError(t, err)
		_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader(fmt.Sprintf("foo%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit.ID))
	}
	// Now create master at the head of the commit chain.
	require.NoError(t, c.SetBranch(repo, commit.ID, "master"))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(repo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
}
// TestRecreatePipeline tracks #432: deleting a pipeline and re-creating it
// with the same name must trigger jobs again on the second run.
func TestRecreatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	// Creates the pipeline and waits for it to emit one output commit.
	createPipeline := func() {
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
			nil,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(repo, "/*"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(collectCommitInfos(t, commitIter)))
	}
	// Do it twice. We expect jobs to be created on both runs.
	// The sleeps give the cluster time to tear down/spin up pipeline state.
	createPipeline()
	time.Sleep(5 * time.Second)
	require.NoError(t, c.DeletePipeline(pipeline))
	time.Sleep(5 * time.Second)
	createPipeline()
}
// TestDeletePipeline creates a two-stage chain and verifies that a pipeline
// cannot be deleted while a downstream pipeline depends on it, that the leaf
// pipeline can be deleted, and that its jobs disappear with it.
func TestDeletePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, uuid.NewWithoutDashes(), strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipelines := []string{tu.UniqueString("TestDeletePipeline1"), tu.UniqueString("TestDeletePipeline2")}
	// "sleep 20" keeps jobs running long enough to observe pipeline state.
	require.NoError(t, c.CreatePipeline(
		pipelines[0],
		"",
		[]string{"sleep", "20"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))
	require.NoError(t, c.CreatePipeline(
		pipelines[1],
		"",
		[]string{"sleep", "20"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(pipelines[0], "/*"),
		"",
		false,
	))
	time.Sleep(10 * time.Second)
	// Wait for the pipeline to start running
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipelines[1])
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("no running pipeline")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Can't delete a pipeline from the middle of the chain, only the leaf.
	require.YesError(t, c.DeletePipeline(pipelines[0]))
	require.NoError(t, c.DeletePipeline(pipelines[1]))
	time.Sleep(5 * time.Second)
	// Wait for the pipeline to disappear
	require.NoError(t, backoff.Retry(func() error {
		_, err := c.InspectPipeline(pipelines[1])
		if err == nil {
			return fmt.Errorf("expected pipeline to be missing, but it's still present")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// The job should be gone.
	jobs, err := c.ListJob(pipelines[1], nil, nil)
	require.NoError(t, err)
	// BUG FIX: arguments were swapped (actual, expected); require.Equal takes
	// the expected value first, matching every other assertion in this file.
	require.Equal(t, 0, len(jobs))
}
// TestPipelineState drives a pipeline through its state machine:
// RUNNING after creation, PAUSED after StopPipeline, and RUNNING again
// after StartPipeline.
func TestPipelineState(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))
	// Wait for pipeline to get picked up
	time.Sleep(15 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("pipeline should be in state running, not: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Stop pipeline and wait for the pipeline to pause
	require.NoError(t, c.StopPipeline(pipeline))
	time.Sleep(5 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_PAUSED {
			return fmt.Errorf("pipeline never paused, even though StopPipeline() was called, state: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Restart pipeline and wait for the pipeline to resume
	require.NoError(t, c.StartPipeline(pipeline))
	time.Sleep(15 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		pipelineInfo, err := c.InspectPipeline(pipeline)
		if err != nil {
			return err
		}
		if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
			return fmt.Errorf("pipeline never restarted, even though StartPipeline() was called, state: %s", pipelineInfo.State.String())
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineJobCounts runs one job through a pipeline and verifies the
// pipeline's JobCounts map records exactly one JOB_SUCCESS.
func TestPipelineJobCounts(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))
	// Trigger a job by creating a commit
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	jobInfos, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	// BlockState makes InspectJob wait until the job reaches a terminal state.
	inspectJobRequest := &pps.InspectJobRequest{
		Job:        jobInfos[0].Job,
		BlockState: true,
	}
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
	defer cancel() //cleanup resources
	_, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest)
	require.NoError(t, err)
	// check that the job has been accounted for
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, int32(1), pipelineInfo.JobCounts[int32(pps.JobState_JOB_SUCCESS)])
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward.
//
// TestDeleteAfterMembershipChange verifies that a repo can still be deleted
// after the pachd cluster is scaled up or down.
func TestDeleteAfterMembershipChange(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// test commits to a repo, randomly rescales pachd (up or down depending
	// on the argument), then deletes the repo through a fresh client.
	test := func(up bool) {
		repo := tu.UniqueString("TestDeleteAfterMembershipChange")
		c := getPachClient(t)
		require.NoError(t, c.DeleteAll())
		require.NoError(t, c.CreateRepo(repo))
		_, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, "master"))
		scalePachdRandom(t, up)
		// The old client's connection may be defunct after rescaling.
		c = getUsablePachClient(t)
		require.NoError(t, c.DeleteRepo(repo, false))
	}
	test(true)
	test(false)
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward.
//
// TestPachdRestartResumesRunningJobs checks that a job that was RUNNING when
// a pachd pod is killed is picked back up and completes successfully.
func TestPachdRestartResumesRunningJobs(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPachdRestartPickUpRunningJobs")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline; "sleep 10" keeps the job running across the restart
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			"sleep 10",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	time.Sleep(5 * time.Second)
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	require.Equal(t, pps.JobState_JOB_RUNNING, jobInfos[0].State)
	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// InspectJob with block=true waits for the job to finish.
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestUpdatePipelineThatHasNoOutput tracks #1637: updating a pipeline whose
// only job failed (and therefore produced no output commit) must not error.
func TestUpdatePipelineThatHasNoOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestUpdatePipelineThatHasNoOutput")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// "exit 1" guarantees the job fails and produces no output.
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sh"},
		[]string{"exit 1"},
		nil,
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Wait for job to spawn
	var jobInfos []*pps.JobInfo
	time.Sleep(10 * time.Second)
	require.NoError(t, backoff.Retry(func() error {
		var err error
		jobInfos, err = c.ListJob(pipeline, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) < 1 {
			return fmt.Errorf("job not spawned")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	// Now we update the pipeline (update=true); this must succeed even though
	// the pipeline has no output commit to build on.
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sh"},
		[]string{"exit 1"},
		nil,
		client.NewAtomInput(dataRepo, "/"),
		"",
		true,
	))
}
// TestAcceptReturnCode verifies that a job whose command exits with a code
// listed in Transform.AcceptReturnCode is still counted as JOB_SUCCESS.
func TestAcceptReturnCode(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestAcceptReturnCode")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipelineName := tu.UniqueString("pipeline")
	// The command exits 1, but 1 is whitelisted via AcceptReturnCode.
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// FIX: use client.NewPipeline instead of the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which fails `go vet` and is
			// inconsistent with the rest of this file.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd:              []string{"sh"},
				Stdin:            []string{"exit 1"},
				AcceptReturnCode: []int64{1},
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		},
	)
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward.
//
// TestRestartAll restarts the whole cluster and verifies pipeline, repo, and
// commit state all survive the restart.
func TestRestartAll(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestRestartAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	restartAll(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// Wait a little for pipelines to restart
	time.Sleep(10 * time.Second)
	// Pipeline, repo, and commit must all still exist post-restart.
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.Equal(t, pps.PipelineState_PIPELINE_RUNNING, pipelineInfo.State)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TODO(msteffen): This test breaks the suite when run against cloud providers,
// because killing the pachd pod breaks the connection with pachctl port-forward.
//
// TestRestartOne kills a single pachd pod and verifies pipeline, repo, and
// commit metadata survive.
func TestRestartOne(t *testing.T) {
	t.Skip("This is causing intermittent CI failures")
	// this test cannot be run in parallel because it restarts everything which breaks other tests.
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestRestartOne_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, commitIter)
	restartOne(t)
	// need a new client because the old one will have a defunct connection
	c = getUsablePachClient(t)
	// All metadata must still be inspectable after the restart.
	_, err = c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	_, err = c.InspectRepo(dataRepo)
	require.NoError(t, err)
	_, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
}
// TestPrettyPrinting smoke-tests the pretty-printers for repo, commit, file,
// pipeline, and job info against real objects produced by one pipeline run.
func TestPrettyPrinting(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPrettyPrinting_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline (with resource requests so those fields get printed too)
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// FIX: use client.NewPipeline instead of the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which fails `go vet` and is
			// inconsistent with the rest of this file.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		})
	require.NoError(t, err)
	// Do a commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Exercise each pretty-printer; any panic or error fails the test.
	repoInfo, err := c.InspectRepo(dataRepo)
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedRepoInfo(repoInfo))
	for _, commitInfo := range commitInfos {
		require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	}
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "file")
	require.NoError(t, err)
	require.NoError(t, pfspretty.PrintDetailedFileInfo(fileInfo))
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	require.NoError(t, ppspretty.PrintDetailedPipelineInfo(pipelineInfo))
	jobInfos, err := c.ListJob("", nil, nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) > 0)
	require.NoError(t, ppspretty.PrintDetailedJobInfo(jobInfos[0]))
}
// TestDeleteAll populates the cluster with a repo, pipeline, and job, calls
// DeleteAll, and verifies every object type is gone.
func TestDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// this test cannot be run in parallel because it deletes everything
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestDeleteAll_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Do commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(collectCommitInfos(t, commitIter)))
	require.NoError(t, c.DeleteAll())
	// After DeleteAll: no repos, no pipelines, no jobs.
	repoInfos, err := c.ListRepo()
	require.NoError(t, err)
	require.Equal(t, 0, len(repoInfos))
	pipelineInfos, err := c.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 0, len(pipelineInfos))
	jobInfos, err := c.ListJob("", nil, nil)
	require.NoError(t, err)
	require.Equal(t, 0, len(jobInfos))
}
// TestRecursiveCp runs a pipeline that recursively copies its whole input
// directory (100 files of ~40KB each) and checks the run completes.
func TestRecursiveCp(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestRecursiveCp_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("TestRecursiveCp")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sh"},
		[]string{
			fmt.Sprintf("cp -r /pfs/%s /pfs/out", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Do commit to repo: 100 files, each 10000 lines of "foo".
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(
			dataRepo,
			commit.ID,
			fmt.Sprintf("file%d", i),
			strings.NewReader(strings.Repeat("foo\n", 10000)),
		)
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(collectCommitInfos(t, commitIter)))
}
// TestPipelineUniqueness verifies that creating a second pipeline with the
// same name (without update) fails with an "already exists" error.
func TestPipelineUniqueness(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Second create with the identical spec and update=false must be rejected.
	err := c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	)
	require.YesError(t, err)
	require.Matches(t, "pipeline .*? already exists", err.Error())
}
// TestUpdatePipeline updates a pipeline twice: once with update=true (output
// changes for new commits only) and once with Reprocess: true (existing data
// is reprocessed with the new transform).
func TestUpdatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	// Version 1 of the transform writes "foo".
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo foo >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Update the pipeline, this will not create a new pipeline as reprocess
	// isn't set to true. Version 2 writes "bar".
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo bar >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("2"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "bar\n", buffer.String())
	// Update the pipeline again, this time with Reprocess: true set. Now we
	// should see a different output file even without a new input commit.
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd:   []string{"bash"},
				Stdin: []string{"echo buzz >/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:     client.NewAtomInput(dataRepo, "/*"),
			Update:    true,
			Reprocess: true,
		})
	require.NoError(t, err)
	iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "buzz\n", buffer.String())
}
// TestUpdatePipelineRunningJob updates a pipeline while its first job is
// still running ("sleep 1000") and checks the old job ends up KILLED while
// the job from the updated pipeline succeeds.
func TestUpdatePipelineRunningJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	// "sleep 1000" guarantees the first job is still running when we update.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"sleep 1000"},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	numFiles := 50
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		// BUG FIX: the PutFile error was previously assigned but never checked.
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(""))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		// BUG FIX: same silent error drop as above.
		_, err = c.PutFile(dataRepo, commit2.ID, fmt.Sprintf("file-%d", i+numFiles), strings.NewReader(""))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Wait until exactly one job exists and it is RUNNING.
	b := backoff.NewTestingBackOff()
	b.MaxElapsedTime = 30 * time.Second
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("wrong number of jobs")
		}
		if pps.JobState_JOB_RUNNING != jobInfos[0].State {
			return fmt.Errorf("wrong state: %v for %s", jobInfos[0].State, jobInfos[0].Job.ID)
		}
		return nil
	}, b))
	// Update the pipeline. This will not create a new pipeline as reprocess
	// isn't set to true. The new transform ("true") finishes immediately.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"true"},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	// Currently, commits finish shortly before their respecive JobInfo documents
	// are updated (the pipeline master receives the commit update and then
	// updates the JobInfo document). Wait briefly for this to happen
	time.Sleep(10 * time.Second)
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
	// Newest job first: the updated pipeline's job succeeded, the original
	// (sleeping) job was killed by the update.
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jobInfos[0].State.String())
	require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfos[1].State.String())
}
// TestManyFilesSingleCommit puts 20000 files into one commit and verifies
// ListFile returns all of them (exercising result pagination).
func TestManyFilesSingleCommit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestManyFilesSingleCommit_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Request enough to require more than one page of results
	numFiles := 20000
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(""))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	fileInfos, err := c.ListFile(dataRepo, "master", "")
	require.NoError(t, err)
	require.Equal(t, numFiles, len(fileInfos))
}
// TestStopPipeline verifies that a stopped pipeline does not process
// incoming commits, and that restarting it processes the backlog.
func TestStopPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Stop the pipeline, so it doesn't process incoming commits
	require.NoError(t, c.StopPipeline(pipelineName))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// wait for 10 seconds and check that no commit has been outputted
	time.Sleep(10 * time.Second)
	commits, err := c.ListCommit(pipelineName, "master", "", 0)
	require.NoError(t, err)
	// BUG FIX: arguments were swapped (actual, expected); require.Equal takes
	// the expected value first, matching every other assertion in this file.
	require.Equal(t, 0, len(commits))
	// Restart pipeline, and make sure old commit is processed
	require.NoError(t, c.StartPipeline(pipelineName))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
}
// TestStandby exercises pipeline standby mode: pipelines created with
// Standby: true should sit in PIPELINE_STANDBY while idle and only wake up
// to process commits.
func TestStandby(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	t.Run("ChainOf10", func(t *testing.T) {
		require.NoError(t, c.DeleteAll())
		dataRepo := tu.UniqueString("TestStandby_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		// Build a 10-deep chain of standby pipelines, each reading the
		// previous pipeline's output.
		numPipelines := 10
		pipelines := make([]string, numPipelines)
		for i := 0; i < numPipelines; i++ {
			pipelines[i] = tu.UniqueString("TestStandby")
			input := dataRepo
			if i > 0 {
				input = pipelines[i-1]
			}
			_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
				&pps.CreatePipelineRequest{
					Pipeline: client.NewPipeline(pipelines[i]),
					Transform: &pps.Transform{
						Cmd: []string{"true"},
					},
					Input:   client.NewAtomInput(input, "/*"),
					Standby: true,
				},
			)
			require.NoError(t, err)
		}
		// All pipelines should eventually settle into standby.
		require.NoErrorWithinTRetry(t, time.Second*30, func() error {
			pis, err := c.ListPipeline()
			require.NoError(t, err)
			var standby int
			for _, pi := range pis {
				if pi.State == pps.PipelineState_PIPELINE_STANDBY {
					standby++
				}
			}
			if standby != numPipelines {
				return fmt.Errorf("should have %d pipelines in standby, not %d", numPipelines, standby)
			}
			return nil
		})
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		var eg errgroup.Group
		// BUG FIX: the original signalled completion through a shared
		// `finished bool` written by one goroutine and read by the other with
		// no synchronization — a data race under `go test -race`. A closed
		// channel provides the same signal race-free.
		done := make(chan struct{})
		eg.Go(func() error {
			commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
			require.NoError(t, err)
			collectCommitInfos(t, commitIter)
			close(done)
			return nil
		})
		eg.Go(func() error {
			// Poll pipeline states until the flush goroutine finishes.
			for {
				select {
				case <-done:
					return nil
				default:
				}
				pis, err := c.ListPipeline()
				require.NoError(t, err)
				var active int
				for _, pi := range pis {
					if pi.State != pps.PipelineState_PIPELINE_STANDBY {
						active++
					}
				}
				// We tolerate having 2 pipelines out of standby because there's
				// latency associated with entering and exiting standby.
				require.True(t, active <= 2, "active: %d", active)
			}
		})
		// BUG FIX: the error from eg.Wait() was previously discarded. The
		// goroutines above only return nil today, but check it anyway so a
		// future edit cannot silently drop a failure.
		require.NoError(t, eg.Wait())
	})
	t.Run("ManyCommits", func(t *testing.T) {
		require.NoError(t, c.DeleteAll())
		dataRepo := tu.UniqueString("TestStandby_data")
		pipeline := tu.UniqueString("TestStandby")
		require.NoError(t, c.CreateRepo(dataRepo))
		// The pipeline records which pod processed each commit.
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd:   []string{"sh"},
					Stdin: []string{"echo $PPS_POD_NAME >/pfs/out/pod"},
				},
				Input:   client.NewAtomInput(dataRepo, "/"),
				Standby: true,
			},
		)
		require.NoError(t, err)
		numCommits := 100
		for i := 0; i < numCommits; i++ {
			_, err := c.StartCommit(dataRepo, "master")
			require.NoError(t, err)
			require.NoError(t, c.FinishCommit(dataRepo, "master"))
		}
		commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		// Every output commit must have been produced by the same pod: the
		// pipeline should not have left/re-entered standby per commit.
		pod := ""
		cis, err := c.ListCommit(pipeline, "master", "", 0)
		require.NoError(t, err)
		for _, ci := range cis {
			var buffer bytes.Buffer
			require.NoError(t, c.GetFile(pipeline, ci.Commit.ID, "pod", 0, 0, &buffer))
			if pod == "" {
				pod = buffer.String()
			} else {
				require.True(t, pod == buffer.String(), "multiple pods were used to process commits")
			}
		}
		// And the pipeline should be back in standby once the work is done.
		pi, err := c.InspectPipeline(pipeline)
		require.NoError(t, err)
		require.Equal(t, pps.PipelineState_PIPELINE_STANDBY.String(), pi.State.String())
	})
}
// TestPipelineEnv verifies that a pipeline's user code can observe:
//   - a Kubernetes secret mounted as a file under Secrets[].MountPath,
//   - the same secret value exposed as an env var via Secrets[].EnvVar, and
//   - a plain environment variable supplied through Transform.Env.
// It writes each value into /pfs/out and asserts the output files' contents.
func TestPipelineEnv(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	// make a secret to reference
	k := getKubeClient(t)
	secretName := tu.UniqueString("test-secret")
	_, err := k.CoreV1().Secrets(v1.NamespaceDefault).Create(
		&v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: secretName,
			},
			Data: map[string][]byte{
				"foo": []byte("foo\n"),
			},
		},
	)
	require.NoError(t, err)
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineEnv_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"sh"},
				Stdin: []string{
					"ls /var/secret",
					"cat /var/secret/foo > /pfs/out/foo",
					"echo $bar> /pfs/out/bar",
					"echo $foo> /pfs/out/foo_env",
				},
				Env: map[string]string{"bar": "bar"},
				Secrets: []*pps.Secret{
					{
						Name:      secretName,
						Key:       "foo",
						MountPath: "/var/secret",
						EnvVar:    "foo",
					},
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// "foo" came from the mounted secret file; "foo_env" from the secret's
	// env var; "bar" from Transform.Env.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "foo", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "foo_env", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "bar", 0, 0, &buffer))
	require.Equal(t, "bar\n", buffer.String())
}
// TestPipelineWithFullObjects runs a simple cp pipeline and commits to the
// input repo twice, flushing and checking the output after each commit. The
// second flush expects "foo\nbar\n" — the contents of both input commits —
// presumably the "full objects" output semantics named in the test; confirm
// against the pipeline output-commit documentation.
func TestPipelineWithFullObjects(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfoIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	buffer = bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
}
// TestPipelineWithExistingInputCommits creates a pipeline AFTER its input
// repo already has two finished commits, and verifies the pipeline processes
// only the head commit: one output commit whose file contains both lines.
func TestPipelineWithExistingInputCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\nbar\n", buffer.String())
	// Check that one output commit is created (processing the inputs' head commits)
	commitInfos, err = c.ListCommit(pipelineName, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(commitInfos))
}
// TestPipelineThatSymlinks runs a pipeline whose outputs are symlinks —
// both to input files under /pfs and to files outside /pfs (/tmp). It checks
// the resolved output contents, and that symlinked input files share object
// refs with their sources (i.e. the re-upload was skipped).
func TestPipelineThatSymlinks(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{
			// Symlinks to input files
			fmt.Sprintf("ln -s /pfs/%s/foo /pfs/out/foo", dataRepo),
			fmt.Sprintf("ln -s /pfs/%s/dir1/bar /pfs/out/bar", dataRepo),
			"mkdir /pfs/out/dir",
			fmt.Sprintf("ln -s /pfs/%s/dir2 /pfs/out/dir/dir2", dataRepo),
			// Symlinks to external files
			"echo buzz > /tmp/buzz",
			"ln -s /tmp/buzz /pfs/out/buzz",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Do first commit to repo
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "dir1/bar", strings.NewReader("bar"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "dir2/foo", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// Check that the output files are identical to the input files.
	buffer := bytes.Buffer{}
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "foo", 0, 0, &buffer))
	require.Equal(t, "foo", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "bar", 0, 0, &buffer))
	require.Equal(t, "bar", buffer.String())
	buffer.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "dir/dir2/foo", 0, 0, &buffer))
	require.Equal(t, "foo", buffer.String())
	buffer.Reset()
	// "buzz" gets a trailing newline from `echo`.
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "buzz", 0, 0, &buffer))
	require.Equal(t, "buzz\n", buffer.String())
	// Make sure that we skipped the upload by checking that the input file
	// and the output file have the same object refs.
	inputFooFileInfo, err := c.InspectFile(dataRepo, commit.ID, "foo")
	require.NoError(t, err)
	outputFooFileInfo, err := c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "foo")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
	inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir1/bar")
	require.NoError(t, err)
	outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "bar")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
	inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir2/foo")
	require.NoError(t, err)
	outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "dir/dir2/foo")
	require.NoError(t, err)
	require.Equal(t, inputFooFileInfo.Objects, outputFooFileInfo.Objects)
}
// TestChainedPipelines tracks https://github.com/pachyderm/pachyderm/issues/797
//
// Topology: A -> B, then C takes the cross of B and D. Flushing the two root
// commits (A and D) should yield exactly one result commit, in C, containing
// the files copied from both upstream inputs.
func TestChainedPipelines(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	dRepo := tu.UniqueString("D")
	require.NoError(t, c.CreateRepo(dRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	dCommit, err := c.StartCommit(dRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dRepo, "master"))
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/"),
		"",
		false,
	))
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/dFile", dRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/"),
			client.NewAtomInput(dRepo, "/"),
		),
		"",
		false,
	))
	resultIter, err := c.FlushCommit([]*pfs.Commit{aCommit, dCommit}, nil)
	require.NoError(t, err)
	results := collectCommitInfos(t, resultIter)
	require.Equal(t, 1, len(results))
	require.Equal(t, cPipeline, results[0].Commit.Repo.Name)
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "bFile", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "dFile", 0, 0, &buf))
	require.Equal(t, "bar\n", buf.String())
}
// DAG:
//
// A
// |
// B  E
// | /
// C
// |
// D
//
// TestChainedPipelinesNoDelay builds the DAG above (A and E are repos;
// B, C, D are pipelines) and checks that a second commit to E propagates
// through C to D, triggering exactly one additional job in D (2 total).
func TestChainedPipelinesNoDelay(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("A")
	require.NoError(t, c.CreateRepo(aRepo))
	eRepo := tu.UniqueString("E")
	require.NoError(t, c.CreateRepo(eRepo))
	aCommit, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	eCommit, err := c.StartCommit(eRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(eRepo, "master"))
	bPipeline := tu.UniqueString("B")
	require.NoError(t, c.CreatePipeline(
		bPipeline,
		"",
		[]string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(aRepo, "/"),
		"",
		false,
	))
	cPipeline := tu.UniqueString("C")
	require.NoError(t, c.CreatePipeline(
		cPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline),
			fmt.Sprintf("cp /pfs/%s/file /pfs/out/eFile", eRepo)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(bPipeline, "/"),
			client.NewAtomInput(eRepo, "/"),
		),
		"",
		false,
	))
	dPipeline := tu.UniqueString("D")
	require.NoError(t, c.CreatePipeline(
		dPipeline,
		"",
		[]string{"sh"},
		[]string{fmt.Sprintf("cp /pfs/%s/bFile /pfs/out/bFile", cPipeline),
			fmt.Sprintf("cp /pfs/%s/eFile /pfs/out/eFile", cPipeline)},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(cPipeline, "/"),
		"",
		false,
	))
	resultsIter, err := c.FlushCommit([]*pfs.Commit{aCommit, eCommit}, nil)
	require.NoError(t, err)
	results := collectCommitInfos(t, resultsIter)
	require.Equal(t, 2, len(results))
	eCommit2, err := c.StartCommit(eRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(eRepo, "master"))
	resultsIter, err = c.FlushCommit([]*pfs.Commit{eCommit2}, nil)
	require.NoError(t, err)
	results = collectCommitInfos(t, resultsIter)
	require.Equal(t, 2, len(results))
	// Get number of jobs triggered in pipeline D
	jobInfos, err := c.ListJob(dPipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos))
}
// collectCommitInfos drains commitInfoIter until io.EOF and returns every
// commit info seen; any other iteration error fails the test immediately.
func collectCommitInfos(t testing.TB, commitInfoIter client.CommitInfoIterator) []*pfs.CommitInfo {
	var infos []*pfs.CommitInfo
	for {
		info, err := commitInfoIter.Next()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		infos = append(infos, info)
	}
	return infos
}
// TestParallelismSpec checks ppsutil.GetExpectedNumWorkers against the
// various ParallelismSpec shapes: a Constant, Coefficient values (scaled by
// the cluster's node count, with a minimum of one worker), the zero-value
// spec, and a nil spec (both of which default to one worker).
func TestParallelismSpec(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	kubeclient := getKubeClient(t)
	nodes, err := kubeclient.CoreV1().Nodes().List(metav1.ListOptions{})
	// Fail cleanly if listing nodes errored; previously this error was
	// ignored and a failed List would nil-panic on nodes.Items below.
	require.NoError(t, err)
	numNodes := len(nodes.Items)
	// Test Constant strategy
	parallelism, err := ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Constant: 7,
	})
	require.NoError(t, err)
	require.Equal(t, 7, parallelism)
	// Coefficient == 1 (basic test)
	// TODO(msteffen): This test can fail when run against cloud providers, if the
	// remote cluster has more than one node (in which case "Coefficient: 1" will
	// cause more than 1 worker to start)
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 1,
	})
	require.NoError(t, err)
	require.Equal(t, numNodes, parallelism)
	// Coefficient > 1
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 2,
	})
	require.NoError(t, err)
	require.Equal(t, 2*numNodes, parallelism)
	// Make sure we start at least one worker
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{
		Coefficient: 0.01,
	})
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
	// Test 0-initialized JobSpec
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{})
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
	// Test nil JobSpec
	parallelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, nil)
	require.NoError(t, err)
	require.Equal(t, 1, parallelism)
}
// TestPipelineJobDeletion runs a single job through a cp pipeline and then
// verifies that the resulting job can be deleted via DeleteJob without error.
func TestPipelineJobDeletion(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Wait for the job's output commit before deleting the job.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	_, err = commitIter.Next()
	require.NoError(t, err)
	// Now delete the corresponding job
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	err = c.DeleteJob(jobInfos[0].Job.ID)
	require.NoError(t, err)
}
// TestStopJob creates two input commits for a slow ("sleep 20") pipeline,
// stops the first job mid-run, and verifies that the first job ends KILLED
// while the second job still runs to SUCCESS.
func TestStopJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestStopJob")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline-stop-job")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"sleep", "20"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Create two input commits to trigger two jobs.
	// We will stop the first job midway through, and assert that the
	// second job finishes.
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	var jobID string
	b := backoff.NewTestingBackOff()
	// Poll until exactly one job exists and it is RUNNING (the first job).
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("len(jobInfos) should be 1")
		}
		jobID = jobInfos[0].Job.ID
		if pps.JobState_JOB_RUNNING != jobInfos[0].State {
			return fmt.Errorf("jobInfos[0] has the wrong state")
		}
		return nil
	}, b))
	// Now stop the first job
	err = c.StopJob(jobID)
	require.NoError(t, err)
	jobInfo, err := c.InspectJob(jobID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_KILLED, jobInfo.State)
	b.Reset()
	// Check that the second job completes
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 2 {
			return fmt.Errorf("len(jobInfos) should be 2")
		}
		// jobInfos[0] is the most recent job, i.e. the second one.
		jobID = jobInfos[0].Job.ID
		return nil
	}, b))
	jobInfo, err = c.InspectJob(jobID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestGetLogs runs the shared GetLogs test with stats collection disabled.
func TestGetLogs(t *testing.T) {
	testGetLogs(t, false)
}
// TestGetLogsWithStats runs the shared GetLogs test with stats collection enabled.
func TestGetLogsWithStats(t *testing.T) {
	testGetLogs(t, true)
}
// testGetLogs exercises the GetLogs API end-to-end: retrieving logs by
// pipeline, by job, filtered by input file (path / hex hash / base64 hash),
// tailing, following, and the error cases (nonexistent pipeline/job, datum
// filters without a pipeline or job). enableStats toggles the pipeline's
// EnableStats flag so both code paths are covered.
func testGetLogs(t *testing.T, enableStats bool) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// GetLogs on an empty cluster should iterate cleanly with no error.
	iter := c.GetLogs("", "", nil, "", false, false, 0)
	for iter.Next() {
	}
	require.NoError(t, iter.Err())
	// create repos
	dataRepo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"sh"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
					"echo foo",
					"echo %s", // %s tests a formatting bug we had (#2729)
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: enableStats,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err)
	// Commit data to repo and flush commit
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	_, err = c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	// Get logs from pipeline, using pipeline
	iter = c.GetLogs(pipelineName, "", nil, "", false, false, 0)
	var numLogs int
	var loglines []string
	for iter.Next() {
		// Only count user-code log lines, not system messages.
		if !iter.Message().User {
			continue
		}
		numLogs++
		require.True(t, iter.Message().Message != "")
		loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n"))
		// Guards against the "%s"-formatting regression referenced above.
		require.False(t, strings.Contains(iter.Message().Message, "MISSING"), iter.Message().Message)
	}
	require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n"))
	require.NoError(t, iter.Err())
	// Get logs from pipeline, using pipeline (tailing the last two log lines)
	iter = c.GetLogs(pipelineName, "", nil, "", false, false, 2)
	numLogs = 0
	loglines = []string{}
	for iter.Next() {
		numLogs++
		require.True(t, iter.Message().Message != "")
		loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n"))
	}
	require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n"))
	require.NoError(t, iter.Err())
	// Get logs from pipeline, using a pipeline that doesn't exist. There should
	// be an error
	iter = c.GetLogs("__DOES_NOT_EXIST__", "", nil, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	require.Matches(t, "could not get", iter.Err().Error())
	// Get logs from pipeline, using job
	// (1) Get job ID, from pipeline that just ran
	jobInfos, err := c.ListJob(pipelineName, nil, nil)
	require.NoError(t, err)
	require.True(t, len(jobInfos) == 1)
	// (2) Get logs using extracted job ID
	// wait for logs to be collected
	time.Sleep(10 * time.Second)
	iter = c.GetLogs("", jobInfos[0].Job.ID, nil, "", false, false, 0)
	numLogs = 0
	for iter.Next() {
		numLogs++
		require.True(t, iter.Message().Message != "")
	}
	// Make sure that we've seen some logs
	require.NoError(t, iter.Err())
	require.True(t, numLogs > 0)
	// Get logs for datums but don't specify pipeline or job. These should error
	iter = c.GetLogs("", "", []string{"/foo"}, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	resp, err := c.ListDatum(jobInfos[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.True(t, len(resp.DatumInfos) > 0)
	iter = c.GetLogs("", "", nil, resp.DatumInfos[0].Datum.ID, false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	// Get logs from pipeline, using a job that doesn't exist. There should
	// be an error
	iter = c.GetLogs("", "__DOES_NOT_EXIST__", nil, "", false, false, 0)
	require.False(t, iter.Next())
	require.YesError(t, iter.Err())
	require.Matches(t, "could not get", iter.Err().Error())
	// Filter logs based on input (using file that exists). Get logs using file
	// path, hex hash, and base64 hash, and make sure you get the same log lines
	fileInfo, err := c.InspectFile(dataRepo, commit.ID, "/file")
	require.NoError(t, err)
	// TODO(msteffen) This code shouldn't be wrapped in a backoff, but for some
	// reason GetLogs is not yet 100% consistent. This reduces flakes in testing.
	require.NoError(t, backoff.Retry(func() error {
		pathLog := c.GetLogs("", jobInfos[0].Job.ID, []string{"/file"}, "", false, false, 0)
		// Hard-coded hashes of the committed file content ("foo\n");
		// the Equal calls below sanity-check them against fileInfo.Hash.
		hexHash := "19fdf57bdf9eb5a9602bfa9c0e6dd7ed3835f8fd431d915003ea82747707be66"
		require.Equal(t, hexHash, hex.EncodeToString(fileInfo.Hash)) // sanity-check test
		hexLog := c.GetLogs("", jobInfos[0].Job.ID, []string{hexHash}, "", false, false, 0)
		base64Hash := "Gf31e9+etalgK/qcDm3X7Tg1+P1DHZFQA+qCdHcHvmY="
		require.Equal(t, base64Hash, base64.StdEncoding.EncodeToString(fileInfo.Hash))
		base64Log := c.GetLogs("", jobInfos[0].Job.ID, []string{base64Hash}, "", false, false, 0)
		numLogs = 0
		// All three filter spellings must yield identical log streams.
		for {
			havePathLog, haveHexLog, haveBase64Log := pathLog.Next(), hexLog.Next(), base64Log.Next()
			if havePathLog != haveHexLog || haveHexLog != haveBase64Log {
				return fmt.Errorf("Unequal log lengths")
			}
			if !havePathLog {
				break
			}
			numLogs++
			if pathLog.Message().Message != hexLog.Message().Message ||
				hexLog.Message().Message != base64Log.Message().Message {
				return fmt.Errorf(
					"unequal logs, pathLogs: \"%s\" hexLog: \"%s\" base64Log: \"%s\"",
					pathLog.Message().Message,
					hexLog.Message().Message,
					base64Log.Message().Message)
			}
		}
		for _, logsiter := range []*client.LogsIter{pathLog, hexLog, base64Log} {
			if logsiter.Err() != nil {
				return logsiter.Err()
			}
		}
		if numLogs == 0 {
			return fmt.Errorf("no logs found")
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Filter logs based on input (using file that doesn't exist). There should
	// be no logs
	iter = c.GetLogs("", jobInfos[0].Job.ID, []string{"__DOES_NOT_EXIST__"}, "", false, false, 0)
	require.False(t, iter.Next())
	require.NoError(t, iter.Err())
	// Stream logs with a deadline; commit mid-stream so new lines arrive.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	iter = c.WithCtx(ctx).GetLogs(pipelineName, "", nil, "", false, false, 0)
	numLogs = 0
	for iter.Next() {
		numLogs++
		if numLogs == 8 {
			// Do another commit so there's logs to receive with follow
			_, err = c.StartCommit(dataRepo, "master")
			require.NoError(t, err)
			_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("bar\n"))
			require.NoError(t, err)
			require.NoError(t, c.FinishCommit(dataRepo, "master"))
		}
		require.True(t, iter.Message().Message != "")
		if numLogs == 16 {
			break
		}
	}
	require.NoError(t, iter.Err())
}
// TestPfsPutFile exercises PutFileURL with the pfs:// scheme: copying a
// single file, then recursively copying a whole commit (including nested
// directories) from one repo into another, and verifying the contents.
// NOTE(review): the pfs://0.0.0.0:650 address is hard-coded — presumably the
// in-cluster pachd address; confirm if the deployment layout changes.
func TestPfsPutFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	repo1 := tu.UniqueString("TestPfsPutFile1")
	require.NoError(t, c.CreateRepo(repo1))
	repo2 := tu.UniqueString("TestPfsPutFile2")
	require.NoError(t, c.CreateRepo(repo2))
	commit1, err := c.StartCommit(repo1, "")
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "file1", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "file2", strings.NewReader("bar\n"))
	require.NoError(t, err)
	_, err = c.PutFile(repo1, commit1.ID, "dir1/file3", strings.NewReader("fizz\n"))
	require.NoError(t, err)
	for i := 0; i < 100; i++ {
		_, err = c.PutFile(repo1, commit1.ID, fmt.Sprintf("dir1/dir2/file%d", i), strings.NewReader(fmt.Sprintf("content%d\n", i)))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(repo1, commit1.ID))
	commit2, err := c.StartCommit(repo2, "")
	require.NoError(t, err)
	// Single-file copy via pfs:// URL.
	err = c.PutFileURL(repo2, commit2.ID, "file", fmt.Sprintf("pfs://0.0.0.0:650/%s/%s/file1", repo1, commit1.ID), false, false)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo2, commit2.ID))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(repo2, commit2.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	commit3, err := c.StartCommit(repo2, "")
	require.NoError(t, err)
	// Recursive copy of the whole commit (recursive=true).
	err = c.PutFileURL(repo2, commit3.ID, "", fmt.Sprintf("pfs://0.0.0.0:650/%s/%s", repo1, commit1.ID), true, false)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo2, commit3.ID))
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "file1", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "file2", 0, 0, &buf))
	require.Equal(t, "bar\n", buf.String())
	buf = bytes.Buffer{}
	require.NoError(t, c.GetFile(repo2, commit3.ID, "dir1/file3", 0, 0, &buf))
	require.Equal(t, "fizz\n", buf.String())
	for i := 0; i < 100; i++ {
		buf = bytes.Buffer{}
		require.NoError(t, c.GetFile(repo2, commit3.ID, fmt.Sprintf("dir1/dir2/file%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("content%d\n", i), buf.String())
	}
}
// TestAllDatumsAreProcessed builds a cross input over two repos with two
// files each (2x2 = 4 datums, each concatenating both sides) and verifies
// the combined output accounts for every datum.
func TestAllDatumsAreProcessed(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo1 := tu.UniqueString("TestAllDatumsAreProcessed_data1")
	require.NoError(t, c.CreateRepo(dataRepo1))
	dataRepo2 := tu.UniqueString("TestAllDatumsAreProcessed_data2")
	require.NoError(t, c.CreateRepo(dataRepo2))
	commit1, err := c.StartCommit(dataRepo1, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo1, "master", "file1", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo1, "master", "file2", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo1, "master"))
	commit2, err := c.StartCommit(dataRepo2, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo2, "master", "file1", strings.NewReader("foo\n"))
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo2, "master", "file2", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo2, "master"))
	require.NoError(t, c.CreatePipeline(
		tu.UniqueString("TestAllDatumsAreProcessed_pipelines"),
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%s/* /pfs/%s/* > /pfs/out/file", dataRepo1, dataRepo2),
		},
		nil,
		client.NewCrossInput(
			client.NewAtomInput(dataRepo1, "/*"),
			client.NewAtomInput(dataRepo2, "/*"),
		),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1, commit2}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	// should be 8 because each file gets copied twice due to cross product
	require.Equal(t, strings.Repeat("foo\n", 8), buf.String())
}
// TestDatumStatusRestart verifies that RestartDatum actually restarts a
// datum: a slow ("sleep 20") pipeline's worker status should report a later
// datum start time after the restart than before it.
func TestDatumStatusRestart(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestDatumDedup_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 20 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 20",
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	var jobID string
	var datumStarted time.Time
	// checkStatus waits for 'pipeline' to start and makes sure that each time
	// it's called, the datum being processes was started at a new and later time
	// (than the last time checkStatus was called)
	checkStatus := func() {
		require.NoError(t, backoff.Retry(func() error {
			// get the
			jobs, err := c.ListJob(pipeline, nil, nil)
			require.NoError(t, err)
			if len(jobs) == 0 {
				return fmt.Errorf("no jobs found")
			}
			jobID = jobs[0].Job.ID
			jobInfo, err := c.InspectJob(jobs[0].Job.ID, false)
			require.NoError(t, err)
			if len(jobInfo.WorkerStatus) == 0 {
				return fmt.Errorf("no worker statuses")
			}
			if jobInfo.WorkerStatus[0].JobID == jobInfo.Job.ID {
				// The first time this function is called, datumStarted is zero
				// so `Before` is true for any non-zero time.
				_datumStarted, err := types.TimestampFromProto(jobInfo.WorkerStatus[0].Started)
				require.NoError(t, err)
				require.True(t, datumStarted.Before(_datumStarted))
				datumStarted = _datumStarted
				return nil
			}
			return fmt.Errorf("worker status from wrong job")
		}, backoff.RetryEvery(time.Second).For(30*time.Second)))
	}
	checkStatus()
	// Restart the datum, then confirm its start time advanced.
	require.NoError(t, c.RestartDatum(jobID, []string{"/file"}))
	checkStatus()
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
}
// TestUseMultipleWorkers checks that a pipeline with ParallelismSpec
// Constant: 2 and many datums actually reports two concurrent worker
// statuses for its job. Currently skipped as flaky.
func TestUseMultipleWorkers(t *testing.T) {
	t.Skip("flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestUseMultipleWorkers_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	// 20 files => 20 datums, plenty for both workers to stay busy.
	for i := 0; i < 20; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	// This pipeline sleeps for 10 secs per datum
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 10",
		},
		&pps.ParallelismSpec{
			Constant: 2,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Get job info 2x/sec for 20s until we confirm two workers for the current job
	require.NoError(t, backoff.Retry(func() error {
		jobs, err := c.ListJob(pipeline, nil, nil)
		if err != nil {
			return fmt.Errorf("could not list job: %s", err.Error())
		}
		if len(jobs) == 0 {
			return fmt.Errorf("failed to find job")
		}
		jobInfo, err := c.InspectJob(jobs[0].Job.ID, false)
		if err != nil {
			return fmt.Errorf("could not inspect job: %s", err.Error())
		}
		if len(jobInfo.WorkerStatus) != 2 {
			return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus))
		}
		return nil
	}, backoff.RetryEvery(500*time.Millisecond).For(20*time.Second)))
}
// TestSystemResourceRequests doesn't create any jobs or pipelines, it
// just makes sure that when pachyderm is deployed, we give pachd and
// etcd default resource requests. This prevents them from overloading
// nodes and getting evicted, which can slow down or break a cluster.
func TestSystemResourceRequests(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	kubeClient := getKubeClient(t)

	// Expected resource requests for pachyderm system pods. A deployment may
	// carry either the local defaults or the cloud defaults, so both are
	// accepted for each app below.
	defaultLocalMem := map[string]string{
		"pachd": "512M",
		"etcd":  "256M",
	}
	defaultLocalCPU := map[string]string{
		"pachd": "250m",
		"etcd":  "250m",
	}
	defaultCloudMem := map[string]string{
		"pachd": "3G",
		"etcd":  "2G",
	}
	defaultCloudCPU := map[string]string{
		"pachd": "1",
		"etcd":  "1",
	}
	// For each system app, fetch its pod from k8s (retrying until it exists)
	// and verify its first container carries CPU and memory requests.
	for _, app := range []string{"pachd", "etcd"} {
		var container v1.Container
		err := backoff.Retry(func() error {
			selector := metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
				map[string]string{"app": app, "suite": "pachyderm"},
			))
			podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(
				metav1.ListOptions{LabelSelector: selector})
			if err != nil {
				return err
			}
			if len(podList.Items) < 1 {
				return fmt.Errorf("could not find pod for %s", app) // retry
			}
			container = podList.Items[0].Spec.Containers[0]
			return nil
		}, backoff.NewTestingBackOff())
		require.NoError(t, err)

		// The container must request CPU and memory matching either the
		// local or the cloud defaults.
		cpu, ok := container.Resources.Requests[v1.ResourceCPU]
		require.True(t, ok, "could not get CPU request for "+app)
		require.True(t, cpu.String() == defaultLocalCPU[app] ||
			cpu.String() == defaultCloudCPU[app])
		mem, ok := container.Resources.Requests[v1.ResourceMemory]
		require.True(t, ok, "could not get memory request for "+app)
		require.True(t, mem.String() == defaultLocalMem[app] ||
			mem.String() == defaultCloudMem[app])
	}
}
// TestPipelineResourceRequest creates a pipeline with a resource request, and
// makes sure that's passed to k8s (by inspecting the pipeline's pods)
func TestPipelineResourceRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceRequest")
	pipelineName := tu.UniqueString("TestPipelineResourceRequest_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use client.NewPipeline (as the other tests in this file do)
			// rather than the unkeyed composite literal
			// &pps.Pipeline{pipelineName}, which `go vet` rejects.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(
			metav1.ListOptions{
				LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
					map[string]string{"app": rcName},
				)),
			})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// Make sure a CPU and Memory request are both set
	cpu, ok := container.Resources.Requests[v1.ResourceCPU]
	require.True(t, ok)
	require.Equal(t, "500m", cpu.String())
	mem, ok := container.Resources.Requests[v1.ResourceMemory]
	require.True(t, ok)
	require.Equal(t, "100M", mem.String())
	// No GPU was requested, so none should appear in the pod spec.
	_, ok = container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelineResourceLimit creates a pipeline with a resource limit, and
// makes sure that's passed to k8s (by inspecting the pipeline's pods).
func TestPipelineResourceLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceLimit")
	pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use client.NewPipeline rather than the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which `go vet` rejects.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			ResourceLimits: &pps.ResourceSpec{
				Memory: "100M",
				Cpu:    0.5,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{
			LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
				map[string]string{"app": rcName, "suite": "pachyderm"},
			)),
		})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// Make sure a CPU and Memory *limit* are both set (these assertions read
	// Resources.Limits; the old comment incorrectly said "request").
	cpu, ok := container.Resources.Limits[v1.ResourceCPU]
	require.True(t, ok)
	require.Equal(t, "500m", cpu.String())
	mem, ok := container.Resources.Limits[v1.ResourceMemory]
	require.True(t, ok)
	require.Equal(t, "100M", mem.String())
	// No GPU was requested, so none should appear in the pod spec.
	_, ok = container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelineResourceLimitDefaults creates a pipeline with no explicit
// resource spec and checks that no GPU request appears in the pod by default.
func TestPipelineResourceLimitDefaults(t *testing.T) {
	// We need to make sure GPU is set to 0 for k8s 1.8
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelineResourceLimit")
	pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use client.NewPipeline rather than the unkeyed composite
			// literal &pps.Pipeline{pipelineName}, which `go vet` rejects.
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// Get info about the pipeline pods from k8s & check for resources
	pipelineInfo, err := c.InspectPipeline(pipelineName)
	require.NoError(t, err)
	var container v1.Container
	rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	kubeClient := getKubeClient(t)
	err = backoff.Retry(func() error {
		podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{
			LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(
				map[string]string{"app": rcName, "suite": "pachyderm"},
			)),
		})
		if err != nil {
			return err // retry
		}
		if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 {
			return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name)
		}
		container = podList.Items[0].Spec.Containers[0]
		return nil // no more retries
	}, backoff.NewTestingBackOff())
	require.NoError(t, err)
	// With no ResourceSpec given, the pod must not request a GPU.
	_, ok := container.Resources.Requests[v1.ResourceNvidiaGPU]
	require.False(t, ok)
}
// TestPipelinePartialResourceRequest creates three pipelines with
// progressively sparser ResourceRequests (cpu+memory, memory only, empty)
// and verifies that all of them still reach the RUNNING state.
func TestPipelinePartialResourceRequest(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipelinePartialResourceRequest")
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Resources are not yet in client.CreatePipeline() (we may add them later)
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			// Use client.NewPipeline rather than the unkeyed composite
			// literal &pps.Pipeline{...}, which `go vet` rejects.
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 0)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ResourceRequests: &pps.ResourceSpec{
				Cpu:    0.5,
				Memory: "100M",
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 1)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			// Memory only — no CPU request.
			ResourceRequests: &pps.ResourceSpec{
				Memory: "100M",
			},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 2)),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			// Completely empty resource spec.
			ResourceRequests: &pps.ResourceSpec{},
			Input: &pps.Input{
				Atom: &pps.AtomInput{
					Repo:   dataRepo,
					Branch: "master",
					Glob:   "/*",
				},
			},
		})
	require.NoError(t, err)
	// All three pipelines should come up despite the partial/empty specs.
	require.NoError(t, backoff.Retry(func() error {
		for i := 0; i < 3; i++ {
			pipelineInfo, err := c.InspectPipeline(fmt.Sprintf("%s-%d", pipelineName, i))
			require.NoError(t, err)
			if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING {
				return fmt.Errorf("pipeline not in running state")
			}
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineLargeOutput runs a 4-worker pipeline whose datums each emit
// 100 randomly named output files, and verifies the output commit appears.
func TestPipelineLargeOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineInputDataModification_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 100
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(""))
		// This error was previously dropped; a failed PutFile would have
		// gone unnoticed until the pipeline assertions below.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"for i in `seq 1 100`; do touch /pfs/out/$RANDOM; done",
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
}
// TestUnionInput exercises union inputs in several shapes: a plain union of
// four atoms, unions of crosses, crosses of unions, and name-aliased
// variants (including invalid alias combinations that must be rejected).
func TestUnionInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	var repos []string
	for i := 0; i < 4; i++ {
		repos = append(repos, tu.UniqueString("TestUnionInput"))
		require.NoError(t, c.CreateRepo(repos[i]))
	}
	// Each repo gets the same two 1-byte files ("file-0" = "0", "file-1" = "1").
	numFiles := 2
	var commits []*pfs.Commit
	for _, repo := range repos {
		commit, err := c.StartCommit(repo, "master")
		require.NoError(t, err)
		commits = append(commits, commit)
		for i := 0; i < numFiles; i++ {
			_, err = c.PutFile(repo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
			// This error was previously dropped.
			require.NoError(t, err)
		}
		require.NoError(t, c.FinishCommit(repo, "master"))
	}
	t.Run("union all", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp /pfs/*/* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewAtomInput(repos[0], "/*"),
				client.NewAtomInput(repos[1], "/*"),
				client.NewAtomInput(repos[2], "/*"),
				client.NewAtomInput(repos[3], "/*"),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "")
		require.NoError(t, err)
		require.Equal(t, 2, len(fileInfos))
		for _, fi := range fileInfos {
			// 1 byte per repo
			require.Equal(t, uint64(len(repos)), fi.SizeBytes)
		}
	})
	t.Run("union crosses", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/TestUnionInput* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInput(repos[0], "/*"),
					client.NewAtomInput(repos[1], "/*"),
				),
				client.NewCrossInput(
					client.NewAtomInput(repos[2], "/*"),
					client.NewAtomInput(repos[3], "/*"),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, repo := range repos {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, repo)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(2), fi.SizeBytes)
			}
		}
	})
	t.Run("cross unions", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/TestUnionInput* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInput(repos[0], "/*"),
					client.NewAtomInput(repos[1], "/*"),
				),
				client.NewUnionInput(
					client.NewAtomInput(repos[2], "/*"),
					client.NewAtomInput(repos[3], "/*"),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, repo := range repos {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, repo)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(4), fi.SizeBytes)
			}
		}
	})
	t.Run("union alias", func(t *testing.T) {
		// All four atoms share the alias "in", so their files land in the
		// same /pfs/in directory.
		pipeline := tu.UniqueString("pipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewAtomInputOpts("in", repos[0], "", "/*", false),
				client.NewAtomInputOpts("in", repos[1], "", "/*", false),
				client.NewAtomInputOpts("in", repos[2], "", "/*", false),
				client.NewAtomInputOpts("in", repos[3], "", "/*", false),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "in")
		require.NoError(t, err)
		require.Equal(t, 2, len(fileInfos))
		for _, fi := range fileInfos {
			require.Equal(t, uint64(4), fi.SizeBytes)
		}
	})
	t.Run("union cross alias", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		// Reusing the same alias within one cross must be rejected.
		require.YesError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in1", repos[1], "", "/*", false),
				),
				client.NewCrossInput(
					client.NewAtomInputOpts("in2", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewUnionInput(
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[1], "", "/*", false),
				),
				client.NewCrossInput(
					client.NewAtomInputOpts("in1", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, dir := range []string{"in1", "in2"} {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(4), fi.SizeBytes)
			}
		}
	})
	t.Run("cross union alias", func(t *testing.T) {
		pipeline := tu.UniqueString("pipeline")
		// Using both aliases inside one union must be rejected.
		require.YesError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[1], "", "/*", false),
				),
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				"cp -r /pfs/in* /pfs/out",
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewCrossInput(
				client.NewUnionInput(
					client.NewAtomInputOpts("in1", repos[0], "", "/*", false),
					client.NewAtomInputOpts("in1", repos[1], "", "/*", false),
				),
				client.NewUnionInput(
					client.NewAtomInputOpts("in2", repos[2], "", "/*", false),
					client.NewAtomInputOpts("in2", repos[3], "", "/*", false),
				),
			),
			"",
			false,
		))
		commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		outCommit := commitInfos[0].Commit
		for _, dir := range []string{"in1", "in2"} {
			fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir)
			require.NoError(t, err)
			require.Equal(t, 2, len(fileInfos))
			for _, fi := range fileInfos {
				// each file should be seen twice
				require.Equal(t, uint64(8), fi.SizeBytes)
			}
		}
	})
}
// TestIncrementalOverwritePipeline runs an Incremental pipeline that keeps a
// running sum in /pfs/out/sum. Each input commit deletes and rewrites the
// single "data" file; the final output must equal the sum of every value
// ever written (0 + 1 + ... + 150).
func TestIncrementalOverwritePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalOverwritePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// NOTE(review): the script reads the previous
					// /pfs/out/sum, which appears to rely on
					// Incremental: true below — confirm semantics.
					"touch /pfs/out/sum",
					fmt.Sprintf("SUM=`cat /pfs/%s/data /pfs/out/sum | awk '{sum+=$1} END {print sum}'`", dataRepo),
					"echo $SUM > /pfs/out/sum",
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	expectedValue := 0
	// Overwrite "data" with the value i, one commit per iteration.
	for i := 0; i <= 150; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.DeleteFile(dataRepo, "master", "data"))
		_, err = c.PutFile(dataRepo, "master", "data", strings.NewReader(fmt.Sprintf("%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		expectedValue += i
	}
	// Read the accumulated sum once the output commit is available.
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "sum", 0, 0, &buf))
	require.Equal(t, fmt.Sprintf("%d\n", expectedValue), buf.String())
}
// TestIncrementalAppendPipeline runs an Incremental pipeline that keeps a
// running sum in /pfs/out/sum. Each input commit appends one line to the
// split file "data" via PutFileSplitWriter; the final output must equal the
// sum of all appended values (0 + 1 + ... + 150).
func TestIncrementalAppendPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalAppendPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// NOTE(review): unlike the overwrite test, "data" is a
					// split file, so the script cats data/* (one file per
					// line) plus the previous /pfs/out/sum.
					"touch /pfs/out/sum",
					fmt.Sprintf("SUM=`cat /pfs/%s/data/* /pfs/out/sum | awk '{sum+=$1} END {print sum}'`", dataRepo),
					"echo $SUM > /pfs/out/sum",
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	expectedValue := 0
	// Append one line ("<i>\n") to the split file "data", one commit per
	// iteration, using a line-delimited split writer.
	for i := 0; i <= 150; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		w, err := c.PutFileSplitWriter(dataRepo, "master", "data", pfs.Delimiter_LINE, 0, 0, false)
		require.NoError(t, err)
		_, err = w.Write([]byte(fmt.Sprintf("%d\n", i)))
		require.NoError(t, err)
		require.NoError(t, w.Close())
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		expectedValue += i
	}
	// Read the accumulated sum once the output commit is available.
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "sum", 0, 0, &buf))
	require.Equal(t, fmt.Sprintf("%d\n", expectedValue), buf.String())
}
// TestIncrementalOneFile runs an Incremental pipeline whose input glob
// matches a single file (/dir/file). Two commits write "foo\n" then "bar\n"
// to that file; the test expects the pipeline's output to contain both
// lines ("foo\nbar\n").
func TestIncrementalOneFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalOneFile")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"find /pfs",
					fmt.Sprintf("cp /pfs/%s/dir/file /pfs/out/file", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/dir/file"),
			Incremental: true,
		})
	require.NoError(t, err)
	// First commit: /dir/file contains "foo\n".
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "/dir/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// Second commit: "bar\n" is added to the same file.
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "/dir/file", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// The output file should hold the content of both commits.
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "file", 0, 0, &buf))
	require.Equal(t, "foo\nbar\n", buf.String())
}
// TestIncrementalFailure runs an Incremental pipeline over an initially
// empty commit (so the pipeline's `cp` has no input file) followed by a
// commit that adds /file, and checks the final output holds the file's
// content.
func TestIncrementalFailure(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalFailure_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("TestIncrementalFailure")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			Incremental: true,
		})
	require.NoError(t, err)
	// First commit is empty.
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// (A duplicated stale `require.NoError(t, err)` that re-checked the
	// already-verified error was removed here.)
	// Second commit adds /file.
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "/file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(pipeline, "master", "file", 0, 0, &buf))
	require.Equal(t, "foo\n", buf.String())
}
// TestGarbageCollection verifies GarbageCollect end to end: it establishes a
// zero-object/zero-tag baseline, creates an input repo plus a pipeline,
// checks that GC fails while the pipeline runs and succeeds once it is
// stopped, that data survives GC, that deleting the pipeline frees exactly
// the expected tags/objects, and that recreating everything after a full
// DeleteAll+GC still serves correct data (guards against a stale-cache
// regression).
func TestGarbageCollection(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	// Delete everything, then run garbage collection and finally check that
	// we're at a baseline of 0 tags and 0 objects.
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.GarbageCollect())
	originalObjects := getAllObjects(t, c)
	originalTags := getAllTags(t, c)
	require.Equal(t, 0, len(originalObjects))
	require.Equal(t, 0, len(originalTags))
	dataRepo := tu.UniqueString("TestGarbageCollection")
	pipeline := tu.UniqueString("TestGarbageCollectionPipeline")
	var commit *pfs.Commit
	var err error
	// createInputAndPipeline is reused at the end of the test to confirm the
	// cluster behaves identically after a full delete + GC cycle.
	createInputAndPipeline := func() {
		require.NoError(t, c.CreateRepo(dataRepo))
		commit, err = c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo"))
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit.ID, "bar", strings.NewReader("bar"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		// This pipeline copies foo and modifies bar
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cp /pfs/%s/foo /pfs/out/foo", dataRepo),
				fmt.Sprintf("cp /pfs/%s/bar /pfs/out/bar", dataRepo),
				"echo bar >> /pfs/out/bar",
			},
			nil,
			client.NewAtomInput(dataRepo, "/"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
	}
	createInputAndPipeline()
	objectsBefore := getAllObjects(t, c)
	tagsBefore := getAllTags(t, c)
	// Try to GC without stopping the pipeline.
	require.YesError(t, c.GarbageCollect())
	// Now stop the pipeline and GC
	require.NoError(t, c.StopPipeline(pipeline))
	require.NoError(t, backoff.Retry(c.GarbageCollect, backoff.NewTestingBackOff()))
	// Check that data still exists in the input repo
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	pis, err := c.ListPipeline()
	require.NoError(t, err)
	require.Equal(t, 1, len(pis))
	// The pipeline's output ("foo" copied, "bar" with "bar\n" appended)
	// must also survive GC.
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf))
	require.Equal(t, "barbar\n", buf.String())
	// Check that no objects or tags have been removed, since we just ran GC
	// without deleting anything.
	objectsAfter := getAllObjects(t, c)
	tagsAfter := getAllTags(t, c)
	require.Equal(t, len(tagsBefore), len(tagsAfter))
	require.Equal(t, len(objectsBefore), len(objectsAfter))
	objectsBefore = objectsAfter
	tagsBefore = tagsAfter
	// Now delete the pipeline and GC
	require.NoError(t, c.DeletePipeline(pipeline))
	require.NoError(t, c.GarbageCollect())
	// We should've deleted one tag since the pipeline has only processed
	// one datum.
	// We should've deleted 3 objects: the object referenced by
	// the tag, the modified "bar" file and the pipeline's spec.
	objectsAfter = getAllObjects(t, c)
	tagsAfter = getAllTags(t, c)
	require.Equal(t, 1, len(tagsBefore)-len(tagsAfter))
	require.Equal(t, 3, len(objectsBefore)-len(objectsAfter))
	// Now we delete everything.
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.GarbageCollect())
	// Since we've now deleted everything that we created in this test,
	// the tag count and object count should be back to the originals.
	objectsAfter = getAllObjects(t, c)
	tagsAfter = getAllTags(t, c)
	require.Equal(t, 0, len(tagsAfter))
	require.Equal(t, 0, len(objectsAfter))
	// Now we create the pipeline again and check that all data is
	// accessible. This is important because there used to be a bug
	// where we failed to invalidate the cache such that the objects in
	// the cache were referencing blocks that had been GC-ed.
	createInputAndPipeline()
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf))
	require.Equal(t, "bar", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf))
	require.Equal(t, "barbar\n", buf.String())
}
// TestPipelineWithStats creates a stats-enabled pipeline over 500 input
// files and exercises ListDatum (with and without pagination) both before
// and after job completion, plus InspectDatum on a finished datum.
func TestPipelineWithStats(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStats_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 500
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		// This error was previously dropped.
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	// This error was previously overwritten by the FlushCommit call below
	// without ever being checked; a failed CreatePipeline is now reported.
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Check we can list datums before job completion
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	require.Equal(t, 1, len(resp.DatumInfos[0].Data))
	// Check we can list datums before job completion w pagination
	resp, err = c.ListDatum(jobs[0].Job.ID, 100, 0)
	require.NoError(t, err)
	require.Equal(t, 100, len(resp.DatumInfos))
	require.Equal(t, int64(numFiles/100), resp.TotalPages)
	require.Equal(t, int64(0), resp.Page)
	// Block on the job being complete before we call ListDatum again so we're
	// sure the datums have actually been processed.
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	require.Equal(t, 1, len(resp.DatumInfos[0].Data))
	// Every datum should have succeeded once the job is complete. (A stale
	// require.NoError(t, err) inside this loop was removed; err was not
	// reassigned by anything in the loop body.)
	for _, datum := range resp.DatumInfos {
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	}
	// Make sure inspect-datum works
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
}
// TestPipelineWithStatsFailedDatums runs a stats-enabled pipeline whose user
// code fails pseudo-randomly and verifies that ListDatum sorts failed datums
// first and successful datums last, and that InspectDatum reports FAILED for
// a failed datum.
func TestPipelineWithStatsFailedDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsFailedDatums_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 200
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// $RANDOM is uniform over [0, 32767], so roughly half
					// the datums exit non-zero and fail.
					"if [ $RANDOM -gt 15000 ]; then exit 1; fi",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err) // previously unchecked
	_, err = c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	// Without this sleep, I get no results from list-job
	// See issue: https://github.com/pachyderm/pachyderm/issues/2181
	time.Sleep(15 * time.Second)
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	// First entry should be failed
	require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State)
	// Last entry should be success
	require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State)
	// Make sure inspect-datum works for failed state
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_FAILED, datum.State)
}
// TestPipelineWithStatsPaginated runs a stats-enabled pipeline with
// pseudo-randomly failing user code and verifies paginated ListDatum: page
// size, total page count, failed-first ordering, and an error when the
// requested page is out of range.
func TestPipelineWithStatsPaginated(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsPaginated_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numPages := int64(2)
	pageSize := int64(100)
	numFiles := int(numPages * pageSize)
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// $RANDOM is uniform over [0, 32767]: roughly half the
					// datums fail, so both FAILED and SUCCESS states exist.
					"if [ $RANDOM -gt 15000 ]; then exit 1; fi",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err) // previously unchecked
	_, err = c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	var jobs []*pps.JobInfo
	require.NoError(t, backoff.Retry(func() error {
		jobs, err = c.ListJob(pipeline, nil, nil)
		require.NoError(t, err)
		if len(jobs) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobs))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	// First page: a full page of datums, with failed entries sorted first.
	resp, err := c.ListDatum(jobs[0].Job.ID, pageSize, 0)
	require.NoError(t, err)
	require.Equal(t, pageSize, int64(len(resp.DatumInfos)))
	require.Equal(t, int64(numFiles)/pageSize, resp.TotalPages)
	// First entry should be failed
	require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State)
	// Last page (numPages and pageSize are already int64; the redundant
	// int64(...) conversions were dropped).
	resp, err = c.ListDatum(jobs[0].Job.ID, pageSize, numPages-1)
	require.NoError(t, err)
	require.Equal(t, pageSize, int64(len(resp.DatumInfos)))
	require.Equal(t, int64(numFiles)/pageSize-1, resp.Page)
	// Last entry should be success
	require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State)
	// Make sure we get error when requesting pages too high
	resp, err = c.ListDatum(jobs[0].Job.ID, pageSize, numPages)
	require.YesError(t, err)
}
// TestPipelineWithStatsAcrossJobs verifies that datum stats persist across
// jobs: after a second input commit, the second job's ListDatum includes the
// first job's datums as well as the new ones, with previously-processed
// datums marked SKIPPED and sorted after the newly processed ones.
func TestPipelineWithStatsAcrossJobs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsAcrossJobs_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 500
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("foo-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("StatsAcrossJobs")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err) // previously unchecked
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	// Second commit adds a disjoint set of files ("bar-*"), triggering a
	// second job in which the original datums should be skipped.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit2.ID, fmt.Sprintf("bar-%d", i), strings.NewReader(strings.Repeat("bar\n", 100)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err = c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	// we should see all the datums from the first job (which should be skipped)
	// in addition to all the new datums processed in this job
	require.Equal(t, numFiles*2, len(resp.DatumInfos))
	datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	// Test datums marked as skipped correctly
	// (also tests list datums are sorted by state)
	datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[numFiles].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SKIPPED, datum.State)
}
// TestPipelineWithStatsSkippedEdgeCase covers the add/delete/re-add edge
// case: a file added in commit1, deleted in commit2, and re-added unchanged
// in commit3 must still be reported as SKIPPED by the third job.
func TestPipelineWithStatsSkippedEdgeCase(t *testing.T) {
	// If I add a file in commit1, delete it in commit2, add it again in commit 3 ...
	// the datum will be marked as success on the 3rd job, even though it should be marked as skipped
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStatsSkippedEdgeCase_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 10
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("StatsEdgeCase")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 4,
			},
		})
	require.NoError(t, err) // previously unchecked
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	for _, datum := range resp.DatumInfos {
		// (dropped a stale require.NoError(t, err) here: err was already
		// checked above and never reassigned inside this loop)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	}
	// Make sure inspect-datum works
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	// Create a second commit that deletes a file in commit1
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	err = c.DeleteFile(dataRepo, commit2.ID, "file-0")
	require.NoError(t, err) // previously unchecked
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	// Create a third commit that re-adds the file removed in commit2
	commit3, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit3.ID, "file-0", strings.NewReader(strings.Repeat("foo\n", 100)))
	require.NoError(t, err) // previously unchecked
	require.NoError(t, c.FinishCommit(dataRepo, commit3.ID))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	jobs, err = c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 3, len(jobs))
	// Block on the job being complete before we call ListDatum
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	// All datums (including the re-added file-0) must be SKIPPED. The dead
	// `states` accumulator that was built here and never read was removed.
	for _, datum := range resp.DatumInfos {
		require.Equal(t, pps.DatumState_SKIPPED, datum.State)
	}
}
// TestIncrementalSharedProvenance asserts that creating an Incremental
// pipeline fails when its cross inputs share provenance: pipeline2 crosses
// dataRepo with pipeline1 (which itself consumes dataRepo), and pipeline4
// crosses pipeline1 with pipeline3 (both of which consume dataRepo). Both
// CreatePipeline calls are expected to return an error.
func TestIncrementalSharedProvenance(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestIncrementalSharedProvenance_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// pipeline1 consumes dataRepo directly.
	pipeline1 := tu.UniqueString("pipeline1")
	require.NoError(t, c.CreatePipeline(
		pipeline1,
		"",
		[]string{"true"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// pipeline2 crosses dataRepo with pipeline1's output, so both legs of
	// the cross share dataRepo as provenance -> must be rejected.
	pipeline2 := tu.UniqueString("pipeline2")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline2),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewCrossInput(
				client.NewAtomInput(dataRepo, "/"),
				client.NewAtomInput(pipeline1, "/"),
			),
			Incremental: true,
		})
	require.YesError(t, err)
	// pipeline3 also consumes dataRepo directly.
	pipeline3 := tu.UniqueString("pipeline3")
	require.NoError(t, c.CreatePipeline(
		pipeline3,
		"",
		[]string{"true"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// pipeline4 crosses two pipelines that both have dataRepo in their
	// provenance -> must also be rejected for an Incremental pipeline.
	pipeline4 := tu.UniqueString("pipeline4")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline4),
			Transform: &pps.Transform{
				Cmd: []string{"true"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: client.NewCrossInput(
				client.NewAtomInput(pipeline1, "/"),
				client.NewAtomInput(pipeline3, "/"),
			),
			Incremental: true,
		})
	require.YesError(t, err)
}
// TestSkippedDatums runs a stats-enabled pipeline across two successive
// input commits and verifies each job's output. The per-datum assertions
// for the second job are currently disabled (see the commented block at the
// end of the function).
func TestSkippedDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipelineName := tu.UniqueString("pipeline")
	// require.NoError(t, c.CreatePipeline(
	_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipelineName),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
		})
	require.NoError(t, err)
	// Do first commit to repo
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// The first job should have copied "file" through unchanged.
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())
	// Do second commit to repo
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	commitInfoIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitInfoIter)
	require.Equal(t, 1, len(commitInfos))
	// NOTE(review): the datum-level assertions below are commented out —
	// presumably disabled while ListDatum's signature/behavior was in flux;
	// confirm whether they can be re-enabled.
	/*
		jobs, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		require.Equal(t, 2, len(jobs))

		datums, err := c.ListDatum(jobs[1].Job.ID)
		fmt.Printf("got datums: %v\n", datums)
		require.NoError(t, err)
		require.Equal(t, 2, len(datums))

		datum, err := c.InspectDatum(jobs[1].Job.ID, datums[0].ID)
		require.NoError(t, err)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	*/
}
// TestOpencvDemo creates the two OpenCV demo pipelines (edges and montage)
// from their example JSON specs in doc/examples/opencv, feeds in one image
// via URL, and waits for one output commit per pipeline.
func TestOpencvDemo(t *testing.T) {
	t.Skip("flaky")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.CreateRepo("images"))
	commit, err := c.StartCommit("images", "master")
	require.NoError(t, err)
	require.NoError(t, c.PutFileURL("images", "master", "46Q8nDz.jpg", "http://imgur.com/46Q8nDz.jpg", false, false))
	require.NoError(t, c.FinishCommit("images", "master"))
	// Create the "edges" pipeline from its example JSON spec.
	// (renamed from `bytes`: don't shadow the standard-library package name)
	pipelineJSON, err := ioutil.ReadFile("../../doc/examples/opencv/edges.json")
	require.NoError(t, err)
	createPipelineRequest := &pps.CreatePipelineRequest{}
	require.NoError(t, json.Unmarshal(pipelineJSON, createPipelineRequest))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest)
	require.NoError(t, err)
	// Create the "montage" pipeline the same way.
	pipelineJSON, err = ioutil.ReadFile("../../doc/examples/opencv/montage.json")
	require.NoError(t, err)
	createPipelineRequest = &pps.CreatePipelineRequest{}
	require.NoError(t, json.Unmarshal(pipelineJSON, createPipelineRequest))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest)
	require.NoError(t, err)
	// Expect one output commit from each of the two pipelines.
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 2, len(commitInfos))
}
// TestCronPipeline tests pipelines driven by cron inputs:
//   - SimpleCron: a cron pipeline feeding a downstream atom pipeline
//   - CronAtomCross: a cross of a cron input and a regular atom input
//   - CronIncremental: an incremental cron pipeline that appends each tick
//   - CronIncrementalFailures: the same with pseudo-randomly failing user code
func TestCronPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	t.Run("SimpleCron", func(t *testing.T) {
		pipeline1 := tu.UniqueString("cron1-")
		require.NoError(t, c.CreatePipeline(
			pipeline1,
			"",
			[]string{"cp", "/pfs/time/time", "/pfs/out/time"},
			nil,
			nil,
			client.NewCronInput("time", "@every 20s"),
			"",
			false,
		))
		pipeline2 := tu.UniqueString("cron2-")
		require.NoError(t, c.CreatePipeline(
			pipeline2,
			"",
			[]string{"cp", fmt.Sprintf("/pfs/%s/time", pipeline1), "/pfs/out/time"},
			nil,
			nil,
			client.NewAtomInput(pipeline1, "/*"),
			"",
			false,
		))
		// subscribe to the pipeline1 cron repo and wait for inputs
		repo := fmt.Sprintf("%s_%s", pipeline1, "time")
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() //cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		commitInfo, err := iter.Next()
		require.NoError(t, err)
		// One output commit from each pipeline in the chain.
		commitIter, err := c.FlushCommit([]*pfs.Commit{commitInfo.Commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 2, len(commitInfos))
	})
	// Create a non-cron input repo, and test a pipeline with a cross of cron and
	// non-cron inputs
	t.Run("CronAtomCross", func(t *testing.T) {
		dataRepo := tu.UniqueString("TestCronPipeline_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		pipeline3 := tu.UniqueString("cron3-")
		require.NoError(t, c.CreatePipeline(
			pipeline3,
			"",
			[]string{"bash"},
			[]string{
				"cp /pfs/time/time /pfs/out/time",
				fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo),
			},
			nil,
			client.NewCrossInput(
				client.NewCronInput("time", "@every 20s"),
				client.NewAtomInput(dataRepo, "/"),
			),
			"",
			false,
		))
		dataCommit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("file"))
		require.NoError(t, err) // previously unchecked
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		repo := fmt.Sprintf("%s_%s", pipeline3, "time")
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		defer cancel() //cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		commitInfo, err := iter.Next()
		require.NoError(t, err)
		commitIter, err := c.FlushCommit([]*pfs.Commit{dataCommit, commitInfo.Commit}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
	})
	t.Run("CronIncremental", func(t *testing.T) {
		pipeline := tu.UniqueString("CronIncremental-")
		req := &pps.CreatePipelineRequest{
			Pipeline: &pps.Pipeline{Name: pipeline},
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"cat /pfs/time/time >> /pfs/out/time",
					"echo \"\" >> /pfs/out/time",
				},
			},
			Input:       client.NewCronInput("time", "@every 10s"),
			Incremental: true,
		}
		_, err := c.PpsAPIClient.CreatePipeline(c.Ctx(), req)
		require.NoError(t, err)
		// subscribe to the pipeline1 cron repo and wait for inputs
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() //cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(pipeline, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		for i := 0; i < 5; i++ {
			commitInfo, err := iter.Next()
			require.NoError(t, err)
			_, err = c.BlockCommit(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID)
			require.NoError(t, err)
			// Each tick appends one timestamp line plus a blank echo, so
			// after i+1 ticks the file splits into i+2 newline-separated parts.
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID, "time", 0, 0, &buf))
			require.Equal(t, i+2, len(strings.Split(buf.String(), "\n")))
		}
	})
	t.Run("CronIncrementalFailures", func(t *testing.T) {
		pipeline := tu.UniqueString("CronIncremental-")
		req := &pps.CreatePipelineRequest{
			Pipeline: &pps.Pipeline{Name: pipeline},
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					// Fail roughly half the runs.
					"FLIP=$(($(($RANDOM%10))%2))",
					"if [ $FLIP -eq 0 ]; then exit 1; fi",
					"cat /pfs/time/time >> /pfs/out/time",
					"echo \"\" >> /pfs/out/time",
				},
			},
			Input:       client.NewCronInput("time", "@every 10s"),
			Incremental: true,
		}
		_, err := c.PpsAPIClient.CreatePipeline(c.Ctx(), req)
		require.NoError(t, err)
		// subscribe to the pipeline1 cron repo and wait for inputs
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
		defer cancel() //cleanup resources
		iter, err := c.WithCtx(ctx).SubscribeCommit(pipeline, "master", "", pfs.CommitState_STARTED)
		require.NoError(t, err)
		for i := 0; i < 5; i++ {
			commitInfo, err := iter.Next()
			require.NoError(t, err)
			// NOTE(review): BlockCommit's error is left unchecked here —
			// presumably intentional since some jobs fail by design; confirm.
			_, err = c.BlockCommit(commitInfo.Commit.Repo.Name, commitInfo.Commit.ID)
		}
	})
}
// TestSelfReferentialPipeline verifies that a pipeline whose input repo is
// its own output repo is rejected by CreatePipeline.
func TestSelfReferentialPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	name := tu.UniqueString("pipeline")
	// Using the pipeline's own name as its input must produce an error.
	err := c.CreatePipeline(
		name,
		"",
		[]string{"true"},
		nil,
		nil,
		client.NewAtomInput(name, "/"),
		"",
		false,
	)
	require.YesError(t, err)
}
// TestPipelineBadImage verifies that pipelines created with bogus container
// images end up in PIPELINE_FAILURE with a non-empty failure reason.
func TestPipelineBadImage(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create one pipeline per bogus image, in the same order as before.
	badImages := []string{"BadImage", "bs/badimage:vcrap"}
	pipelines := make([]string, 0, len(badImages))
	for _, image := range badImages {
		name := tu.UniqueString("bad_pipeline")
		require.NoError(t, c.CreatePipeline(
			name,
			image,
			[]string{"true"},
			nil,
			nil,
			client.NewCronInput("time", "@every 20s"),
			"",
			false,
		))
		pipelines = append(pipelines, name)
	}
	// Poll until every pipeline reports failure with a reason.
	require.NoError(t, backoff.Retry(func() error {
		for _, pipeline := range pipelines {
			pipelineInfo, err := c.InspectPipeline(pipeline)
			if err != nil {
				return err
			}
			if pipelineInfo.State != pps.PipelineState_PIPELINE_FAILURE {
				return fmt.Errorf("pipeline %s should have failed", pipeline)
			}
			require.True(t, pipelineInfo.Reason != "")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestFixPipeline creates a pipeline with a broken command ("exit 1" used as
// the executable), waits for its job to fail, then updates the pipeline with
// a working command (reprocess=true) and waits for a second, successful job.
func TestFixPipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestFixPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	pipelineName := tu.UniqueString("TestFixPipeline_pipeline")
	// "exit 1" is (deliberately) not a valid executable, so the job fails.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"exit 1"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	// Wait for exactly one job, and require that it failed.
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos))
		}
		jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
		require.NoError(t, err)
		require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
		return nil
	}, backoff.NewTestingBackOff())) 
	// Update the pipeline, this will not create a new pipeline as reprocess
	// isn't set to true.
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo bar >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		true,
	))
	// The update triggers a second job, which must succeed.
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipelineName, nil, nil)
		require.NoError(t, err)
		if len(jobInfos) != 2 {
			return fmt.Errorf("expected 2 jobs, got %d", len(jobInfos))
		}
		jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true)
		require.NoError(t, err)
		require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestListJobOutput verifies that ListJob can filter by output commit: the
// job should be found both by the exact output commit and by the pipeline's
// master branch head.
// NOTE(review): unlike its sibling tests this one does not call
// c.DeleteAll() first — confirm whether that is intentional.
func TestListJobOutput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)

	dataRepo := tu.UniqueString("TestListJobOutput_data")
	require.NoError(t, c.CreateRepo(dataRepo))

	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))

	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Retry: the job record may lag slightly behind the output commit.
	require.NoError(t, backoff.Retry(func() error {
		// Filter by the exact output commit.
		jobInfos, err := c.ListJob("", nil, commitInfos[0].Commit)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 job")
		}
		// Filter by the pipeline's master branch head.
		jobInfos, err = c.ListJob("", nil, client.NewCommit(pipeline, "master"))
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("expected 1 job")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestPipelineEnvVarAlias verifies that the current datum's file path is
// exposed to user code via an environment variable named after the input
// repo; the pipeline copies `$<dataRepo>` to /pfs/out and the test checks
// each output file's contents.
func TestPipelineEnvVarAlias(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineEnvVarAlias_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	numFiles := 10
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i)))
		require.NoError(t, err) // previously unchecked
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"env",
			fmt.Sprintf("cp $%s /pfs/out/", dataRepo),
		},
		nil,
		client.NewAtomInput(dataRepo, "/*"),
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Each input file must come through with its original contents.
	for i := 0; i < numFiles; i++ {
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf))
		require.Equal(t, fmt.Sprintf("%d", i), buf.String())
	}
}
// TestMaxQueueSize verifies that MaxQueueSize is respected: while a slow
// pipeline runs, every worker's reported queue size must stay at or below
// the configured limit of 1.
func TestMaxQueueSize(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestMaxQueueSize_input")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	for i := 0; i < 20; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("TestMaxQueueSize_output")
	// This pipeline sleeps for 5 secs per datum, so the workers' queues
	// stay occupied long enough to observe their sizes below.
	// (comment fixed: it previously said 10 secs)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"sleep 5",
				},
			},
			Input: client.NewAtomInput(dataRepo, "/*"),
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 2,
			},
			MaxQueueSize: 1,
			ChunkSpec: &pps.ChunkSpec{
				Number: 10,
			},
		})
	require.NoError(t, err)
	// Sample the worker statuses 10 times while the job runs.
	var jobInfo *pps.JobInfo
	for i := 0; i < 10; i++ {
		require.NoError(t, backoff.Retry(func() error {
			jobs, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return fmt.Errorf("could not list job: %s", err.Error())
			}
			if len(jobs) == 0 {
				return fmt.Errorf("failed to find job")
			}
			jobInfo, err = c.InspectJob(jobs[0].Job.ID, false)
			if err != nil {
				return fmt.Errorf("could not inspect job: %s", err.Error())
			}
			if len(jobInfo.WorkerStatus) != 2 {
				return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus))
			}
			return nil
		}, backoff.RetryEvery(500*time.Millisecond).For(60*time.Second)))
		// No worker may ever hold more than MaxQueueSize datums.
		for _, status := range jobInfo.WorkerStatus {
			if status.QueueSize > 1 {
				t.Fatalf("queue size too big: %d", status.QueueSize)
			}
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// TestHTTPAuth exercises pachd's HTTP auth endpoints: login must set the
// auth cookie (and CORS header), logout must clear the cookie, and unknown
// routes must return 404.
func TestHTTPAuth(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // previously unchecked
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	// Try to login
	token := "abbazabbadoo"
	form := url.Values{}
	form.Add("Token", token)
	req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/login", httpAPIAddr), strings.NewReader(form.Encode()))
	require.NoError(t, err) // check BEFORE touching req: it is nil on error
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
	httpClient := &http.Client{}
	resp, err := httpClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, 1, len(resp.Cookies()))
	require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name)
	require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin"))
	require.Equal(t, token, resp.Cookies()[0].Value)
	// Try to logout
	req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logout", httpAPIAddr), nil)
	require.NoError(t, err)
	resp, err = httpClient.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, 1, len(resp.Cookies()))
	require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name)
	require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin"))
	// The cookie should be unset now
	require.Equal(t, "", resp.Cookies()[0].Value)
	// Make sure we get 404s for non existent routes
	req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logoutzz", httpAPIAddr), nil)
	require.NoError(t, err)
	resp, err = httpClient.Do(req)
	require.NoError(t, err)
	require.Equal(t, 404, resp.StatusCode)
}
// TestHTTPGetFile fetches files through pachd's HTTP API and checks raw
// contents, the Content-Disposition header when download=true, and MIME
// type detection for a GIF.
func TestHTTPGetFile(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestHTTPGetFile_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err) // previously unchecked
	f, err := os.Open("../../etc/testing/artifacts/giphy.gif")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "giphy.gif", f)
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // previously unchecked
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	// Try to get raw contents
	resp, err := http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contents, err := ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, "foo", string(contents))
	contentDisposition := resp.Header.Get("Content-Disposition")
	require.Equal(t, "", contentDisposition)
	// Try to get file for downloading
	resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file?download=true", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contents, err = ioutil.ReadAll(resp.Body)
	require.NoError(t, err)
	require.Equal(t, "foo", string(contents))
	contentDisposition = resp.Header.Get("Content-Disposition")
	require.Equal(t, "attachment; filename=\"file\"", contentDisposition)
	// Make sure MIME type is set
	resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/giphy.gif", httpAPIAddr, dataRepo, commit1.ID))
	require.NoError(t, err)
	defer resp.Body.Close()
	contentDisposition = resp.Header.Get("Content-Type")
	require.Equal(t, "image/gif", contentDisposition)
}
// TestService creates a service pipeline (a long-running user container
// serving /pfs over SimpleHTTPServer on port 8000, exposed as NodePort
// 31800) and verifies that:
//  1. a file from the input repo is served by the user container,
//  2. the same file is reachable through pachd's HTTP API proxy, and
//  3. a new input commit becomes visible through the service.
func TestService(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestService_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err) // fix: StartCommit's error was previously unchecked
	_, err = c.PutFile(dataRepo, commit1.ID, "file1", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	pipeline := tu.UniqueString("pipelineservice")
	// This pipeline sleeps for 10 secs per datum
	require.NoError(t, c.CreatePipelineService(
		pipeline,
		"trinitronx/python-simplehttpserver",
		[]string{"sh"},
		[]string{
			"cd /pfs",
			"exec python -m SimpleHTTPServer 8000",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		false,
		8000,
		31800,
	))
	time.Sleep(10 * time.Second)
	// Lookup the address for 'pipelineservice' (different inside vs outside k8s)
	serviceAddr := func() string {
		// Hack: detect if running inside the cluster by looking for this env var
		if _, ok := os.LookupEnv("KUBERNETES_PORT"); !ok {
			// Outside cluster: Re-use external IP and external port defined above
			clientAddr := c.GetAddress()
			host, _, err := net.SplitHostPort(clientAddr)
			require.NoError(t, err)
			return net.JoinHostPort(host, "31800")
		}
		// Get k8s service corresponding to pachyderm service above--must access
		// via internal cluster IP, but we don't know what that is
		var address string
		kubeClient := getKubeClient(t)
		// fix: backoff.Retry's error was previously ignored
		require.NoError(t, backoff.Retry(func() error {
			svcs, err := kubeClient.CoreV1().Services("default").List(metav1.ListOptions{})
			require.NoError(t, err)
			for _, svc := range svcs.Items {
				// Pachyderm actually generates two services for pipelineservice: one
				// for pachyderm (a ClusterIP service) and one for the user container
				// (a NodePort service, which is the one we want)
				rightName := strings.Contains(svc.Name, "pipelineservice")
				rightType := svc.Spec.Type == v1.ServiceTypeNodePort
				if !rightName || !rightType {
					continue
				}
				host := svc.Spec.ClusterIP
				port := fmt.Sprintf("%d", svc.Spec.Ports[0].Port)
				address = net.JoinHostPort(host, port)
				return nil
			}
			return fmt.Errorf("no matching k8s service found")
		}, backoff.NewTestingBackOff()))
		require.NotEqual(t, "", address)
		return address
	}()
	// 1. Fetch file1 directly from the user container.
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(fmt.Sprintf("http://%s/%s/file1", serviceAddr, dataRepo))
		if err != nil {
			return err
		}
		defer resp.Body.Close() // fix: body was previously never closed
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "foo" {
			return fmt.Errorf("wrong content for file1: expected foo, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// 2. Fetch the same file through pachd's HTTP API proxy.
	clientAddr := c.GetAddress()
	host, _, err := net.SplitHostPort(clientAddr)
	require.NoError(t, err) // fix: SplitHostPort's error was previously unchecked
	port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT")
	if !ok {
		port = "30652" // default NodePort port for Pachd's HTTP API
	}
	httpAPIAddr := net.JoinHostPort(host, port)
	url := fmt.Sprintf("http://%s/v1/pps/services/%s/%s/file1", httpAPIAddr, pipeline, dataRepo)
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		defer resp.Body.Close() // fix: body was previously never closed
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "foo" {
			return fmt.Errorf("wrong content for file1: expected foo, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
	// 3. Add a second file and check it appears through the service.
	commit2, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err) // fix: StartCommit's error was previously unchecked
	_, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
	require.NoError(t, backoff.Retry(func() error {
		resp, err := http.Get(fmt.Sprintf("http://%s/%s/file2", serviceAddr, dataRepo))
		if err != nil {
			return err
		}
		defer resp.Body.Close() // fix: body was previously never closed
		if resp.StatusCode != 200 {
			return fmt.Errorf("GET returned %d", resp.StatusCode)
		}
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return err
		}
		if string(content) != "bar" {
			return fmt.Errorf("wrong content for file2: expected bar, got %s", string(content))
		}
		return nil
	}, backoff.NewTestingBackOff()))
}
// TestChunkSpec verifies that pipelines created with an explicit ChunkSpec
// (both Number- and SizeBytes-based chunking) still copy every input file
// to the output correctly.
func TestChunkSpec(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestChunkSpec_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	numFiles := 101
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	t.Run("number", func(t *testing.T) {
		pipeline := tu.UniqueString("TestChunkSpec")
		// fix: CreatePipeline's error was previously discarded
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd: []string{"bash"},
					Stdin: []string{
						fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
					},
				},
				Input:     client.NewAtomInput(dataRepo, "/*"),
				ChunkSpec: &pps.ChunkSpec{Number: 1},
			})
		require.NoError(t, err)
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		for i := 0; i < numFiles; i++ {
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf))
			require.Equal(t, "foo", buf.String())
		}
	})
	t.Run("size", func(t *testing.T) {
		pipeline := tu.UniqueString("TestChunkSpec")
		// fix: CreatePipeline's error was previously discarded
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(),
			&pps.CreatePipelineRequest{
				Pipeline: client.NewPipeline(pipeline),
				Transform: &pps.Transform{
					Cmd: []string{"bash"},
					Stdin: []string{
						fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
					},
				},
				Input:     client.NewAtomInput(dataRepo, "/*"),
				ChunkSpec: &pps.ChunkSpec{SizeBytes: 5},
			})
		require.NoError(t, err)
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		for i := 0; i < numFiles; i++ {
			var buf bytes.Buffer
			require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf))
			require.Equal(t, "foo", buf.String())
		}
	})
}
// TestLongDatums runs a pipeline whose datums each sleep for a minute and
// verifies all eight input files are still copied through to the output.
func TestLongDatums(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestLongDatums_data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	const fileCount = 8
	for i := 0; i < fileCount; i++ {
		_, err = c.PutFile(repo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	pipeline := tu.UniqueString("TestLongDatums")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep 1m",
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", repo),
		},
		&pps.ParallelismSpec{
			Constant: 4,
		},
		client.NewAtomInput(repo, "/*"),
		"",
		false,
	))
	// Wait for the pipeline's output commit, then check every file made it.
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	out := infos[0].Commit
	for i := 0; i < fileCount; i++ {
		var contents bytes.Buffer
		require.NoError(t, c.GetFile(out.Repo.Name, out.ID, fmt.Sprintf("file%d", i), 0, 0, &contents))
		require.Equal(t, "foo", contents.String())
	}
}
// TestPipelineWithGitInputInvalidURLs checks that CreatePipeline rejects git
// input URLs that are not https clone URLs.
func TestPipelineWithGitInputInvalidURLs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	// Of the common git URL types (listed below), only the 'clone' url is supported RN
	// (for several reasons, one of which is that we can't assume we have SSH / an ssh env setup on the user container)
	//git_url: "git://github.com/sjezewski/testgithook.git",
	//ssh_url: "git@github.com:sjezewski/testgithook.git",
	//svn_url: "https://github.com/sjezewski/testgithook",
	//clone_url: "https://github.com/sjezewski/testgithook.git",
	for _, badURL := range []string{
		"git://github.com/pachyderm/test-artifacts.git",
		"git@github.com:pachyderm/test-artifacts.git",
		"https://github.com:pachyderm/test-artifacts",
	} {
		require.YesError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
			},
			nil,
			&pps.Input{
				Git: &pps.GitInput{
					URL: badURL,
				},
			},
			"",
			false,
		))
	}
}
// TestPipelineWithGitInputPrivateGHRepo points a git-input pipeline at a
// private github repo, replays a push webhook, and verifies that no commit is
// produced and the pipeline ends up in FAILURE with a clone-failure reason.
func TestPipelineWithGitInputPrivateGHRepo(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	repoName := "pachyderm-dummy"
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: fmt.Sprintf("https://github.com/pachyderm/%v.git", repoName),
			},
		},
		"",
		false,
	))
	// Creating the pipeline should have created an (empty) input repo.
	repoInfos, err := c.ListRepo()
	require.NoError(t, err)
	found := false
	for _, info := range repoInfos {
		if info.Repo.Name == repoName {
			found = true
			break
		}
	}
	require.Equal(t, true, found)
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/private.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// The clone must have failed, so no commit should have landed.
	commits, err := c.ListCommit(repoName, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// The pipeline itself should report the failure.
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, pps.PipelineState_PIPELINE_FAILURE, pipelineInfo.State)
	require.Equal(t, fmt.Sprintf("unable to clone private github repo (https://github.com/pachyderm/%v.git)", repoName), pipelineInfo.Reason)
}
// TestPipelineWithGitInputDuplicateNames checks name validation for crossed
// git inputs: two inputs with the same explicit Name fail, two inputs with the
// same URL (and thus the same implicit name) fail, but the same URL under
// distinct names is accepted.
func TestPipelineWithGitInputDuplicateNames(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	// Every case crosses two git inputs over the same repo URL and differs
	// only in how the inputs are named.
	gitInput := func(name string) *pps.Input {
		return &pps.Input{
			Git: &pps.GitInput{
				URL:  "https://github.com/pachyderm/test-artifacts.git",
				Name: name,
			},
		}
	}
	createWithCross := func(inputs ...*pps.Input) error {
		return c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename),
			},
			nil,
			&pps.Input{Cross: inputs},
			"",
			false,
		)
	}
	//Test same name on one pipeline
	require.YesError(t, createWithCross(gitInput("foo"), gitInput("foo")))
	//Test same URL on one pipeline
	require.YesError(t, createWithCross(gitInput(""), gitInput("")))
	// Test same URL but different names
	require.NoError(t, createWithCross(gitInput("foo"), gitInput("")))
}
// TestPipelineWithGitInput creates a pipeline over a public github repo,
// replays a recorded push webhook, and checks that the pipeline runs and
// writes the pushed commit's SHA to its output file.
func TestPipelineWithGitInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "https://github.com/pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// Creating the pipeline should have created an input repo with no commits.
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	commits, err := c.ListCommit("test-artifacts", "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// Trigger the pipeline by POSTing a recorded push payload to the githook server.
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// The webhook handler is non-blocking, so give it a moment to land.
	time.Sleep(2 * time.Second)
	// The push should have produced a single master branch with a head commit.
	branches, err := c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	head := branches[0].Head
	// Wait for the pipeline's output commit and verify its contents.
	iter, err := c.FlushCommit([]*pfs.Commit{head}, []*pfs.Repo{{Name: pipeline}})
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	out := infos[0].Commit
	var sha bytes.Buffer
	require.NoError(t, c.GetFile(out.Repo.Name, out.ID, outputFilename, 0, 0, &sha))
	require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(sha.String()))
}
// TestPipelineWithGitInputSequentialPushes replays two consecutive push
// webhooks against a git-input pipeline and checks that each push produces an
// output commit containing the corresponding commit SHA.
func TestPipelineWithGitInputSequentialPushes(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL: "https://github.com/pachyderm/test-artifacts.git",
			},
		},
		"",
		false,
	))
	// Creating the pipeline should have created an input repo with no commits.
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	commits, err := c.ListCommit("test-artifacts", "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// pushAndCheck replays one webhook payload, waits for the resulting
	// pipeline run, and asserts the output file holds the expected SHA.
	pushAndCheck := func(payload, expectedSHA string) {
		// Trigger the pipeline by POSTing the payload to the githook server.
		simulateGitPush(t, payload)
		// The webhook handler is non-blocking, so give it a moment to land.
		time.Sleep(2 * time.Second)
		// The push should leave a single master branch with a head commit.
		branches, err := c.ListBranch("test-artifacts")
		require.NoError(t, err)
		require.Equal(t, 1, len(branches))
		require.Equal(t, "master", branches[0].Name)
		// Wait for the pipeline's output commit and verify its contents.
		iter, err := c.FlushCommit([]*pfs.Commit{branches[0].Head}, nil)
		require.NoError(t, err)
		infos := collectCommitInfos(t, iter)
		require.Equal(t, 1, len(infos))
		out := infos[0].Commit
		var sha bytes.Buffer
		require.NoError(t, c.GetFile(out.Repo.Name, out.ID, outputFilename, 0, 0, &sha))
		require.Equal(t, expectedSHA, strings.TrimSpace(sha.String()))
	}
	pushAndCheck("../../etc/testing/artifacts/githook-payloads/master.json",
		"9047fbfc251e7412ef3300868f743f2c24852539")
	pushAndCheck("../../etc/testing/artifacts/githook-payloads/master-2.json",
		"162963b4adf00cd378488abdedc085ba08e21674")
}
// TestPipelineWithGitInputCustomName is like TestPipelineWithGitInput but
// mounts the git input under a custom Name ("foo") instead of the repo name
// derived from the URL.
func TestPipelineWithGitInputCustomName(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	repoName := "foo"
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL:  "https://github.com/pachyderm/test-artifacts.git",
				Name: repoName,
			},
		},
		"",
		false,
	))
	// The input repo should exist under the custom name, with no commits yet.
	_, err := c.InspectRepo(repoName)
	require.NoError(t, err)
	commits, err := c.ListCommit(repoName, "", "", 0)
	require.NoError(t, err)
	require.Equal(t, 0, len(commits))
	// Trigger the pipeline by POSTing a recorded push payload to the githook server.
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// The webhook handler is non-blocking, so give it a moment to land.
	time.Sleep(2 * time.Second)
	// The push should have produced a single master branch with a head commit.
	branches, err := c.ListBranch(repoName)
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	head := branches[0].Head
	// Wait for the pipeline's output commit and verify its contents.
	iter, err := c.FlushCommit([]*pfs.Commit{head}, []*pfs.Repo{{Name: pipeline}})
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	out := infos[0].Commit
	var sha bytes.Buffer
	require.NoError(t, c.GetFile(out.Repo.Name, out.ID, outputFilename, 0, 0, &sha))
	require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(sha.String()))
}
// TestPipelineWithGitInputMultiPipelineSeparateInputs creates two pipelines
// over the same git URL but with distinct input names, replays one push
// webhook, and verifies both pipelines run and emit the pushed commit's SHA.
func TestPipelineWithGitInputMultiPipelineSeparateInputs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	repos := []string{"pachyderm", "foo"}
	pipelines := []string{
		tu.UniqueString("github_pipeline_a_"),
		tu.UniqueString("github_pipeline_b_"),
	}
	for i, repoName := range repos {
		require.NoError(t, c.CreatePipeline(
			pipelines[i],
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
			},
			nil,
			&pps.Input{
				Git: &pps.GitInput{
					URL:  "https://github.com/pachyderm/test-artifacts.git",
					Name: repoName,
				},
			},
			"",
			false,
		))
		// There should be a pachyderm repo created w no commits.
		// fix: this variable was named "repos", shadowing the repo-name slice
		// being ranged over above; renamed to repoInfos.
		repoInfos, err := c.ListRepo()
		require.NoError(t, err)
		found := false
		for _, repoInfo := range repoInfos {
			if repoInfo.Repo.Name == repoName {
				found = true
			}
		}
		require.Equal(t, true, found)
		commits, err := c.ListCommit(repoName, "", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	for i, repoName := range repos {
		// Now there should be a new commit on the pachyderm repo / master branch
		branches, err := c.ListBranch(repoName)
		require.NoError(t, err)
		require.Equal(t, 1, len(branches))
		require.Equal(t, "master", branches[0].Name)
		commit := branches[0].Head
		// Now wait for the pipeline complete as normal
		outputRepo := &pfs.Repo{Name: pipelines[i]}
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo})
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		commit = commitInfos[0].Commit
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
		require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
	}
}
// TestPipelineWithGitInputMultiPipelineSameInput creates two pipelines over
// the exact same git input (same URL, implicit name), replays one push
// webhook, and verifies both pipelines produce output commits containing the
// pushed commit's SHA.
func TestPipelineWithGitInputMultiPipelineSameInput(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	outputFilename := "commitSHA"
	repos := []string{"test-artifacts", "test-artifacts"}
	pipelines := []string{
		tu.UniqueString("github_pipeline_a_"),
		tu.UniqueString("github_pipeline_b_"),
	}
	for i, repoName := range repos {
		require.NoError(t, c.CreatePipeline(
			pipelines[i],
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename),
			},
			nil,
			&pps.Input{
				Git: &pps.GitInput{
					URL: "https://github.com/pachyderm/test-artifacts.git",
				},
			},
			"",
			false,
		))
		// There should be a pachyderm repo created w no commits.
		// fix: this variable was named "repos", shadowing the repo-name slice
		// being ranged over above; renamed to repoInfos.
		repoInfos, err := c.ListRepo()
		require.NoError(t, err)
		found := false
		for _, repoInfo := range repoInfos {
			if repoInfo.Repo.Name == repoName {
				found = true
			}
		}
		require.Equal(t, true, found)
		commits, err := c.ListCommit(repoName, "", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// Need to sleep since the webhook http handler is non blocking
	time.Sleep(2 * time.Second)
	// Now there should be a new commit on the pachyderm repo / master branch
	branches, err := c.ListBranch(repos[0])
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, "master", branches[0].Name)
	commit := branches[0].Head
	// Now wait for both pipelines to complete as normal
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 2, len(commitInfos))
	// fix: removed a dead "commit = commitInfos[0].Commit" assignment that was
	// immediately overwritten by the loop below.
	for _, commitInfo := range commitInfos {
		commit = commitInfo.Commit
		var buf bytes.Buffer
		require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf))
		require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String()))
	}
}
// TestPipelineWithGitInputAndBranch creates a git-input pipeline pinned to a
// non-master branch and verifies that pushes to master are ignored while a
// push to the configured branch triggers a run with the right SHA.
func TestPipelineWithGitInputAndBranch(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	branchName := "foo"
	outputFilename := "commitSHA"
	pipeline := tu.UniqueString("github_pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename),
		},
		nil,
		&pps.Input{
			Git: &pps.GitInput{
				URL:    "https://github.com/pachyderm/test-artifacts.git",
				Branch: branchName,
			},
		},
		"",
		false,
	))
	// Creating the pipeline should have created an (empty) input repo.
	_, err := c.InspectRepo("test-artifacts")
	require.NoError(t, err)
	// A push to master must NOT trigger this pipeline.
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json")
	// The webhook handler is non-blocking, so give it a moment to land.
	time.Sleep(5 * time.Second)
	// No master branch should exist on the input repo.
	_, err = c.InspectBranch("test-artifacts", "master")
	require.YesError(t, err)
	// A push to the configured branch DOES trigger the pipeline.
	simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/branch.json")
	// Again, allow the non-blocking handler time to process the payload.
	time.Sleep(5 * time.Second)
	// The configured branch should now exist with a head commit.
	branches, err := c.ListBranch("test-artifacts")
	require.NoError(t, err)
	require.Equal(t, 1, len(branches))
	require.Equal(t, branchName, branches[0].Name)
	head := branches[0].Head
	require.NotNil(t, head)
	// Wait for the pipeline's output commit and verify its contents.
	iter, err := c.FlushCommit([]*pfs.Commit{head}, []*pfs.Repo{{Name: pipeline}})
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	out := infos[0].Commit
	var sha bytes.Buffer
	require.NoError(t, c.GetFile(out.Repo.Name, out.ID, outputFilename, 0, 0, &sha))
	require.Equal(t, "81269575dcfc6ac2e2a463ad8016163f79c97f5c", strings.TrimSpace(sha.String()))
}
// TestPipelineWithDatumTimeout runs a pipeline whose command never finishes
// under a 20s DatumTimeout and verifies the job fails, the datum is marked
// FAILED, and the datum's recorded process time equals the timeout.
func TestPipelineWithDatumTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestPipelineWithDatumTimeout_data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file",
		strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	timeoutDur, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					"while true; do sleep 1; date; done",
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", repo),
				},
			},
			Input:        client.NewAtomInput(repo, "/*"),
			EnableStats:  true,
			DatumTimeout: types.DurationProto(timeoutDur),
		},
	)
	require.NoError(t, err)
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State)
	// Now validate the datum timed out properly
	datumList, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, 1, len(datumList.DatumInfos))
	datumInfo, err := c.InspectDatum(jobs[0].Job.ID, datumList.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_FAILED, datumInfo.State)
	// ProcessTime looks like "20 seconds"
	fields := strings.Split(pretty.Duration(datumInfo.Stats.ProcessTime), " ")
	require.Equal(t, 2, len(fields))
	secs, err := strconv.Atoi(fields[0])
	require.NoError(t, err)
	require.Equal(t, timeout, secs)
}
// TestPipelineWithDatumTimeoutControl is the control case for
// TestPipelineWithDatumTimeout: the datum sleeps for less than the 20s
// DatumTimeout, so the job must succeed.
func TestPipelineWithDatumTimeoutControl(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	repo := tu.UniqueString("TestPipelineWithDatumTimeoutControl_data")
	require.NoError(t, c.CreateRepo(repo))
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "file",
		strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	timeoutDur, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("sleep %v", timeout-10),
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", repo),
				},
			},
			Input:        client.NewAtomInput(repo, "/*"),
			DatumTimeout: types.DurationProto(timeoutDur),
		},
	)
	require.NoError(t, err)
	iter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	infos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(infos))
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we inspect its final state.
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State)
}
// TestPipelineWithJobTimeout runs a pipeline whose total execution time (2
// datums x 20s sleep) exceeds its 20s JobTimeout and verifies the job is
// KILLED roughly at the timeout boundary.
func TestPipelineWithJobTimeout(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithDatumTimeout_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	numFiles := 2
	for i := 0; i < numFiles; i++ {
		_, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%v", i),
			strings.NewReader("foo"))
		require.NoError(t, err)
	}
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	timeout := 20
	pipeline := tu.UniqueString("pipeline")
	duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout))
	require.NoError(t, err)
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("sleep %v", timeout), // we have 2 datums, so the total exec time will more than double the timeout value
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
			JobTimeout:  types.DurationProto(duration),
		},
	)
	require.NoError(t, err)
	// Wait for the job to get scheduled / appear in listjob
	// A sleep of 15s is insufficient
	time.Sleep(25 * time.Second)
	jobs, err := c.ListJob(pipeline, nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobs))
	// Block on the job being complete before we call ListDatum
	jobInfo, err := c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfo.State.String())
	started, err := types.TimestampFromProto(jobInfo.Started)
	require.NoError(t, err)
	finished, err := types.TimestampFromProto(jobInfo.Finished)
	require.NoError(t, err)
	// The job should have run for ~`duration` before being killed.
	// fix: was hard-coded as time.Second*20, silently diverging from `timeout`.
	require.True(t, math.Abs((finished.Sub(started)-duration).Seconds()) <= 1.0)
}
// TestCommitDescription verifies commit descriptions can be set in
// StartCommit, set in FinishCommit, and overwritten by FinishCommit when
// both supply one.
func TestCommitDescription(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	dataRepo := tu.UniqueString("TestCommitDescription")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Test putting a message in StartCommit
	commit, err := c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{
		Branch:      "master",
		Parent:      client.NewCommit(dataRepo, ""),
		Description: "test commit description in start-commit",
	})
	require.NoError(t, err)
	// fix: FinishCommit's error was previously ignored
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	commitInfo, err := c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in start-commit", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	// Test putting a message in FinishCommit
	commit, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	// fix: FinishCommit's error was previously ignored
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in finish-commit",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in finish-commit", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
	// Test overwriting a commit message
	commit, err = c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{
		Branch:      "master",
		Parent:      client.NewCommit(dataRepo, ""),
		Description: "test commit description in start-commit",
	})
	require.NoError(t, err)
	// fix: FinishCommit's error was previously ignored
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in finish-commit that overwrites",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in finish-commit that overwrites", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(commitInfo))
}
// TestGetFileWithEmptyCommits checks that after an Empty commit becomes the
// branch HEAD, reading a file that existed in the previous HEAD returns a
// "not found" error.
func TestGetFileWithEmptyCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	repoName := tu.UniqueString("TestGetFileWithEmptyCommits")
	require.NoError(t, c.CreateRepo(repoName))
	// Create a real commit in repoName/master
	commit, err := c.StartCommit(repoName, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repoName, commit.ID, "/file", strings.NewReader("data contents"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repoName, commit.ID))
	// Create an empty commit in repoName/master
	commit, err = c.StartCommit(repoName, "master")
	require.NoError(t, err) // error was previously unchecked
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit: commit,
		Empty:  true,
	})
	require.NoError(t, err) // error was previously unchecked
	// We get a "file not found" error when we try to get a file from repoName/master
	buf := bytes.Buffer{}
	err = c.GetFile(repoName, "master", "/file", 0, 0, &buf)
	require.YesError(t, err)
	require.True(t, strings.Contains(err.Error(), "not found"))
}
// TestPipelineDescription verifies that the description supplied in a
// CreatePipelineRequest is stored and returned by InspectPipeline.
func TestPipelineDescription(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineDescription_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("TestPipelineDescription")
	description := "pipeline description"
	// Create a trivial pipeline whose only interesting field is Description.
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline:    client.NewPipeline(pipeline),
			Transform:   &pps.Transform{Cmd: []string{"true"}},
			Description: description,
			Input:       client.NewAtomInput(dataRepo, "/"),
		})
	require.NoError(t, err)
	pipelineInfo, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, description, pipelineInfo.Description)
}
// TestListJobInputCommits checks that ListJob can filter jobs by their input
// commits: passing one commit returns every job that consumed it (in any slot
// of the cross input), and passing several commits returns only the job that
// consumed all of them together.
func TestListJobInputCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	aRepo := tu.UniqueString("TestListJobInputCommits_data_a")
	require.NoError(t, c.CreateRepo(aRepo))
	bRepo := tu.UniqueString("TestListJobInputCommits_data_b")
	require.NoError(t, c.CreateRepo(bRepo))
	// Pipeline over the cross product of aRepo and bRepo; it copies both
	// inputs to its output.
	pipeline := tu.UniqueString("TestListJobInputCommits")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", aRepo),
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", bRepo),
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewCrossInput(
			client.NewAtomInput(aRepo, "/*"),
			client.NewAtomInput(bRepo, "/*"),
		),
		"",
		false,
	))
	// Commit a1, then b1, then a2, then b2, flushing after each so job
	// creation order (and hence the expected job counts below) is
	// deterministic.
	commita1, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	commitb1, err := c.StartCommit(bRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(bRepo, "master", "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(bRepo, "master"))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commita1, commitb1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commita2, err := c.StartCommit(aRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(aRepo, "master", "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(aRepo, "master"))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb1}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	commitb2, err := c.StartCommit(bRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(bRepo, "master", "file", strings.NewReader("bar"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(bRepo, "master"))
	commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb2}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	// Filtering on a single commit returns every job that consumed it.
	jobInfos, err := c.ListJob("", []*pfs.Commit{commita1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a1 + nil and a1 + b1
	jobInfos, err = c.ListJob("", []*pfs.Commit{commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a1 + b1 and a2 + b1
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2}, nil)
	require.NoError(t, err)
	require.Equal(t, 2, len(jobInfos)) // a2 + b1 and a2 + b2
	jobInfos, err = c.ListJob("", []*pfs.Commit{commitb2}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos)) // a2 + b2
	// Filtering on a pair of commits returns only the job that used both.
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita1, commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb1}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb2}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
	// Branch names in the filter resolve to their HEAD commits (a2 and b2).
	jobInfos, err = c.ListJob("", []*pfs.Commit{client.NewCommit(aRepo, "master"), client.NewCommit(bRepo, "master")}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jobInfos))
}
// TestManyJobs creates many pipelines with bulky specs and a large number of
// empty input commits, then checks that listing the resulting jobs succeeds.
func TestManyJobs(t *testing.T) {
	t.Skip("This test is too long to be run as part of CI")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestManyJobs_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Pad each pipeline spec with inert stdin lines so job/pipeline records
	// are large.
	filler := strings.Repeat("words ", 30)
	stdin := []string{filler, filler, filler, filler, filler, filler}
	numPipelines := 10
	for i := 0; i < numPipelines; i++ {
		require.NoError(t, c.CreatePipeline(
			tu.UniqueString("TestManyJobs"),
			"",
			[]string{"true"},
			stdin,
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(dataRepo, "/*"),
			"",
			false,
		))
	}
	// Spawn a large number of (empty) input commits.
	numCommits := 5000
	for i := 0; i < numCommits; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
	}
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	_, err = c.ListJob("", nil, nil)
	require.NoError(t, err)
}
// TestExtractRestore extracts all cluster state, deletes everything, restores
// the extracted state, and verifies the pipeline chain reprocesses as before.
func TestExtractRestore(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestExtractRestore_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Seeded RNG keeps the generated file content reproducible across runs.
	nCommits := 2
	r := rand.New(rand.NewSource(45))
	fileContent := workload.RandString(r, 40*MB)
	for i := 0; i < nCommits; i++ {
		_, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(fileContent))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
	}
	// Build a linear chain: dataRepo -> pipeline0 -> pipeline1 -> pipeline2.
	numPipelines := 3
	input := dataRepo
	for i := 0; i < numPipelines; i++ {
		pipeline := tu.UniqueString(fmt.Sprintf("TestExtractRestore%d", i))
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				// Copy from this pipeline's own input ('input', not
				// 'dataRepo'): pipelines after the first read from the
				// previous pipeline's output repo, so copying from
				// /pfs/<dataRepo> would fail for them.
				fmt.Sprintf("cp /pfs/%s/* /pfs/out/", input),
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(input, "/*"),
			"",
			false,
		))
		input = pipeline
	}
	commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, numPipelines, len(commitInfos))
	// Extract everything, wipe the cluster, restore, and make sure the same
	// DAG state comes back.
	ops, err := c.ExtractAll(false)
	require.NoError(t, err)
	require.NoError(t, c.DeleteAll())
	require.NoError(t, c.Restore(ops))
	commitIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, commitIter)
	require.Equal(t, numPipelines, len(commitInfos))
}
// TestCancelJob creates a long-running job and then kills it, testing that the
// user process is killed.
func TestCancelJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestCancelJob")
	require.NoError(t, c.CreateRepo(repo))
	// Create an input commit: '/time' controls how long the job sleeps (600s,
	// so it is reliably still running when we cancel it below), '/data' is the
	// payload the pipeline copies to its output.
	commit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "/time", strings.NewReader("600"))
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit.ID, "/data", strings.NewReader("commit data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit.ID))
	// Create sleep + copy pipeline
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep `cat /pfs/*/time`",
			"cp /pfs/*/data /pfs/out/",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Wait until PPS has started processing commit
	var jobInfo *pps.JobInfo
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			jobInfo = jobInfos[0]
			return nil
		}, backoff.NewTestingBackOff())
	})
	// stop the job
	require.NoError(t, c.StopJob(jobInfo.Job.ID))
	// Wait until the job is cancelled
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false)
			if err != nil {
				return err
			}
			if updatedJobInfo.State != pps.JobState_JOB_KILLED {
				return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID)
			}
			return nil
		}, backoff.NewTestingBackOff())
	})
	// Create one more commit to make sure the pipeline can still process input
	// commits
	commit2, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	// Replace '/time' with a 1s sleep so this second job finishes quickly.
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time"))
	_, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data"))
	_, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Flush commit2, and make sure the output is as expected
	iter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	buf := bytes.Buffer{}
	err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 2 data", buf.String())
}
// TestCancelManyJobs creates many jobs to test that the handling of many
// incoming job events is correct. Each job comes up (which tests that
// cancelling job 'a' does not cancel subsequent job 'b'), must be the only job
// running (which tests that only one job can run at a time), and then is
// cancelled.
func TestCancelManyJobs(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestCancelManyJobs")
	require.NoError(t, c.CreateRepo(repo))
	// Create sleep pipeline; it sleeps long enough (600s) that every job must
	// be explicitly cancelled below rather than finishing on its own.
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"sleep", "600"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Create 10 input commits, to spawn 10 jobs
	var commits [10]*pfs.Commit
	var err error
	for i := 0; i < 10; i++ {
		commits[i], err = c.StartCommit(repo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commits[i].ID))
	}
	// For each expected job: watch to make sure the input job comes up, make
	// sure that it's the only job running, then cancel it
	for _, commit := range commits {
		// Wait until PPS has started processing commit
		var jobInfo *pps.JobInfo
		require.NoErrorWithinT(t, 30*time.Second, func() error {
			return backoff.Retry(func() error {
				jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil)
				if err != nil {
					return err
				}
				if len(jobInfos) != 1 {
					return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
				}
				jobInfo = jobInfos[0]
				return nil
			}, backoff.NewTestingBackOff())
		})
		// Stop the job
		require.NoError(t, c.StopJob(jobInfo.Job.ID))
		// Check that the job is now killed
		require.NoErrorWithinT(t, 30*time.Second, func() error {
			return backoff.Retry(func() error {
				// TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is
				// submitted, change ListJob here to filter on commit1 as the input commit,
				// rather than inspecting the input in the test
				updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false)
				if err != nil {
					return err
				}
				if updatedJobInfo.State != pps.JobState_JOB_KILLED {
					return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID)
				}
				return nil
			}, backoff.NewTestingBackOff())
		})
	}
}
// TestDeleteCommitPropagation deletes an input commit and makes sure all
// downstream commits are also deleted.
// DAG in this test: repo -> pipeline[0] -> pipeline[1]
func TestDeleteCommitPropagation(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestDeleteCommitPropagation")
	require.NoError(t, c.CreateRepo(repo))
	// Create two copy pipelines
	numPipelines, numCommits := 2, 2
	pipeline := make([]string, numPipelines)
	for i := 0; i < numPipelines; i++ {
		pipeline[i] = tu.UniqueString(fmt.Sprintf("pipeline%d_", i))
		// pipeline[0] reads from repo; pipeline[1] reads from pipeline[0]
		input := []string{repo, pipeline[0]}[i]
		require.NoError(t, c.CreatePipeline(
			pipeline[i],
			"",
			[]string{"bash"},
			[]string{"cp /pfs/*/* /pfs/out/"},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(input, "/*"),
			"",
			false,
		))
	}
	// Commit twice to the input repo, creating 4 jobs and 4 output commits
	commit := make([]*pfs.Commit, numCommits)
	var err error
	for i := 0; i < numCommits; i++ {
		commit[i], err = c.StartCommit(repo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(repo, commit[i].ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(repo, commit[i].ID))
		// Wait for both pipelines' output commits before committing again.
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit[i]}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 2, len(commitInfos))
	}
	// Delete the first commit in the input repo (not master, but its parent)
	// Make sure that 'repo' and all downstream repos only have one commit now.
	// This ensures that commits' parents are updated
	commits, err := c.ListCommit(repo, "master", "", 0)
	require.NoError(t, err)
	require.Equal(t, 2, len(commits))
	require.NoError(t, c.DeleteCommit(repo, commit[0].ID))
	for _, r := range []string{repo, pipeline[0], pipeline[1]} {
		commits, err := c.ListCommit(r, "master", "", 0)
		require.NoError(t, err)
		require.Equal(t, 1, len(commits))
		require.Nil(t, commits[0].ParentCommit)
	}
	jis, err := c.ListJob(pipeline[0], nil, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	// Delete the second commit in the input repo (master)
	// Make sure that 'repo' and all downstream repos have no commits. This
	// ensures that branches are updated.
	require.NoError(t, c.DeleteCommit(repo, "master"))
	for _, r := range []string{repo, pipeline[0], pipeline[1]} {
		commits, err := c.ListCommit(r, "master", "", 0)
		require.NoError(t, err)
		require.Equal(t, 0, len(commits))
	}
	// Make one more input commit, to be sure that the branches are still
	// connected properly
	finalCommit, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, finalCommit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, finalCommit.ID))
	commitIter, err := c.FlushCommit([]*pfs.Commit{finalCommit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 2, len(commitInfos))
}
// TestDeleteCommitRunsJob creates an input repo, commits twice, and then
// creates a pipeline. Creating the pipeline will spawn a job and while that
// job is running, this test deletes the HEAD commit of the input branch, which
// deletes the job's output commit and cancels the job. This should start
// another job that processes the original input HEAD commit's parent.
func TestDeleteCommitRunsJob(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Create an input repo
	repo := tu.UniqueString("TestDeleteCommitRunsJob")
	require.NoError(t, c.CreateRepo(repo))
	// Create two input commits. The input commit has two files: 'time' which
	// determines how long the processing job runs for, and 'data' which
	// determines the job's output. This ensures that the first job (processing
	// the second commit) runs for a long time, making it easy to cancel, while
	// the second job runs quickly, ensuring that the test finishes quickly
	commit1, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "/time", strings.NewReader("1"))
	require.NoError(t, err)
	_, err = c.PutFile(repo, commit1.ID, "/data", strings.NewReader("commit 1 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit1.ID))
	commit2, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time"))
	_, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("600"))
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data"))
	_, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit2.ID))
	// Create sleep + copy pipeline
	pipeline := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			"sleep `cat /pfs/*/time`",
			"cp /pfs/*/data /pfs/out/",
		},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(repo, "/"),
		"",
		false,
	))
	// Wait until PPS has started processing commit2 (the branch HEAD)
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			// TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is
			// submitted, change ListJob here to filter on commit1 as the input commit,
			// rather than inspecting the input in the test
			jobInfos, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) {
				if input.Atom == nil {
					err = fmt.Errorf("expected a single atom input, but got: %v", jobInfos[0].Input)
					return
				}
				if input.Atom.Commit != commit2.ID {
					err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit2.ID, jobInfos[0].Input)
					return
				}
			})
			return err
		}, backoff.NewTestingBackOff())
	})
	// Delete commit2 (the input branch HEAD); its long-running job should be
	// cancelled and a new job for commit1 should be spawned.
	require.NoError(t, c.DeleteCommit(repo, commit2.ID))
	// Wait until PPS has started processing commit1
	require.NoErrorWithinT(t, 30*time.Second, func() error {
		return backoff.Retry(func() error {
			// TODO(msteffen): as above, change ListJob here to filter on commit2 as
			// the input, rather than inspecting the input in the test
			jobInfos, err := c.ListJob(pipeline, nil, nil)
			if err != nil {
				return err
			}
			if len(jobInfos) != 1 {
				return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos)
			}
			pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) {
				if input.Atom == nil {
					err = fmt.Errorf("expected a single atom input, but got: %v", jobInfos[0].Input)
					return
				}
				if input.Atom.Commit != commit1.ID {
					err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit1.ID, jobInfos[0].Input)
					return
				}
			})
			return err
		}, backoff.NewTestingBackOff())
	})
	iter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	// Check that the job processed the right data by reading the PIPELINE's
	// output commit. (This previously read the input repo's master, which
	// trivially contains "commit 1 data" after the deletion and therefore
	// never verified the job's output.)
	buf := bytes.Buffer{}
	err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 1 data", buf.String())
	// Create one more commit to make sure the pipeline can still process input
	// commits
	commit3, err := c.StartCommit(repo, "master")
	require.NoError(t, err)
	require.NoError(t, c.DeleteFile(repo, commit3.ID, "/data"))
	_, err = c.PutFile(repo, commit3.ID, "/data", strings.NewReader("commit 3 data"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repo, commit3.ID))
	// Flush commit3, and make sure the output is as expected
	iter, err = c.FlushCommit([]*pfs.Commit{commit3}, []*pfs.Repo{client.NewRepo(pipeline)})
	require.NoError(t, err)
	commitInfos = collectCommitInfos(t, iter)
	require.Equal(t, 1, len(commitInfos))
	buf.Reset()
	err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf)
	require.NoError(t, err)
	require.Equal(t, "commit 3 data", buf.String())
}
// TestEntryPoint runs the pachyderm_entrypoint image with no cmd/stdin,
// relying on the image's default entrypoint to copy the named input ("in")
// to /pfs/out.
func TestEntryPoint(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestEntryPoint_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit1, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
	// Name the pipeline after this test (was "TestSimplePipeline", a
	// copy-paste leftover that made failures hard to attribute).
	pipeline := tu.UniqueString("TestEntryPoint")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"pachyderm_entrypoint",
		nil,
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		&pps.Input{
			Atom: &pps.AtomInput{
				Name: "in",
				Repo: dataRepo,
				Glob: "/*",
			},
		},
		"",
		false,
	))
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf))
	require.Equal(t, "foo", buf.String())
}
// TestDeleteSpecRepo checks that the internal spec repo (ppsconsts.SpecRepo)
// cannot be deleted while a pipeline exists.
func TestDeleteSpecRepo(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestDeleteSpecRepo_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// Name the pipeline after this test (was "TestSimplePipeline", a
	// copy-paste leftover).
	pipeline := tu.UniqueString("TestDeleteSpecRepo")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"pachyderm_entrypoint",
		[]string{"echo", "foo"},
		nil,
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Deleting the spec repo must fail while a pipeline still references it.
	require.YesError(t, c.DeleteRepo(ppsconsts.SpecRepo, false))
}
// TestUserWorkingDir checks that the Transform's User and WorkingDir fields
// take effect in the user container: `whoami` must report the configured user
// and `pwd` the configured working directory.
func TestUserWorkingDir(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	// BUG FIX: this was `defer require.NoError(t, c.DeleteAll())`, which
	// evaluates c.DeleteAll() IMMEDIATELY (Go evaluates deferred call
	// arguments at the defer statement) and only defers require.NoError.
	// Call it directly at test start, matching the rest of this file.
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestUserWorkingDir_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	// Name the pipeline after this test (was "TestSimplePipeline").
	pipeline := tu.UniqueString("TestUserWorkingDir")
	_, err = c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Image: "pachyderm_entrypoint",
				Cmd:   []string{"bash"},
				Stdin: []string{
					"ls -lh /pfs",
					"whoami >/pfs/out/whoami",
					"pwd >/pfs/out/pwd",
					fmt.Sprintf("cat /pfs/%s/file >/pfs/out/file", dataRepo),
				},
				User:       "test",
				WorkingDir: "/home/test",
			},
			Input: client.NewAtomInput(dataRepo, "/"),
		})
	require.NoError(t, err)
	commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	commitInfos := collectCommitInfos(t, commitIter)
	require.Equal(t, 1, len(commitInfos))
	var buf bytes.Buffer
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "whoami", 0, 0, &buf))
	require.Equal(t, "test\n", buf.String())
	buf.Reset()
	require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "pwd", 0, 0, &buf))
	require.Equal(t, "/home/test\n", buf.String())
}
// TestDontReadStdin checks that a pipeline whose command does not consume
// stdin (here `true`) still succeeds even though stdin lines are supplied.
func TestDontReadStdin(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	dataRepo := tu.UniqueString("TestDontReadStdin_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipeline := tu.UniqueString("TestDontReadStdin")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"true"},
		[]string{"stdin that will never be read"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewAtomInput(dataRepo, "/"),
		"",
		false,
	))
	// Run many jobs so an intermittent hang/failure caused by the unread
	// stdin would be caught by some iteration.
	numCommits := 20
	for i := 0; i < numCommits; i++ {
		commit, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, "master"))
		jobInfos, err := c.FlushJobAll([]*pfs.Commit{commit}, nil)
		require.NoError(t, err)
		require.Equal(t, 1, len(jobInfos))
		// (expected, actual) — arguments were previously reversed, which
		// garbles require.Equal's failure message.
		require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jobInfos[0].State.String())
	}
}
// TestStatsDeleteAll checks that DeleteAll cleanly removes a stats-enabled
// pipeline, and that the same pipeline name can be recreated and run again
// afterwards.
func TestStatsDeleteAll(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	dataRepo := tu.UniqueString("TestPipelineWithStats_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	pipeline := tu.UniqueString("pipeline")
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"},
			},
			Input:       client.NewAtomInput(dataRepo, "/"),
			EnableStats: true,
		})
	require.NoError(t, err) // error was previously unchecked
	jis, err := c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String())
	require.NoError(t, c.DeleteAll())
	// Recreate the repo and pipeline (this time with a "/*" glob) and verify
	// that a job still succeeds after the wipe.
	require.NoError(t, c.CreateRepo(dataRepo))
	commit, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, commit.ID))
	_, err = c.PpsAPIClient.CreatePipeline(context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"},
			},
			Input:       client.NewAtomInput(dataRepo, "/*"),
			EnableStats: true,
		})
	require.NoError(t, err) // error was previously unchecked
	jis, err = c.FlushJobAll([]*pfs.Commit{commit}, nil)
	require.NoError(t, err)
	require.Equal(t, 1, len(jis))
	require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String())
	require.NoError(t, c.DeleteAll())
}
// TestCorruption repeatedly builds a one-pipeline DAG, randomly deletes ~10%
// of the etcd keys under the default prefix to simulate metadata corruption,
// and then calls DeleteAll; the cluster must survive every iteration.
func TestCorruption(t *testing.T) {
	t.Skip("This test takes too long to run on CI.")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	etcdClient := getEtcdClient(t)
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// Seeded RNG so the key-deletion pattern is reproducible across runs.
	r := rand.New(rand.NewSource(128))
	for i := 0; i < 100; i++ {
		dataRepo := tu.UniqueString("TestSimplePipeline_data")
		require.NoError(t, c.CreateRepo(dataRepo))
		commit1, err := c.StartCommit(dataRepo, "master")
		require.NoError(t, err)
		_, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo"))
		require.NoError(t, err)
		require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
		pipeline := tu.UniqueString("TestSimplePipeline")
		require.NoError(t, c.CreatePipeline(
			pipeline,
			"",
			[]string{"bash"},
			[]string{
				fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
			},
			&pps.ParallelismSpec{
				Constant: 1,
			},
			client.NewAtomInput(dataRepo, "/*"),
			"",
			false,
		))
		commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil)
		require.NoError(t, err)
		commitInfos := collectCommitInfos(t, commitIter)
		require.Equal(t, 1, len(commitInfos))
		// Fetch all pachyderm metadata keys (keys only; values not needed).
		resp, err := etcdClient.Get(context.Background(), col.DefaultPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
		require.NoError(t, err)
		for _, kv := range resp.Kvs {
			// Delete 1 in 10 keys
			if r.Intn(10) == 0 {
				_, err := etcdClient.Delete(context.Background(), string(kv.Key))
				require.NoError(t, err)
			}
		}
		require.NoError(t, c.DeleteAll())
	}
}
// TestPachdPrometheusStats queries the Prometheus instance (address from the
// PROM_PORT env var) to check that pachd exports the expected PFS/PPS timing
// metrics and cache gauges.
func TestPachdPrometheusStats(t *testing.T) {
	t.Skip("flake")
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	port := os.Getenv("PROM_PORT")
	promClient, err := prom_api.NewClient(prom_api.Config{
		Address: fmt.Sprintf("http://127.0.0.1:%v", port),
	})
	require.NoError(t, err)
	promAPI := prom_api_v1.NewAPI(promClient)
	// countQuery runs 'query', requires exactly one result series, and
	// returns its value.
	countQuery := func(t *testing.T, query string) float64 {
		result, err := promAPI.Query(context.Background(), query, time.Now())
		require.NoError(t, err)
		resultVec := result.(prom_model.Vector)
		require.Equal(t, 1, len(resultVec))
		return float64(resultVec[0].Value)
	}
	// avgQuery queries sum/count (an average) and requires the expected
	// number of result series.
	avgQuery := func(t *testing.T, sumQuery string, countQuery string, expected int) {
		query := "(" + sumQuery + ")/(" + countQuery + ")"
		result, err := promAPI.Query(context.Background(), query, time.Now())
		require.NoError(t, err)
		resultVec := result.(prom_model.Vector)
		require.Equal(t, expected, len(resultVec))
	}
	// Check stats reported on pachd pod
	pod := "app=\"pachd\""
	without := "(instance)"
	// Check PFS API is reported
	t.Run("GetFileAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 2) // 2 results ... one for finished, one for errored
	})
	t.Run("PutFileAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 1)
	})
	t.Run("GetFileSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_get_file_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query) // Just check query has a result
	})
	t.Run("PutFileSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_put_file_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query) // Just check query has a result
	})
	// Check PPS API is reported
	t.Run("ListJobSeconds", func(t *testing.T) {
		query := fmt.Sprintf("sum(pachyderm_pachd_list_job_seconds_count{%v}) without %v", pod, without)
		countQuery(t, query)
	})
	t.Run("ListJobAvgRuntime", func(t *testing.T) {
		sum := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_sum{%v}) without %v", pod, without)
		count := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_count{%v}) without %v", pod, without)
		avgQuery(t, sum, count, 1)
	})
	// Check that each cache's load gauge is exported
	caches := []string{"object", "tag", "object_info"}
	for _, cache := range caches {
		t.Run(fmt.Sprintf("cache_%v", cache), func(t *testing.T) {
			query := fmt.Sprintf("pachyderm_pachd_cache_%v_loads_gauge", cache)
			countQuery(t, query)
		})
	}
}
// getAllObjects drains the ListObjects stream and returns every object it
// yields. Any stream error other than EOF fails the test.
func getAllObjects(t testing.TB, c *client.APIClient) []*pfs.Object {
	stream, err := c.ListObjects(context.Background(), &pfs.ListObjectsRequest{})
	require.NoError(t, err)
	var out []*pfs.Object
	for {
		object, err := stream.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		out = append(out, object)
	}
	return out
}
// getAllTags drains the ListTags stream and returns every tag name it
// yields. Any stream error other than EOF fails the test.
func getAllTags(t testing.TB, c *client.APIClient) []string {
	stream, err := c.ListTags(context.Background(), &pfs.ListTagsRequest{})
	require.NoError(t, err)
	var names []string
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			break
		}
		require.NoError(t, err)
		names = append(names, resp.Tag.Name)
	}
	return names
}
// restartAll force-deletes every pod labeled suite=pachyderm and then waits
// for the cluster to report ready again.
func restartAll(t *testing.T) {
	pods := getKubeClient(t).CoreV1().Pods(v1.NamespaceDefault)
	podList, err := pods.List(metav1.ListOptions{
		LabelSelector: "suite=pachyderm",
	})
	require.NoError(t, err)
	immediate := new(int64) // grace period of 0 seconds
	for i := range podList.Items {
		require.NoError(t, pods.Delete(podList.Items[i].Name, &metav1.DeleteOptions{
			GracePeriodSeconds: immediate,
		}))
	}
	waitForReadiness(t)
}
// restartOne force-deletes a single randomly chosen pachd pod and waits for
// the cluster to report ready again.
func restartOne(t *testing.T) {
	pods := getKubeClient(t).CoreV1().Pods(v1.NamespaceDefault)
	podList, err := pods.List(metav1.ListOptions{
		LabelSelector: "app=pachd",
	})
	require.NoError(t, err)
	victim := podList.Items[rand.Intn(len(podList.Items))].Name
	require.NoError(t, pods.Delete(victim,
		&metav1.DeleteOptions{GracePeriodSeconds: new(int64)}))
	waitForReadiness(t)
}
const (
	// retries bounds the number of connection attempts getUsablePachClient
	// makes before failing the test.
	retries = 10
)
// getUsablePachClient is like getPachClient except it blocks until it gets a
// connection that actually works, making up to `retries` attempts of a
// 30-second ListRepo probe before failing the test.
func getUsablePachClient(t *testing.T) *client.APIClient {
	for i := 0; i < retries; i++ {
		client := getPachClient(t)
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
		_, err := client.PfsAPIClient.ListRepo(ctx, &pfs.ListRepoRequest{})
		// Release this attempt's timeout immediately. The original deferred
		// cancel() inside the loop, which kept every attempt's context (and
		// its timer) alive until the function returned.
		cancel()
		if err == nil {
			return client
		}
	}
	t.Fatalf("failed to connect after %d tries", retries)
	return nil
}
// podRunningAndReady inspects a pod watch event and reports whether the pod
// it describes is in the Running phase.
//
// It returns an error if the event is a deletion or if the event's object is
// not a *v1.Pod.
func podRunningAndReady(e watch.Event) (bool, error) {
	if e.Type == watch.Deleted {
		return false, errors.New("received DELETE while watching pods")
	}
	pod, ok := e.Object.(*v1.Pod)
	if !ok {
		// The original left this branch empty and fell through, which would
		// dereference the nil `pod` below. Report the bad event instead.
		return false, errors.New("expected *v1.Pod object in watch event")
	}
	return pod.Status.Phase == v1.PodRunning, nil
}
// waitForReadiness blocks until the pachd deployment's controller has
// observed the current generation at the desired replica count, and then
// watches pachd pods until that many report the Running phase.
func waitForReadiness(t testing.TB) {
	k := getKubeClient(t)
	deployment := pachdDeployment(t)
	// Wait for the deployment controller to catch up with the spec.
	for {
		newDeployment, err := k.Apps().Deployments(v1.NamespaceDefault).Get(deployment.Name, metav1.GetOptions{})
		require.NoError(t, err)
		if newDeployment.Status.ObservedGeneration >= deployment.Generation && newDeployment.Status.Replicas == *newDeployment.Spec.Replicas {
			break
		}
		time.Sleep(time.Second * 5)
	}
	// Renamed from `watch` to avoid shadowing the imported watch package.
	// The error is now checked BEFORE deferring Stop: the original deferred
	// watch.Stop() first, which would panic on a nil watch if Watch failed.
	podWatch, err := k.CoreV1().Pods(v1.NamespaceDefault).Watch(metav1.ListOptions{
		LabelSelector: "app=pachd",
	})
	require.NoError(t, err)
	defer podWatch.Stop()
	readyPods := make(map[string]bool)
	for event := range podWatch.ResultChan() {
		ready, err := podRunningAndReady(event)
		require.NoError(t, err)
		if ready {
			pod, ok := event.Object.(*v1.Pod)
			if !ok {
				t.Fatal("event.Object should be an object")
			}
			readyPods[pod.Name] = true
			if len(readyPods) == int(*deployment.Spec.Replicas) {
				break
			}
		}
	}
}
// simulateGitPush POSTs the JSON payload at pathToPayload to the local git
// hook endpoint, mimicking a GitHub push webhook, and requires a 200
// response.
func simulateGitPush(t *testing.T, pathToPayload string) {
	payload, err := ioutil.ReadFile(pathToPayload)
	require.NoError(t, err)
	req, err := http.NewRequest(
		"POST",
		fmt.Sprintf("http://127.0.0.1:%v/v1/handle/push", githook.GitHookPort+30000),
		bytes.NewBuffer(payload),
	)
	// The original never checked this error; a malformed URL would have
	// nil-dereferenced `req` below.
	require.NoError(t, err)
	req.Header.Set("X-Github-Delivery", "2984f5d0-c032-11e7-82d7-ed3ee54be25d")
	req.Header.Set("User-Agent", "GitHub-Hookshot/c1d08eb")
	req.Header.Set("X-Github-Event", "push")
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	require.NoError(t, err)
	defer resp.Body.Close()
	require.Equal(t, 200, resp.StatusCode)
}
// pipelineRc fetches the kubernetes replication controller backing the given
// pipeline (named from its pipeline name and version).
func pipelineRc(t testing.TB, pipelineInfo *pps.PipelineInfo) (*v1.ReplicationController, error) {
	name := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version)
	rcs := getKubeClient(t).CoreV1().ReplicationControllers(v1.NamespaceDefault)
	return rcs.Get(name, metav1.GetOptions{})
}
// pachdDeployment fetches the "pachd" Deployment from the default namespace,
// failing the test on error.
func pachdDeployment(t testing.TB) *apps.Deployment {
	d, err := getKubeClient(t).Apps().Deployments(v1.NamespaceDefault).Get("pachd", metav1.GetOptions{})
	require.NoError(t, err)
	return d
}
// scalePachdRandom scales the number of pachd nodes up or down.
// If up is true, then the number of nodes will be within (n, 2n]
// If up is false, then the number of nodes will be within [1, n).
// Scaling down from a single replica is a no-op (there is nothing below 1).
func scalePachdRandom(t testing.TB, up bool) {
	originalReplicas := *pachdDeployment(t).Spec.Replicas
	if !up && originalReplicas <= 1 {
		// The original called rand.Intn(originalReplicas-1) here, which
		// panics with rand.Intn(0) when there is only one replica.
		return
	}
	var target int32
	for {
		if up {
			target = originalReplicas + int32(rand.Intn(int(originalReplicas))+1)
		} else {
			target = int32(rand.Intn(int(originalReplicas)-1) + 1)
		}
		if target != originalReplicas {
			break
		}
	}
	scalePachdN(t, int(target))
}
// scalePachdN scales the pachd deployment to exactly n replicas, waits for
// readiness, and then pauses to let cluster membership settle.
func scalePachdN(t testing.TB, n int) {
	k := getKubeClient(t)
	// (Local renamed from `pachdDeployment`, which shadowed the helper
	// function of the same name.)
	d := pachdDeployment(t)
	*d.Spec.Replicas = int32(n)
	// Modify the type metadata of the Deployment spec we read from k8s, so
	// that k8s will accept it if we're talking to a 1.7 cluster.
	d.TypeMeta.APIVersion = "apps/v1beta1"
	_, err := k.Apps().Deployments(v1.NamespaceDefault).Update(d)
	require.NoError(t, err)
	waitForReadiness(t)
	// Unfortunately, even when all pods are ready, the cluster membership
	// protocol might still be running, thus PFS API calls might fail. So
	// we wait a little bit for membership to stabilize.
	time.Sleep(15 * time.Second)
}
// scalePachd reads the desired pachd node count from the PACHD env variable
// and scales the deployment to it; when PACHD is unset it does nothing.
func scalePachd(t testing.TB) {
	raw := os.Getenv("PACHD")
	if raw == "" {
		return // no scaling requested
	}
	n, err := strconv.Atoi(raw)
	require.NoError(t, err)
	scalePachdN(t, n)
}
// getKubeClient builds a kubernetes clientset. Inside a cluster
// (KUBERNETES_SERVICE_HOST set) it uses the in-cluster config; otherwise it
// shells out to kubectl/jq to reconstruct the current context's credentials,
// falling back to an insecure localhost config when kubectl has no context.
func getKubeClient(t testing.TB) *kube.Clientset {
	var config *rest.Config
	host := os.Getenv("KUBERNETES_SERVICE_HOST")
	if host != "" {
		var err error
		config, err = rest.InClusterConfig()
		require.NoError(t, err)
	} else {
		// Use kubectl binary to parse .kube/config and get address of current
		// cluster. Hopefully, once we upgrade to k8s.io/client-go, we will be able
		// to do this in-process with a library
		// First, figure out if we're talking to minikube or localhost
		cmd := exec.Command("kubectl", "config", "current-context")
		if context, err := cmd.Output(); err == nil {
			context = bytes.TrimSpace(context)
			// kubectl has a context -- not talking to localhost
			// Get cluster and user name from kubectl
			buf := &bytes.Buffer{}
			cmd := tu.BashCmd(strings.Join([]string{
				`kubectl config get-contexts "{{.context}}" | tail -n+2 | awk '{print $3}'`,
				`kubectl config get-contexts "{{.context}}" | tail -n+2 | awk '{print $4}'`,
			}, "\n"),
				"context", string(context))
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes context info")
			// The two kubectl lines above print one value per line.
			lines := strings.Split(buf.String(), "\n")
			clustername, username := lines[0], lines[1]
			// Get user info: client certificate and key paths for this user.
			buf.Reset()
			cmd = tu.BashCmd(strings.Join([]string{
				`cluster="$(kubectl config view -o json | jq -r '.users[] | select(.name == "{{.user}}") | .user' )"`,
				`echo "${cluster}" | jq -r '.["client-certificate"]'`,
				`echo "${cluster}" | jq -r '.["client-key"]'`,
			}, "\n"),
				"user", username)
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes user info")
			lines = strings.Split(buf.String(), "\n")
			clientCert, clientKey := lines[0], lines[1]
			// Get cluster info: API server address and CA file path.
			buf.Reset()
			cmd = tu.BashCmd(strings.Join([]string{
				`cluster="$(kubectl config view -o json | jq -r '.clusters[] | select(.name == "{{.cluster}}") | .cluster')"`,
				`echo "${cluster}" | jq -r .server`,
				`echo "${cluster}" | jq -r '.["certificate-authority"]'`,
			}, "\n"),
				"cluster", clustername)
			cmd.Stdout = buf
			require.NoError(t, cmd.Run(), "couldn't get kubernetes cluster info: %s", buf.String())
			lines = strings.Split(buf.String(), "\n")
			address, CAKey := lines[0], lines[1]
			// Generate config from the collected credential file paths.
			config = &rest.Config{
				Host: address,
				TLSClientConfig: rest.TLSClientConfig{
					CertFile: clientCert,
					KeyFile:  clientKey,
					CAFile:   CAKey,
				},
			}
		} else {
			// no context -- talking to localhost
			// NOTE(review): Insecure is false here even though the host is
			// plain http — confirm this is intentional.
			config = &rest.Config{
				Host: "http://0.0.0.0:8080",
				TLSClientConfig: rest.TLSClientConfig{
					Insecure: false,
				},
			}
		}
	}
	k, err := kube.NewForConfig(config)
	require.NoError(t, err)
	return k
}
// pachClient caches the process-wide pachyderm client created lazily by
// getPachClient.
var pachClient *client.APIClient
var getPachClientOnce sync.Once

// getPachClient returns a singleton pachyderm API client. When
// PACHD_PORT_650_TCP_ADDR is set it connects in-cluster; otherwise it uses
// the local user configuration. Creation failures fail the test.
func getPachClient(t testing.TB) *client.APIClient {
	getPachClientOnce.Do(func() {
		var err error
		if addr := os.Getenv("PACHD_PORT_650_TCP_ADDR"); addr != "" {
			pachClient, err = client.NewInCluster()
		} else {
			pachClient, err = client.NewOnUserMachine(false, "user")
		}
		require.NoError(t, err)
	})
	return pachClient
}
// etcdClient caches the process-wide etcd client created lazily by
// getEtcdClient.
var etcdClient *etcd.Client
var getEtcdClientOnce sync.Once

const (
	etcdAddress = "localhost:32379" // etcd must already be serving at this address
)

// getEtcdClient returns a singleton etcd client connected to etcdAddress.
// Creation failures fail the test.
func getEtcdClient(t testing.TB) *etcd.Client {
	getEtcdClientOnce.Do(func() {
		var err error
		etcdClient, err = etcd.New(etcd.Config{
			Endpoints:   []string{etcdAddress},
			DialOptions: client.EtcdDialOptions(),
		})
		require.NoError(t, err)
	})
	return etcdClient
}
|
package btelegram
import (
"html"
"path/filepath"
"strconv"
"strings"
"unicode/utf16"
"github.com/42wim/matterbridge/bridge/config"
"github.com/42wim/matterbridge/bridge/helper"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
)
// handleUpdate fills rmsg from a freshly posted message and/or an edited one
// (unless EditDisable is set, in which case edits are ignored), returning
// whichever message was processed last.
func (b *Btelegram) handleUpdate(rmsg *config.Message, message, posted, edited *tgbotapi.Message) *tgbotapi.Message {
	if posted != nil { // new channel/group message
		message = posted
		rmsg.Text = message.Text
	}
	if edited != nil && !b.GetBool("EditDisable") { // edited message
		message = edited
		rmsg.Text += message.Text + b.GetString("EditSuffix")
	}
	return message
}
// handleChannels routes channel posts (new or edited) through handleUpdate
// and returns the message that was processed (nil when neither field of the
// update is set).
func (b *Btelegram) handleChannels(rmsg *config.Message, message *tgbotapi.Message, update tgbotapi.Update) *tgbotapi.Message {
	return b.handleUpdate(rmsg, message, update.ChannelPost, update.EditedChannelPost)
}
// handleGroups routes group messages (new or edited) through handleUpdate
// and returns the message that was processed (nil when neither field of the
// update is set).
func (b *Btelegram) handleGroups(rmsg *config.Message, message *tgbotapi.Message, update tgbotapi.Update) *tgbotapi.Message {
	return b.handleUpdate(rmsg, message, update.Message, update.EditedMessage)
}
// handleForwarded prefixes rmsg.Text with the original author of a forwarded
// message; non-forwards (ForwardDate == 0) are left untouched.
func (b *Btelegram) handleForwarded(rmsg *config.Message, message *tgbotapi.Message) {
	if message.ForwardDate == 0 {
		return // not a forwarded message
	}
	from := message.ForwardFrom
	if from == nil {
		rmsg.Text = "Forwarded from " + unknownUser + ": " + rmsg.Text
		return
	}
	// Pick a display name: FirstName when configured, else UserName, else
	// FirstName again, else the unknown-user placeholder.
	name := ""
	if b.GetBool("UseFirstName") {
		name = from.FirstName
	}
	if name == "" {
		name = from.UserName
		if name == "" {
			name = from.FirstName
		}
	}
	if name == "" {
		name = unknownUser
	}
	rmsg.Text = "Forwarded from " + name + ": " + rmsg.Text
}
// handleQuoting rewrites rmsg.Text to include the quoted parent message when
// this message is a reply (and QuoteDisable is not set).
func (b *Btelegram) handleQuoting(rmsg *config.Message, message *tgbotapi.Message) {
	parent := message.ReplyToMessage
	if parent == nil {
		return
	}
	// Pick a display name for the quoted author, mirroring handleUsername.
	name := ""
	if from := parent.From; from != nil {
		if b.GetBool("UseFirstName") {
			name = from.FirstName
		}
		if name == "" {
			name = from.UserName
			if name == "" {
				name = from.FirstName
			}
		}
	}
	if name == "" {
		name = unknownUser
	}
	if !b.GetBool("QuoteDisable") {
		rmsg.Text = b.handleQuote(rmsg.Text, name, parent.Text)
	}
}
// handleUsername fills rmsg.UserID and rmsg.Username from the sender and
// kicks off an avatar download when a media server is configured. A sender
// with no usable name gets the unknown-user placeholder.
func (b *Btelegram) handleUsername(rmsg *config.Message, message *tgbotapi.Message) {
	if from := message.From; from != nil {
		rmsg.UserID = strconv.Itoa(from.ID)
		if b.GetBool("UseFirstName") {
			rmsg.Username = from.FirstName
		}
		if rmsg.Username == "" {
			rmsg.Username = from.UserName
			if rmsg.Username == "" {
				rmsg.Username = from.FirstName
			}
		}
		// only download avatars if we have a place to upload them (configured mediaserver)
		if b.General.MediaServerUpload != "" || (b.General.MediaServerDownload != "" && b.General.MediaDownloadPath != "") {
			b.handleDownloadAvatar(from.ID, rmsg.Channel)
		}
	}
	// if we really didn't find a username, set it to unknown
	if rmsg.Username == "" {
		rmsg.Username = unknownUser
	}
}
// handleRecv is the receive loop: it consumes Telegram updates from the
// channel, converts each into a config.Message (username, downloads,
// forwards, quotes, entities) and forwards non-empty results to the gateway
// via b.Remote.
func (b *Btelegram) handleRecv(updates <-chan tgbotapi.Update) {
	for update := range updates {
		b.Log.Debugf("== Receiving event: %#v", update.Message)
		// Skip updates that carry none of the message kinds we handle.
		if update.Message == nil && update.ChannelPost == nil &&
			update.EditedMessage == nil && update.EditedChannelPost == nil {
			b.Log.Error("Getting nil messages, this shouldn't happen.")
			continue
		}
		var message *tgbotapi.Message
		rmsg := config.Message{Account: b.Account, Extra: make(map[string][]interface{})}
		// handle channels
		message = b.handleChannels(&rmsg, message, update)
		// handle groups
		message = b.handleGroups(&rmsg, message, update)
		if message == nil {
			b.Log.Error("message is nil, this shouldn't happen.")
			continue
		}
		// set the ID's from the channel or group message
		rmsg.ID = strconv.Itoa(message.MessageID)
		rmsg.Channel = strconv.FormatInt(message.Chat.ID, 10)
		// handle username
		b.handleUsername(&rmsg, message)
		// handle any downloads (best effort: a failure is logged, not fatal)
		err := b.handleDownload(&rmsg, message)
		if err != nil {
			b.Log.Errorf("download failed: %s", err)
		}
		// handle forwarded messages
		b.handleForwarded(&rmsg, message)
		// quote the previous message
		b.handleQuoting(&rmsg, message)
		// handle entities (adding URLs)
		b.handleEntities(&rmsg, message)
		// Only relay when some text or attachment remains.
		if rmsg.Text != "" || len(rmsg.Extra) > 0 {
			rmsg.Text = helper.RemoveEmptyNewLines(rmsg.Text)
			// channels don't have (always?) user information. see #410
			if message.From != nil {
				rmsg.Avatar = helper.GetAvatar(b.avatarMap, strconv.Itoa(message.From.ID), b.General)
			}
			b.Log.Debugf("<= Sending message from %s on %s to gateway", rmsg.Username, b.Account)
			b.Log.Debugf("<= Message is %#v", rmsg)
			b.Remote <- rmsg
		}
	}
}
// handleDownloadAvatar downloads the avatar of userid from channel and
// sends an EVENT_AVATAR_DOWNLOAD message to the gateway if successful.
// It logs an error message if it fails.
func (b *Btelegram) handleDownloadAvatar(userid int, channel string) {
	rmsg := config.Message{
		Username: "system",
		Text:     "avatar",
		Channel:  channel,
		Account:  b.Account,
		UserID:   strconv.Itoa(userid),
		Event:    config.EventAvatarDownload,
		Extra:    make(map[string][]interface{}),
	}
	// Only fetch when no avatar is cached for this user yet.
	if _, ok := b.avatarMap[strconv.Itoa(userid)]; !ok {
		photos, err := b.c.GetUserProfilePhotos(tgbotapi.UserProfilePhotosConfig{UserID: userid, Limit: 1})
		if err != nil {
			// NOTE(review): execution falls through after logging; photos is
			// then the zero value, so len(photos.Photos) == 0 and nothing
			// further happens. Confirm an explicit return isn't preferable.
			b.Log.Errorf("Userprofile download failed for %#v %s", userid, err)
		}
		if len(photos.Photos) > 0 {
			// Use the first size of the first photo set.
			photo := photos.Photos[0][0]
			url := b.getFileDirectURL(photo.FileID)
			name := strconv.Itoa(userid) + ".png"
			b.Log.Debugf("trying to download %#v fileid %#v with size %#v", name, photo.FileID, photo.FileSize)
			// Enforce the configured size limit before downloading.
			err := helper.HandleDownloadSize(b.Log, &rmsg, name, int64(photo.FileSize), b.General)
			if err != nil {
				b.Log.Error(err)
				return
			}
			data, err := helper.DownloadFile(url)
			if err != nil {
				b.Log.Errorf("download %s failed %#v", url, err)
				return
			}
			helper.HandleDownloadData(b.Log, &rmsg, name, rmsg.Text, "", data, b.General)
			b.Remote <- rmsg
		}
	}
}
// maybeConvertTgs converts an animated Telegram sticker (.tgs.webp) to the
// format configured via MediaConvertTgs, updating *name on success.
func (b *Btelegram) maybeConvertTgs(name *string, data *[]byte) {
	format := b.GetString("MediaConvertTgs")
	if !helper.SupportsFormat(format) {
		// No supported conversion was requested. Trying to run the usual
		// webp converter would fail, because '.tgs.webp' is actually a
		// gzipped JSON file, and has nothing to do with WebP.
		return
	}
	b.Log.Debugf("Format supported by %s, converting %v", helper.LottieBackend(), name)
	if err := helper.ConvertTgsToX(data, format, b.Log); err != nil {
		b.Log.Errorf("conversion failed: %v", err)
		return
	}
	*name = strings.Replace(*name, "tgs.webp", format, 1)
}
// maybeConvertWebp converts a .webp sticker to PNG when the
// MediaConvertWebPToPNG option is enabled, updating *name on success.
func (b *Btelegram) maybeConvertWebp(name *string, data *[]byte) {
	if !b.GetBool("MediaConvertWebPToPNG") {
		return
	}
	b.Log.Debugf("WebP to PNG conversion enabled, converting %v", name)
	if err := helper.ConvertWebPToPNG(data); err != nil {
		b.Log.Errorf("conversion failed: %v", err)
		return
	}
	*name = strings.Replace(*name, ".webp", ".png", 1)
}
// handleDownload checks the message for a downloadable attachment (sticker,
// voice, video, audio, document or photo). When one is found it either
// appends a direct URL to rmsg.Text (UseInsecureURL) or downloads the file
// in memory and attaches it to rmsg via rmsg.Extra.
func (b *Btelegram) handleDownload(rmsg *config.Message, message *tgbotapi.Message) error {
	size := 0
	var url, name, text string
	switch {
	case message.Sticker != nil:
		text, name, url = b.getDownloadInfo(message.Sticker.FileID, ".webp", true)
		size = message.Sticker.FileSize
	case message.Voice != nil:
		text, name, url = b.getDownloadInfo(message.Voice.FileID, ".ogg", true)
		size = message.Voice.FileSize
	case message.Video != nil:
		text, name, url = b.getDownloadInfo(message.Video.FileID, "", true)
		size = message.Video.FileSize
	case message.Audio != nil:
		text, name, url = b.getDownloadInfo(message.Audio.FileID, "", true)
		size = message.Audio.FileSize
	case message.Document != nil:
		// Documents keep their original file name rather than the URL tail.
		_, _, url = b.getDownloadInfo(message.Document.FileID, "", false)
		size = message.Document.FileSize
		name = message.Document.FileName
		text = " " + message.Document.FileName + " : " + url
	case message.Photo != nil:
		// Use the last entry of the photo array (presumably the largest
		// resolution — TODO confirm against the Telegram API).
		photos := *message.Photo
		size = photos[len(photos)-1].FileSize
		text, name, url = b.getDownloadInfo(photos[len(photos)-1].FileID, "", true)
	}
	// if name is empty we didn't match a thing to download
	if name == "" {
		return nil
	}
	// use the URL instead of native upload
	if b.GetBool("UseInsecureURL") {
		b.Log.Debugf("Setting message text to :%s", text)
		rmsg.Text += text
		return nil
	}
	// if we have a file attached, download it (in memory) and put a pointer to it in msg.Extra
	err := helper.HandleDownloadSize(b.Log, rmsg, name, int64(size), b.General)
	if err != nil {
		return err
	}
	data, err := helper.DownloadFile(url)
	if err != nil {
		return err
	}
	// Stickers may need conversion: animated (.tgs.webp) and plain .webp are
	// handled by separate converters.
	if strings.HasSuffix(name, ".tgs.webp") {
		b.maybeConvertTgs(&name, data)
	} else if strings.HasSuffix(name, ".webp") {
		b.maybeConvertWebp(&name, data)
	}
	// rename .oga to .ogg https://github.com/42wim/matterbridge/issues/906#issuecomment-741793512
	if strings.HasSuffix(name, ".oga") && message.Audio != nil {
		name = strings.Replace(name, ".oga", ".ogg", 1)
	}
	helper.HandleDownloadData(b.Log, rmsg, name, message.Caption, "", data, b.General)
	return nil
}
// getDownloadInfo resolves a Telegram file id into (text, name, url): a
// " <url>" snippet for message text, a file name (the URL's last path
// segment when urlpart is set, extended with suffix if missing), and the
// direct download URL itself.
func (b *Btelegram) getDownloadInfo(id string, suffix string, urlpart bool) (string, string, string) {
	url := b.getFileDirectURL(id)
	var name string
	if urlpart {
		segments := strings.Split(url, "/")
		name = segments[len(segments)-1]
	}
	if suffix != "" && !strings.HasSuffix(name, suffix) {
		name += suffix
	}
	return " " + url, name, url
}
// handleDelete removes a previously relayed message from Telegram; messages
// with no ID are ignored.
func (b *Btelegram) handleDelete(msg *config.Message, chatid int64) (string, error) {
	if msg.ID == "" {
		return "", nil // nothing to delete
	}
	msgid, err := strconv.Atoi(msg.ID)
	if err != nil {
		return "", err
	}
	_, err = b.c.DeleteMessage(tgbotapi.DeleteMessageConfig{ChatID: chatid, MessageID: msgid})
	return "", err
}
// handleEdit edits a previously relayed Telegram message, applying the
// configured MessageFormat (HTML, Markdown, MarkdownV2 or HTML-nick-only).
func (b *Btelegram) handleEdit(msg *config.Message, chatid int64) (string, error) {
	msgid, err := strconv.Atoi(msg.ID)
	if err != nil {
		return "", err
	}
	format := b.GetString("MessageFormat")
	nickOnly := strings.ToLower(format) == HTMLNick
	if nickOnly {
		b.Log.Debug("Using mode HTML - nick only")
		msg.Text = html.EscapeString(msg.Text)
	}
	m := tgbotapi.NewEditMessageText(chatid, msgid, msg.Username+msg.Text)
	switch format {
	case HTMLFormat:
		b.Log.Debug("Using mode HTML")
		m.ParseMode = tgbotapi.ModeHTML
	case "Markdown":
		b.Log.Debug("Using mode markdown")
		m.ParseMode = tgbotapi.ModeMarkdown
	case MarkdownV2:
		b.Log.Debug("Using mode MarkdownV2")
		m.ParseMode = MarkdownV2
	}
	if nickOnly {
		b.Log.Debug("Using mode HTML - nick only")
		m.ParseMode = tgbotapi.ModeHTML
	}
	if _, err = b.c.Send(m); err != nil {
		return "", err
	}
	return "", nil
}
// handleUploadFile natively uploads each attached file to Telegram, picking
// the upload kind (photo/video/audio/voice/document) from the file
// extension. Upload failures are logged, not returned.
func (b *Btelegram) handleUploadFile(msg *config.Message, chatid int64) string {
	for _, f := range msg.Extra["file"] {
		fi := f.(config.FileInfo)
		file := tgbotapi.FileBytes{
			Name:  fi.Name,
			Bytes: *fi.Data,
		}
		caption, parseMode := TGGetParseMode(b, msg.Username, fi.Comment)
		var c tgbotapi.Chattable
		switch filepath.Ext(fi.Name) {
		case ".jpg", ".jpe", ".png":
			upload := tgbotapi.NewPhotoUpload(chatid, file)
			upload.Caption, upload.ParseMode = caption, parseMode
			c = upload
		case ".mp4", ".m4v":
			upload := tgbotapi.NewVideoUpload(chatid, file)
			upload.Caption, upload.ParseMode = caption, parseMode
			c = upload
		case ".mp3", ".oga":
			upload := tgbotapi.NewAudioUpload(chatid, file)
			upload.Caption, upload.ParseMode = caption, parseMode
			c = upload
		case ".ogg":
			upload := tgbotapi.NewVoiceUpload(chatid, file)
			upload.Caption, upload.ParseMode = caption, parseMode
			c = upload
		default:
			upload := tgbotapi.NewDocumentUpload(chatid, file)
			upload.Caption, upload.ParseMode = caption, parseMode
			c = upload
		}
		if _, err := b.c.Send(c); err != nil {
			b.Log.Errorf("file upload failed: %#v", err)
		}
	}
	return ""
}
// handleQuote renders a reply using the configured "quoteformat" template
// ({MESSAGE}, {QUOTENICK}, {QUOTEMESSAGE}), truncating the quoted text at
// QuoteLengthLimit runes with a "..." marker.
func (b *Btelegram) handleQuote(message, quoteNick, quoteMessage string) string {
	format := b.GetString("quoteformat")
	if format == "" {
		format = "{MESSAGE} (re @{QUOTENICK}: {QUOTEMESSAGE})"
	}
	limit := b.GetInt("QuoteLengthLimit")
	if runes := []rune(quoteMessage); limit != 0 && len(runes) >= limit {
		quoteMessage = string(runes[:limit])
		if len(runes) > limit {
			quoteMessage += "..."
		}
	}
	// Substitute sequentially (matching the original behavior if a
	// substituted value itself contains a placeholder).
	format = strings.Replace(format, "{MESSAGE}", message, -1)
	format = strings.Replace(format, "{QUOTENICK}", quoteNick, -1)
	return strings.Replace(format, "{QUOTEMESSAGE}", quoteMessage, -1)
}
// handleEntities processes the message's entities. Currently only
// "text_link" entities are handled: the linked text span inside rmsg.Text is
// replaced with the actual URL.
func (b *Btelegram) handleEntities(rmsg *config.Message, message *tgbotapi.Message) {
	if message.Entities == nil {
		return
	}
	// for now only do URL replacements
	for _, e := range *message.Entities {
		if e.Type == "text_link" {
			url, err := e.ParseURL()
			if err != nil {
				b.Log.Errorf("entity text_link url parse failed: %s", err)
				continue
			}
			// Entity offsets/lengths are applied to the UTF-16 encoding of
			// the text, so encode before slicing.
			utfEncodedString := utf16.Encode([]rune(rmsg.Text))
			if e.Offset+e.Length > len(utfEncodedString) {
				b.Log.Errorf("entity length is too long %d > %d", e.Offset+e.Length, len(utfEncodedString))
				continue
			}
			link := utf16.Decode(utfEncodedString[e.Offset : e.Offset+e.Length])
			// Replace only the first occurrence of the linked text.
			rmsg.Text = strings.Replace(rmsg.Text, string(link), url.String(), 1)
		}
	}
}
Add support for code blocks in Telegram (#1650)
* Handle code blocks in Telegram.
* Support multi-line code blocks.
* Remove an unused import.
* Handle code blocks in the middle of normal text.
* Support multiple code blocks in the same message.
package btelegram
import (
"html"
"path/filepath"
"strconv"
"strings"
"unicode/utf16"
"github.com/42wim/matterbridge/bridge/config"
"github.com/42wim/matterbridge/bridge/helper"
tgbotapi "github.com/go-telegram-bot-api/telegram-bot-api"
)
// handleUpdate fills rmsg from a freshly posted message and/or an edited one
// (unless EditDisable is set, in which case edits are ignored), returning
// whichever message was processed last.
func (b *Btelegram) handleUpdate(rmsg *config.Message, message, posted, edited *tgbotapi.Message) *tgbotapi.Message {
	if posted != nil { // new channel/group message
		message = posted
		rmsg.Text = message.Text
	}
	if edited != nil && !b.GetBool("EditDisable") { // edited message
		message = edited
		rmsg.Text += message.Text + b.GetString("EditSuffix")
	}
	return message
}
// handleChannels routes channel posts (new or edited) through handleUpdate
// and returns the message that was processed (nil when neither field of the
// update is set).
func (b *Btelegram) handleChannels(rmsg *config.Message, message *tgbotapi.Message, update tgbotapi.Update) *tgbotapi.Message {
	return b.handleUpdate(rmsg, message, update.ChannelPost, update.EditedChannelPost)
}
// handleGroups routes group messages (new or edited) through handleUpdate
// and returns the message that was processed (nil when neither field of the
// update is set).
func (b *Btelegram) handleGroups(rmsg *config.Message, message *tgbotapi.Message, update tgbotapi.Update) *tgbotapi.Message {
	return b.handleUpdate(rmsg, message, update.Message, update.EditedMessage)
}
// handleForwarded prefixes rmsg.Text with the original author of a forwarded
// message; non-forwards (ForwardDate == 0) are left untouched.
func (b *Btelegram) handleForwarded(rmsg *config.Message, message *tgbotapi.Message) {
	if message.ForwardDate == 0 {
		return // not a forwarded message
	}
	from := message.ForwardFrom
	if from == nil {
		rmsg.Text = "Forwarded from " + unknownUser + ": " + rmsg.Text
		return
	}
	// Pick a display name: FirstName when configured, else UserName, else
	// FirstName again, else the unknown-user placeholder.
	name := ""
	if b.GetBool("UseFirstName") {
		name = from.FirstName
	}
	if name == "" {
		name = from.UserName
		if name == "" {
			name = from.FirstName
		}
	}
	if name == "" {
		name = unknownUser
	}
	rmsg.Text = "Forwarded from " + name + ": " + rmsg.Text
}
// handleQuoting rewrites rmsg.Text to include the quoted parent message when
// this message is a reply (and QuoteDisable is not set).
func (b *Btelegram) handleQuoting(rmsg *config.Message, message *tgbotapi.Message) {
	parent := message.ReplyToMessage
	if parent == nil {
		return
	}
	// Pick a display name for the quoted author, mirroring handleUsername.
	name := ""
	if from := parent.From; from != nil {
		if b.GetBool("UseFirstName") {
			name = from.FirstName
		}
		if name == "" {
			name = from.UserName
			if name == "" {
				name = from.FirstName
			}
		}
	}
	if name == "" {
		name = unknownUser
	}
	if !b.GetBool("QuoteDisable") {
		rmsg.Text = b.handleQuote(rmsg.Text, name, parent.Text)
	}
}
// handleUsername fills rmsg.UserID and rmsg.Username from the sender and
// kicks off an avatar download when a media server is configured. A sender
// with no usable name gets the unknown-user placeholder.
func (b *Btelegram) handleUsername(rmsg *config.Message, message *tgbotapi.Message) {
	if from := message.From; from != nil {
		rmsg.UserID = strconv.Itoa(from.ID)
		if b.GetBool("UseFirstName") {
			rmsg.Username = from.FirstName
		}
		if rmsg.Username == "" {
			rmsg.Username = from.UserName
			if rmsg.Username == "" {
				rmsg.Username = from.FirstName
			}
		}
		// only download avatars if we have a place to upload them (configured mediaserver)
		if b.General.MediaServerUpload != "" || (b.General.MediaServerDownload != "" && b.General.MediaDownloadPath != "") {
			b.handleDownloadAvatar(from.ID, rmsg.Channel)
		}
	}
	// if we really didn't find a username, set it to unknown
	if rmsg.Username == "" {
		rmsg.Username = unknownUser
	}
}
// handleRecv is the receive loop: it consumes Telegram updates from the
// channel, converts each into a config.Message (username, downloads,
// forwards, quotes, entities) and forwards non-empty results to the gateway
// via b.Remote.
func (b *Btelegram) handleRecv(updates <-chan tgbotapi.Update) {
	for update := range updates {
		b.Log.Debugf("== Receiving event: %#v", update.Message)
		// Skip updates that carry none of the message kinds we handle.
		if update.Message == nil && update.ChannelPost == nil &&
			update.EditedMessage == nil && update.EditedChannelPost == nil {
			b.Log.Error("Getting nil messages, this shouldn't happen.")
			continue
		}
		var message *tgbotapi.Message
		rmsg := config.Message{Account: b.Account, Extra: make(map[string][]interface{})}
		// handle channels
		message = b.handleChannels(&rmsg, message, update)
		// handle groups
		message = b.handleGroups(&rmsg, message, update)
		if message == nil {
			b.Log.Error("message is nil, this shouldn't happen.")
			continue
		}
		// set the ID's from the channel or group message
		rmsg.ID = strconv.Itoa(message.MessageID)
		rmsg.Channel = strconv.FormatInt(message.Chat.ID, 10)
		// handle username
		b.handleUsername(&rmsg, message)
		// handle any downloads (best effort: a failure is logged, not fatal)
		err := b.handleDownload(&rmsg, message)
		if err != nil {
			b.Log.Errorf("download failed: %s", err)
		}
		// handle forwarded messages
		b.handleForwarded(&rmsg, message)
		// quote the previous message
		b.handleQuoting(&rmsg, message)
		// handle entities (adding URLs)
		b.handleEntities(&rmsg, message)
		// Only relay when some text or attachment remains.
		if rmsg.Text != "" || len(rmsg.Extra) > 0 {
			rmsg.Text = helper.RemoveEmptyNewLines(rmsg.Text)
			// channels don't have (always?) user information. see #410
			if message.From != nil {
				rmsg.Avatar = helper.GetAvatar(b.avatarMap, strconv.Itoa(message.From.ID), b.General)
			}
			b.Log.Debugf("<= Sending message from %s on %s to gateway", rmsg.Username, b.Account)
			b.Log.Debugf("<= Message is %#v", rmsg)
			b.Remote <- rmsg
		}
	}
}
// handleDownloadAvatar downloads the avatar of userid from channel and
// sends an EVENT_AVATAR_DOWNLOAD message to the gateway if successful.
// It logs an error message if it fails.
func (b *Btelegram) handleDownloadAvatar(userid int, channel string) {
	rmsg := config.Message{
		Username: "system",
		Text:     "avatar",
		Channel:  channel,
		Account:  b.Account,
		UserID:   strconv.Itoa(userid),
		Event:    config.EventAvatarDownload,
		Extra:    make(map[string][]interface{}),
	}
	// Only fetch when no avatar is cached for this user yet.
	if _, ok := b.avatarMap[strconv.Itoa(userid)]; !ok {
		photos, err := b.c.GetUserProfilePhotos(tgbotapi.UserProfilePhotosConfig{UserID: userid, Limit: 1})
		if err != nil {
			// NOTE(review): execution falls through after logging; photos is
			// then the zero value, so len(photos.Photos) == 0 and nothing
			// further happens. Confirm an explicit return isn't preferable.
			b.Log.Errorf("Userprofile download failed for %#v %s", userid, err)
		}
		if len(photos.Photos) > 0 {
			// Use the first size of the first photo set.
			photo := photos.Photos[0][0]
			url := b.getFileDirectURL(photo.FileID)
			name := strconv.Itoa(userid) + ".png"
			b.Log.Debugf("trying to download %#v fileid %#v with size %#v", name, photo.FileID, photo.FileSize)
			// Enforce the configured size limit before downloading.
			err := helper.HandleDownloadSize(b.Log, &rmsg, name, int64(photo.FileSize), b.General)
			if err != nil {
				b.Log.Error(err)
				return
			}
			data, err := helper.DownloadFile(url)
			if err != nil {
				b.Log.Errorf("download %s failed %#v", url, err)
				return
			}
			helper.HandleDownloadData(b.Log, &rmsg, name, rmsg.Text, "", data, b.General)
			b.Remote <- rmsg
		}
	}
}
// maybeConvertTgs converts an animated Telegram sticker (.tgs.webp) to the
// format configured via MediaConvertTgs, updating *name on success.
func (b *Btelegram) maybeConvertTgs(name *string, data *[]byte) {
	format := b.GetString("MediaConvertTgs")
	if !helper.SupportsFormat(format) {
		// No supported conversion was requested. Trying to run the usual
		// webp converter would fail, because '.tgs.webp' is actually a
		// gzipped JSON file, and has nothing to do with WebP.
		return
	}
	b.Log.Debugf("Format supported by %s, converting %v", helper.LottieBackend(), name)
	if err := helper.ConvertTgsToX(data, format, b.Log); err != nil {
		b.Log.Errorf("conversion failed: %v", err)
		return
	}
	*name = strings.Replace(*name, "tgs.webp", format, 1)
}
// maybeConvertWebp converts a .webp sticker to PNG when the
// MediaConvertWebPToPNG option is enabled, updating *name on success.
func (b *Btelegram) maybeConvertWebp(name *string, data *[]byte) {
	if !b.GetBool("MediaConvertWebPToPNG") {
		return
	}
	b.Log.Debugf("WebP to PNG conversion enabled, converting %v", name)
	if err := helper.ConvertWebPToPNG(data); err != nil {
		b.Log.Errorf("conversion failed: %v", err)
		return
	}
	*name = strings.Replace(*name, ".webp", ".png", 1)
}
// handleDownload checks the message for a downloadable attachment (sticker,
// voice, video, audio, document or photo). When one is found it either
// appends a direct URL to rmsg.Text (UseInsecureURL) or downloads the file
// in memory and attaches it to rmsg via rmsg.Extra.
func (b *Btelegram) handleDownload(rmsg *config.Message, message *tgbotapi.Message) error {
	size := 0
	var url, name, text string
	switch {
	case message.Sticker != nil:
		text, name, url = b.getDownloadInfo(message.Sticker.FileID, ".webp", true)
		size = message.Sticker.FileSize
	case message.Voice != nil:
		text, name, url = b.getDownloadInfo(message.Voice.FileID, ".ogg", true)
		size = message.Voice.FileSize
	case message.Video != nil:
		text, name, url = b.getDownloadInfo(message.Video.FileID, "", true)
		size = message.Video.FileSize
	case message.Audio != nil:
		text, name, url = b.getDownloadInfo(message.Audio.FileID, "", true)
		size = message.Audio.FileSize
	case message.Document != nil:
		// Documents keep their original file name rather than the URL tail.
		_, _, url = b.getDownloadInfo(message.Document.FileID, "", false)
		size = message.Document.FileSize
		name = message.Document.FileName
		text = " " + message.Document.FileName + " : " + url
	case message.Photo != nil:
		// Use the last entry of the photo array (presumably the largest
		// resolution — TODO confirm against the Telegram API).
		photos := *message.Photo
		size = photos[len(photos)-1].FileSize
		text, name, url = b.getDownloadInfo(photos[len(photos)-1].FileID, "", true)
	}
	// if name is empty we didn't match a thing to download
	if name == "" {
		return nil
	}
	// use the URL instead of native upload
	if b.GetBool("UseInsecureURL") {
		b.Log.Debugf("Setting message text to :%s", text)
		rmsg.Text += text
		return nil
	}
	// if we have a file attached, download it (in memory) and put a pointer to it in msg.Extra
	err := helper.HandleDownloadSize(b.Log, rmsg, name, int64(size), b.General)
	if err != nil {
		return err
	}
	data, err := helper.DownloadFile(url)
	if err != nil {
		return err
	}
	// Stickers may need conversion: animated (.tgs.webp) and plain .webp are
	// handled by separate converters.
	if strings.HasSuffix(name, ".tgs.webp") {
		b.maybeConvertTgs(&name, data)
	} else if strings.HasSuffix(name, ".webp") {
		b.maybeConvertWebp(&name, data)
	}
	// rename .oga to .ogg https://github.com/42wim/matterbridge/issues/906#issuecomment-741793512
	if strings.HasSuffix(name, ".oga") && message.Audio != nil {
		name = strings.Replace(name, ".oga", ".ogg", 1)
	}
	helper.HandleDownloadData(b.Log, rmsg, name, message.Caption, "", data, b.General)
	return nil
}
// getDownloadInfo builds the description text, file name and direct download
// URL for a Telegram file ID. When urlpart is true the name is taken from the
// last path segment of the URL; suffix is appended unless already present.
func (b *Btelegram) getDownloadInfo(id string, suffix string, urlpart bool) (string, string, string) {
	url := b.getFileDirectURL(id)

	var name string
	if urlpart {
		segments := strings.Split(url, "/")
		name = segments[len(segments)-1]
	}
	if suffix != "" && !strings.HasSuffix(name, suffix) {
		name += suffix
	}

	return " " + url, name, url
}
// handleDelete deletes the Telegram message identified by msg.ID from the
// given chat. A message without an ID is silently ignored.
func (b *Btelegram) handleDelete(msg *config.Message, chatid int64) (string, error) {
	if msg.ID == "" {
		return "", nil
	}

	msgid, err := strconv.Atoi(msg.ID)
	if err != nil {
		return "", err
	}

	_, err = b.c.DeleteMessage(tgbotapi.DeleteMessageConfig{
		ChatID:    chatid,
		MessageID: msgid,
	})
	return "", err
}
// handleEdit edits a previously relayed Telegram message, applying the
// configured MessageFormat (HTML, Markdown, MarkdownV2 or HTML-nick-only)
// to the new text.
func (b *Btelegram) handleEdit(msg *config.Message, chatid int64) (string, error) {
	msgid, err := strconv.Atoi(msg.ID)
	if err != nil {
		return "", err
	}

	format := b.GetString("MessageFormat")
	nickOnly := strings.ToLower(format) == HTMLNick

	// In nick-only HTML mode the body is escaped before it is combined with
	// the (already formatted) username.
	if nickOnly {
		b.Log.Debug("Using mode HTML - nick only")
		msg.Text = html.EscapeString(msg.Text)
	}

	m := tgbotapi.NewEditMessageText(chatid, msgid, msg.Username+msg.Text)
	switch format {
	case HTMLFormat:
		b.Log.Debug("Using mode HTML")
		m.ParseMode = tgbotapi.ModeHTML
	case "Markdown":
		b.Log.Debug("Using mode markdown")
		m.ParseMode = tgbotapi.ModeMarkdown
	case MarkdownV2:
		b.Log.Debug("Using mode MarkdownV2")
		m.ParseMode = MarkdownV2
	}
	// Nick-only mode always sends as HTML, regardless of the switch above.
	if nickOnly {
		b.Log.Debug("Using mode HTML - nick only")
		m.ParseMode = tgbotapi.ModeHTML
	}

	if _, err = b.c.Send(m); err != nil {
		return "", err
	}
	return "", nil
}
// handleUploadFile natively uploads every file attached to msg to the given
// chat, choosing the Telegram upload type (photo, video, audio, voice or
// document) from the file extension. Upload errors are logged, not returned.
func (b *Btelegram) handleUploadFile(msg *config.Message, chatid int64) string {
	for _, f := range msg.Extra["file"] {
		fi := f.(config.FileInfo)
		file := tgbotapi.FileBytes{
			Name:  fi.Name,
			Bytes: *fi.Data,
		}

		var c tgbotapi.Chattable
		switch filepath.Ext(fi.Name) {
		case ".jpg", ".jpe", ".png":
			up := tgbotapi.NewPhotoUpload(chatid, file)
			up.Caption, up.ParseMode = TGGetParseMode(b, msg.Username, fi.Comment)
			c = up
		case ".mp4", ".m4v":
			up := tgbotapi.NewVideoUpload(chatid, file)
			up.Caption, up.ParseMode = TGGetParseMode(b, msg.Username, fi.Comment)
			c = up
		case ".mp3", ".oga":
			up := tgbotapi.NewAudioUpload(chatid, file)
			up.Caption, up.ParseMode = TGGetParseMode(b, msg.Username, fi.Comment)
			c = up
		case ".ogg":
			up := tgbotapi.NewVoiceUpload(chatid, file)
			up.Caption, up.ParseMode = TGGetParseMode(b, msg.Username, fi.Comment)
			c = up
		default:
			up := tgbotapi.NewDocumentUpload(chatid, file)
			up.Caption, up.ParseMode = TGGetParseMode(b, msg.Username, fi.Comment)
			c = up
		}

		if _, err := b.c.Send(c); err != nil {
			b.Log.Errorf("file upload failed: %#v", err)
		}
	}
	return ""
}
// handleQuote renders a reply ("quote") according to the configured
// quoteformat template. The quoted message is truncated to QuoteLengthLimit
// runes (with a "..." marker) when that option is non-zero.
//
// Supported template placeholders: {MESSAGE}, {QUOTENICK}, {QUOTEMESSAGE}.
func (b *Btelegram) handleQuote(message, quoteNick, quoteMessage string) string {
	format := b.GetString("quoteformat")
	if format == "" {
		format = "{MESSAGE} (re @{QUOTENICK}: {QUOTEMESSAGE})"
	}

	// Hoist the limit and the rune conversion: the original re-read the
	// config option four times and converted the string to runes twice.
	// Truncation is by runes, not bytes, so multi-byte characters survive.
	limit := b.GetInt("QuoteLengthLimit")
	runes := []rune(quoteMessage)
	if limit != 0 && len(runes) >= limit {
		quoteMessage = string(runes[:limit])
		if len(runes) > limit {
			quoteMessage += "..."
		}
	}

	format = strings.Replace(format, "{MESSAGE}", message, -1)
	format = strings.Replace(format, "{QUOTENICK}", quoteNick, -1)
	format = strings.Replace(format, "{QUOTEMESSAGE}", quoteMessage, -1)
	return format
}
// handleEntities handles messageEntities: it rewrites rmsg.Text according to
// the text_link, code and pre entities attached to a Telegram message.
func (b *Btelegram) handleEntities(rmsg *config.Message, message *tgbotapi.Message) {
	if message.Entities == nil {
		return
	}
	// Number of characters inserted into rmsg.Text by earlier code/pre
	// entities in this loop; later entity offsets are shifted by it.
	var indexMovedBy = 0
	// for now only do URL replacements
	for _, e := range *message.Entities {
		if e.Type == "text_link" {
			url, err := e.ParseURL()
			if err != nil {
				b.Log.Errorf("entity text_link url parse failed: %s", err)
				continue
			}
			// Telegram entity offsets/lengths count UTF-16 code units, so the
			// text is re-encoded before slicing out the link label.
			utfEncodedString := utf16.Encode([]rune(rmsg.Text))
			if e.Offset+e.Length > len(utfEncodedString) {
				b.Log.Errorf("entity length is too long %d > %d", e.Offset+e.Length, len(utfEncodedString))
				continue
			}
			link := utf16.Decode(utfEncodedString[e.Offset : e.Offset+e.Length])
			// Replace the label with the target URL (first occurrence only).
			rmsg.Text = strings.Replace(rmsg.Text, string(link), url.String(), 1)
		}
		// NOTE(review): the code/pre branches below slice rmsg.Text by BYTE
		// index, but e.Offset/e.Length count UTF-16 code units (see the
		// text_link branch). For non-ASCII text the markers will likely land
		// at the wrong position — TODO confirm and convert offsets as above.
		if e.Type == "code" {
			var offset = e.Offset + indexMovedBy
			rmsg.Text = rmsg.Text[:offset] + "`" + rmsg.Text[offset:offset + e.Length] + "`" + rmsg.Text[offset + e.Length :]
			// Two backtick characters were inserted.
			indexMovedBy += 2
		}
		if e.Type == "pre" {
			var offset = e.Offset + indexMovedBy
			rmsg.Text = rmsg.Text[:offset] + "```\n" + rmsg.Text[offset:offset + e.Length] + "\n```" + rmsg.Text[offset + e.Length :]
			// Eight characters ("```\n" + "\n```") were inserted.
			indexMovedBy += 8
		}
	}
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/gomega"
)
const (
FederationReplicaSetName = "federation-replicaset"
)
// Create/delete replicaset api objects.
// Ginkgo spec: verifies federated ReplicaSet objects can be created and
// deleted through the federation apiserver.
var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", func() {
	f := framework.NewDefaultFederatedFramework("federation-replicaset")
	Describe("ReplicaSet objects", func() {
		AfterEach(func() {
			framework.SkipUnlessFederated(f.Client)
			// Delete registered replicasets.
			// Best-effort teardown: list everything in the federation
			// namespace and delete it, failing the spec on any error.
			nsName := f.FederationNamespace.Name
			replicasetList, err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).List(api.ListOptions{})
			Expect(err).NotTo(HaveOccurred())
			for _, replicaset := range replicasetList.Items {
				err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &api.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())
			}
		})
		It("should be created and deleted successfully", func() {
			framework.SkipUnlessFederated(f.Client)
			nsName := f.FederationNamespace.Name
			replicaset := createReplicaSetOrFail(f.FederationClientset_1_4, nsName)
			By(fmt.Sprintf("Creation of replicaset %q in namespace %q succeeded. Deleting replicaset.", replicaset.Name, nsName))
			// Cleanup
			err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &api.DeleteOptions{})
			framework.ExpectNoError(err, "Error deleting replicaset %q in namespace %q", replicaset.Name, replicaset.Namespace)
			By(fmt.Sprintf("Deletion of replicaset %q in namespace %q succeeded.", replicaset.Name, nsName))
		})
	})
})
// createReplicaSetOrFail creates the canonical federation test replicaset
// (5 nginx replicas) in the given namespace, failing the ginkgo spec on any
// error. It returns the object as submitted to the API server.
func createReplicaSetOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1beta1.ReplicaSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to createReplicaSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	By(fmt.Sprintf("Creating federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))

	replicas := int32(5)
	template := v1.PodTemplateSpec{
		ObjectMeta: v1.ObjectMeta{
			Labels: map[string]string{"name": "myrs"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "nginx", Image: "nginx"},
			},
		},
	}
	replicaset := &v1beta1.ReplicaSet{
		ObjectMeta: v1.ObjectMeta{
			Name:      FederationReplicaSetName,
			Namespace: namespace,
		},
		Spec: v1beta1.ReplicaSetSpec{
			Replicas: &replicas,
			Template: template,
		},
	}

	_, err := clientset.Extensions().ReplicaSets(namespace).Create(replicaset)
	framework.ExpectNoError(err, "Creating replicaset %q in namespace %q", replicaset.Name, namespace)
	By(fmt.Sprintf("Successfully created federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
	return replicaset
}
Update e2e test for federation replicaset controller
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"time"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/errors"
"reflect"
)
const (
FederationReplicaSetName = "federation-replicaset"
FederatedReplicaSetTimeout = 120 * time.Second
)
// Create/delete replicaset api objects.
// Ginkgo spec: verifies federated ReplicaSet CRUD through the federation
// apiserver, and that the federated replicaset controller syncs matching
// replicasets into the underlying member clusters.
var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", func() {
	f := framework.NewDefaultFederatedFramework("federation-replicaset")

	Describe("ReplicaSet objects", func() {
		AfterEach(func() {
			framework.SkipUnlessFederated(f.Client)
			// Delete registered replicasets.
			nsName := f.FederationNamespace.Name
			replicasetList, err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).List(api.ListOptions{})
			Expect(err).NotTo(HaveOccurred())
			for _, replicaset := range replicasetList.Items {
				err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &api.DeleteOptions{})
				Expect(err).NotTo(HaveOccurred())
			}
		})

		It("should be created and deleted successfully", func() {
			framework.SkipUnlessFederated(f.Client)
			nsName := f.FederationNamespace.Name
			replicaset := createReplicaSetOrFail(f.FederationClientset_1_4, nsName)
			By(fmt.Sprintf("Creation of replicaset %q in namespace %q succeeded. Deleting replicaset.", replicaset.Name, nsName))
			// Cleanup
			err := f.FederationClientset_1_4.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &api.DeleteOptions{})
			framework.ExpectNoError(err, "Error deleting replicaset %q in namespace %q", replicaset.Name, replicaset.Namespace)
			By(fmt.Sprintf("Deletion of replicaset %q in namespace %q succeeded.", replicaset.Name, nsName))
		})
	})

	// e2e cases for the federated replicaset controller
	Describe("Federated ReplicaSet", func() {
		var (
			clusters       map[string]*cluster
			federationName string
		)
		BeforeEach(func() {
			framework.SkipUnlessFederated(f.Client)
			if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
				federationName = DefaultFederationName
			}
			// registerClusters fills this map; it must be non-nil before the
			// call (writing to a nil map panics).
			clusters = make(map[string]*cluster)
			registerClusters(clusters, UserAgentName, federationName, f)
		})
		AfterEach(func() {
			unregisterClusters(clusters, f)
		})

		It("should create and update matching replicasets in underlying clusters", func() {
			// Use the framework namespace's Name consistently. The previous
			// code passed f.Namespace.Namespace (the ObjectMeta.Namespace of
			// a cluster-scoped Namespace object, i.e. empty) in some calls.
			nsName := f.Namespace.Name
			rs := createReplicaSetOrFail(f.FederationClientset_1_4, nsName)
			defer func() { // cleanup. deletion of replicasets is not supported for underlying clusters
				By(fmt.Sprintf("zero replicas then delete replicaset %q/%q", nsName, rs.Name))
				replicas := int32(0)
				rs.Spec.Replicas = &replicas
				// Cleanup is best-effort; log failures instead of ignoring them.
				if _, err := f.FederationClientset_1_4.ReplicaSets(nsName).Update(rs); err != nil {
					framework.Logf("Error scaling replicaset %q/%q to zero replicas: %v", nsName, rs.Name, err)
				}
				waitForReplicaSetOrFail(f.FederationClientset_1_4, nsName, rs.Name, clusters)
				if err := f.FederationClientset_1_4.ReplicaSets(nsName).Delete(rs.Name, &api.DeleteOptions{}); err != nil {
					framework.Logf("Error deleting replicaset %q/%q: %v", nsName, rs.Name, err)
				}
			}()

			waitForReplicaSetOrFail(f.FederationClientset_1_4, nsName, rs.Name, clusters)
			By(fmt.Sprintf("Successfully created and synced replicaset %q/%q to clusters", nsName, rs.Name))
			updateReplicaSetOrFail(f.FederationClientset_1_4, nsName)
			waitForReplicaSetOrFail(f.FederationClientset_1_4, nsName, rs.Name, clusters)
			By(fmt.Sprintf("Successfully updated and synced replicaset %q/%q to clusters", nsName, rs.Name))
		})
	})
})
// waitForReplicaSetOrFail waits until the federated replicaset and its
// per-cluster children are in sync (see waitForReplicaSet) and fails the
// ginkgo spec if the wait errors or times out.
func waitForReplicaSetOrFail(c *federation_release_1_4.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) {
	err := waitForReplicaSet(c, namespace, replicaSetName, clusters)
	framework.ExpectNoError(err, "Failed to verify replicaset %q/%q, err: %v", namespace, replicaSetName, err)
}
// waitForReplicaSet polls (every 10s, up to FederatedReplicaSetTimeout) until
// each member cluster's replicaset is equivalent to the federated one and the
// replica totals across clusters cover the federated object's counts.
func waitForReplicaSet(c *federation_release_1_4.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) error {
	err := wait.Poll(10*time.Second, FederatedReplicaSetTimeout, func() (bool, error) {
		frs, err := c.ReplicaSets(namespace).Get(replicaSetName)
		if err != nil {
			return false, err
		}
		// Sum spec/status replica counts over all member clusters.
		specReplicas, statusReplicas := int32(0), int32(0)
		for _, cluster := range clusters {
			rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName)
			// NotFound is tolerated: the controller may not (yet) have
			// created the replicaset in this particular cluster.
			if err != nil && !errors.IsNotFound(err) {
				By(fmt.Sprintf("Failed getting replicaset: %q/%q/%q, err: %v", cluster.name, namespace, replicaSetName, err))
				return false, err
			}
			if err == nil {
				if !equivalentReplicaSet(frs, rs) {
					By(fmt.Sprintf("Replicaset meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, frs, rs))
					return false, nil
				}
				specReplicas += *rs.Spec.Replicas
				statusReplicas += rs.Status.Replicas
			}
		}
		// Done when status totals match exactly and spec totals cover the
		// federated replica count (the controller may over-provision).
		if statusReplicas == frs.Status.Replicas && specReplicas >= *frs.Spec.Replicas {
			return true, nil
		}
		By(fmt.Sprintf("Replicas not match, federation replicas: %v/%v, clusters replicas: %v/%v\n", *frs.Spec.Replicas, frs.Status.Replicas, specReplicas, statusReplicas))
		return false, nil
	})
	return err
}
// equivalentReplicaSet reports whether a member cluster's replicaset matches
// the federated one in metadata and spec, ignoring the replica count (the
// federation controller distributes replicas across clusters).
func equivalentReplicaSet(fedReplicaSet, localReplicaSet *v1beta1.ReplicaSet) bool {
	// Work on a value copy of the local spec and neutralize the replica
	// count before comparing, since it legitimately differs per cluster.
	localSpec := localReplicaSet.Spec
	localSpec.Replicas = fedReplicaSet.Spec.Replicas
	if !fedutil.ObjectMetaEquivalent(fedReplicaSet.ObjectMeta, localReplicaSet.ObjectMeta) {
		return false
	}
	return reflect.DeepEqual(fedReplicaSet.Spec, localSpec)
}
// createReplicaSetOrFail creates the canonical federation test replicaset
// (5 replicas, see newReplicaSet) in the given namespace and fails the
// ginkgo spec on any error.
func createReplicaSetOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1beta1.ReplicaSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to createReplicaSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	By(fmt.Sprintf("Creating federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
	rs := newReplicaSet(namespace, FederationReplicaSetName, 5)
	_, err := clientset.Extensions().ReplicaSets(namespace).Create(rs)
	framework.ExpectNoError(err, "Creating replicaset %q in namespace %q", rs.Name, namespace)
	By(fmt.Sprintf("Successfully created federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
	return rs
}
// updateReplicaSetOrFail updates the canonical federation test replicaset to
// 15 replicas in the given namespace and fails the ginkgo spec on any error.
// It returns the object as stored by the API server after the update.
func updateReplicaSetOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1beta1.ReplicaSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateReplicaSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}

	By(fmt.Sprintf("Updating federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
	rs := newReplicaSet(namespace, FederationReplicaSetName, 15)
	updated, err := clientset.ReplicaSets(namespace).Update(rs)
	framework.ExpectNoError(err, "Updating replicaset %q in namespace %q", rs.Name, namespace)
	By(fmt.Sprintf("Successfully updated federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
	return updated
}
// newReplicaSet builds an nginx replicaset object with the given name,
// namespace and replica count, selecting pods labeled name=myrs.
func newReplicaSet(namespace string, name string, replicas int32) *v1beta1.ReplicaSet {
	template := v1.PodTemplateSpec{
		ObjectMeta: v1.ObjectMeta{
			Labels: map[string]string{"name": "myrs"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "nginx", Image: "nginx"},
			},
		},
	}
	return &v1beta1.ReplicaSet{
		ObjectMeta: v1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1beta1.ReplicaSetSpec{
			Replicas: &replicas,
			Selector: &v1beta1.LabelSelector{
				MatchLabels: map[string]string{"name": "myrs"},
			},
			Template: template,
		},
	}
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways, how to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into web server root and loadind the index.html from it.
*
* These tests work only when privileged containers are allowed, exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/
package framework
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
	// Decimal (SI) size units, in bytes.
	Kb  int64 = 1000
	Mb  int64 = 1000 * Kb
	Gb  int64 = 1000 * Mb
	Tb  int64 = 1000 * Gb
	// Binary (IEC) size units, in bytes.
	KiB int64 = 1024
	MiB int64 = 1024 * KiB
	GiB int64 = 1024 * MiB
	TiB int64 = 1024 * GiB

	// Waiting period for volume server (Ceph, ...) to initialize itself.
	VolumeServerPodStartupTimeout = 3 * time.Minute

	// Waiting period for pod to be cleaned up and unmount its volumes so we
	// don't tear down containers with NFS/Ceph/Gluster server too early.
	PodCleanupTimeout = 20 * time.Second
)
// VolumeTestConfig is the configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
	// Namespace to create the server and client pods in.
	Namespace string
	// Prefix of all pods. Typically the test name.
	Prefix string
	// Name of container image for the server pod.
	ServerImage string
	// Ports to export from the server pod. TCP only.
	ServerPorts []int
	// Commands to run in the container image.
	ServerCmds []string
	// Arguments to pass to the container image.
	ServerArgs []string
	// Volumes needed to be mounted to the server container from the host
	// map <host (source) path> -> <container (dst.) path>
	// if <host (source) path> is empty, mount a tmpfs emptydir
	ServerVolumes map[string]string
	// Message to wait for before starting clients
	ServerReadyMessage string
	// Wait for the pod to terminate successfully
	// False indicates that the pod is long running
	WaitForCompletion bool
	// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
	ServerNodeName string
	// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
	ClientNodeName string
	// NodeSelector to use in pod spec (server, client and injector pods).
	NodeSelector map[string]string
}
// VolumeTest contains a volume to mount into a client pod and its
// expected content.
type VolumeTest struct {
	// Volume is the VolumeSource to mount.
	Volume v1.VolumeSource
	// File is the name of the file inside the volume to read.
	File string
	// ExpectedContent is the content the file is expected to contain.
	ExpectedContent string
}
// NewNFSServer is an NFS-specific wrapper for CreateStorageServer. Optional
// args override the server container's arguments.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
	config = VolumeTestConfig{
		Namespace:          namespace,
		Prefix:             "nfs",
		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
		ServerPorts:        []int{2049},
		ServerReadyMessage: "NFS started",
		// An empty source path mounts a tmpfs emptydir at /exports.
		ServerVolumes: map[string]string{"": "/exports"},
	}
	if len(args) != 0 {
		config.ServerArgs = args
	}
	pod, ip = CreateStorageServer(cs, config)
	return config, pod, ip
}
// NewGlusterfsServer is a GlusterFS-specific wrapper for CreateStorageServer.
// It also creates the "<prefix>-server" Endpoints object pointing at the
// server pod, which Gluster volume sources reference by name.
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
	config = VolumeTestConfig{
		Namespace:   namespace,
		Prefix:      "gluster",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
		ServerPorts: []int{24007, 24008, 49152},
	}
	pod, ip = CreateStorageServer(cs, config)

	By("creating Gluster endpoints")
	endpoints := &v1.Endpoints{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Endpoints",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-server",
		},
		Subsets: []v1.EndpointSubset{
			{
				Addresses: []v1.EndpointAddress{
					{
						IP: ip,
					},
				},
				Ports: []v1.EndpointPort{
					{
						Name:     "gluster",
						Port:     24007,
						Protocol: v1.ProtocolTCP,
					},
				},
			},
		},
	}
	// The created object is not needed afterwards, so don't shadow the local
	// variable with the API server's response (the original reassignment was
	// never read — staticcheck SA4006).
	_, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
	Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")
	return config, pod, ip
}
// NewISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
	config = VolumeTestConfig{
		Namespace:   namespace,
		Prefix:      "iscsi",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
		ServerPorts: []int{3260},
		// The iSCSI container needs to insert kernel modules from the host.
		ServerVolumes:      map[string]string{"/lib/modules": "/lib/modules"},
		ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
	}
	pod, ip = CreateStorageServer(cs, config)
	return config, pod, ip
}
// NewRBDServer is a Ceph-RBD-specific wrapper for CreateStorageServer. It
// additionally creates the "<prefix>-secret" Secret holding the client
// keyring that RBD volume sources authenticate with.
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
	config = VolumeTestConfig{
		Namespace:   namespace,
		Prefix:      "rbd",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
		ServerPorts: []int{6789},
		// The Ceph container needs to insert kernel modules from the host.
		ServerVolumes:      map[string]string{"/lib/modules": "/lib/modules"},
		ServerReadyMessage: "Ceph is ready",
	}
	pod, ip = CreateStorageServer(cs, config)

	// create secrets for the server
	secret = &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-secret",
		},
		Data: map[string][]byte{
			// from test/images/volumes-tester/rbd/keyring
			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
		},
		Type: "kubernetes.io/rbd",
	}
	created, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
	if err != nil {
		Failf("Failed to create secrets for Ceph RBD: %v", err)
	}
	return config, pod, created, ip
}
// CreateStorageServer starts a storage server pod described by config (see
// StartVolumeServer) and returns the pod together with its IP address.
// Note: Expect() is called for failures, so no error is returned.
func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) {
	pod = StartVolumeServer(cs, config)
	Expect(pod).NotTo(BeNil(), "storage server pod should not be nil")

	ip = pod.Status.PodIP
	Expect(len(ip)).NotTo(BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
	Logf("%s server pod IP address: %s", config.Prefix, ip)

	return pod, ip
}
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
	podClient := client.CoreV1().Pods(config.Namespace)

	// Translate the configured port list into named TCP container ports.
	portCount := len(config.ServerPorts)
	serverPodPorts := make([]v1.ContainerPort, portCount)
	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.Prefix, i)

		serverPodPorts[i] = v1.ContainerPort{
			Name:          portName,
			ContainerPort: int32(config.ServerPorts[i]),
			Protocol:      v1.ProtocolTCP,
		}
	}

	// Build volume/mount pairs: an empty source path means a tmpfs emptydir,
	// anything else becomes a hostPath mount.
	volumeCount := len(config.ServerVolumes)
	volumes := make([]v1.Volume, volumeCount)
	mounts := make([]v1.VolumeMount, volumeCount)

	i := 0
	for src, dst := range config.ServerVolumes {
		mountName := fmt.Sprintf("path%d", i)
		volumes[i].Name = mountName
		if src == "" {
			volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
		} else {
			volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
				Path: src,
			}
		}

		mounts[i].Name = mountName
		mounts[i].ReadOnly = false
		mounts[i].MountPath = dst

		i++
	}

	serverPodName := fmt.Sprintf("%s-server", config.Prefix)
	By(fmt.Sprint("creating ", serverPodName, " pod"))
	// Server containers run privileged: exporting filesystems (NFS, Gluster,
	// Ceph, ...) needs mounts and other privileged operations inside the pod.
	privileged := new(bool)
	*privileged = true

	// One-shot servers (WaitForCompletion) must not be restarted after exit.
	restartPolicy := v1.RestartPolicyAlways
	if config.WaitForCompletion {
		restartPolicy = v1.RestartPolicyNever
	}
	serverPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: serverPodName,
			Labels: map[string]string{
				"role": serverPodName,
			},
		},

		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  serverPodName,
					Image: config.ServerImage,
					SecurityContext: &v1.SecurityContext{
						Privileged: privileged,
					},
					Command:      config.ServerCmds,
					Args:         config.ServerArgs,
					Ports:        serverPodPorts,
					VolumeMounts: mounts,
				},
			},
			Volumes:       volumes,
			RestartPolicy: restartPolicy,
			NodeName:      config.ServerNodeName,
			NodeSelector:  config.NodeSelector,
		},
	}

	var pod *v1.Pod
	serverPod, err := podClient.Create(serverPod)
	// ok if the server pod already exists. TODO: make this controllable by callers
	if err != nil {
		if apierrs.IsAlreadyExists(err) {
			Logf("Ignore \"already-exists\" error, re-get pod...")
			By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
			serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
			ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
			pod = serverPod
		} else {
			ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
		}
	}
	if config.WaitForCompletion {
		// One-shot server: wait for success and clean the pod up immediately.
		ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
		ExpectNoError(podClient.Delete(serverPod.Name, nil))
	} else {
		// Long-running server: wait for Running, then fetch the live pod
		// object (unless the already-exists path above fetched it).
		ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
		if pod == nil {
			By(fmt.Sprintf("locating the %q server pod", serverPodName))
			pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
			ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
		}
	}
	// Optionally block until the server logs its ready message.
	if config.ServerReadyMessage != "" {
		_, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
		ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
	}
	return pod
}
// CleanUpVolumeServer is a cleanup wrapper for a volume server created by a
// CreateStorageServer variant that has no associated secret.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
	CleanUpVolumeServerWithSecret(f, serverPod, nil)
}
// CleanUpVolumeServerWithSecret deletes the given server pod and, when
// non-nil, its associated secret. Failures are only logged, so cleanup
// proceeds as far as possible.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
	cs := f.ClientSet
	ns := f.Namespace

	if secret != nil {
		Logf("Deleting server secret %q...", secret.Name)
		if err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil {
			Logf("Delete secret failed: %v", err)
		}
	}

	Logf("Deleting server pod %q...", serverPod.Name)
	if err := DeletePodWithWait(f, cs, serverPod); err != nil {
		Logf("Server pod delete failed: %v", err)
	}
}
// VolumeTestCleanup deletes the client pod and, if a server image was
// configured, the server pod created for the given test configuration.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
	By(fmt.Sprint("cleaning the environment after ", config.Prefix))
	defer GinkgoRecover()

	cs := f.ClientSet

	clientName := config.Prefix + "-client"
	err := DeletePodWithWaitByName(f, cs, clientName, config.Namespace)
	Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", clientName, config.Namespace)

	if config.ServerImage != "" {
		serverName := config.Prefix + "-server"
		err := DeletePodWithWaitByName(f, cs, serverName, config.Namespace)
		Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", serverName, config.Namespace)
	}
}
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees expected data, e.g. from the server pod.
// Multiple VolumeTests can be specified to mount multiple volumes to a single
// pod. When fsGroup/fsType are set, the pod's group ownership and the mounted
// filesystem type are verified as well.
func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, fsType string, tests []VolumeTest) {
	By(fmt.Sprint("starting ", config.Prefix, " client"))
	// Short grace period: the client pod has no state worth draining.
	var gracePeriod int64 = 1
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-client",
			Labels: map[string]string{
				"role": config.Prefix + "-client",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       config.Prefix + "-client",
					Image:      BusyBoxImage,
					WorkingDir: "/opt",
					// An imperative and easily debuggable container which reads vol contents for
					// us to scan in the tests or by eye.
					// We expect that /opt is empty in the minimal containers which we use in this test.
					Command: []string{
						"/bin/sh",
						"-c",
						"while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
					},
					VolumeMounts: []v1.VolumeMount{},
				},
			},
			TerminationGracePeriodSeconds: &gracePeriod,
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: &v1.SELinuxOptions{
					Level: "s0:c0,c1",
				},
			},
			Volumes:      []v1.Volume{},
			NodeName:     config.ClientNodeName,
			NodeSelector: config.NodeSelector,
		},
	}
	podsNamespacer := client.CoreV1().Pods(config.Namespace)

	clientPod.Spec.SecurityContext.FSGroup = fsGroup

	// Mount each test volume at /opt/<index>.
	for i, test := range tests {
		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
		clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/opt/%d", i),
		})
		clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
			Name:         volumeName,
			VolumeSource: test.Volume,
		})
	}
	clientPod, err := podsNamespacer.Create(clientPod)
	if err != nil {
		Failf("Failed to create %s pod: %v", clientPod.Name, err)
	}
	ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))

	// Verify each mounted file shows the expected content.
	By("Checking that text file contents are perfect.")
	for i, test := range tests {
		fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
	}
	if fsGroup != nil {
		// Check the group ownership of the first mount point.
		By("Checking fsGroup is correct.")
		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
	}

	if fsType != "" {
		// Check the mounted filesystem type via /proc/mounts.
		By("Checking fsType is correct.")
		_, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute)
		Expect(err).NotTo(HaveOccurred(), "failed: getting the right fsType %s", fsType)
	}
}
// InjectHtml inserts index.html with the given content into the given volume.
// It does so by starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
    By(fmt.Sprint("starting ", config.Prefix, " injector"))
    podClient := client.CoreV1().Pods(config.Namespace)
    // Random suffixes keep pod/volume names unique across repeated injections.
    podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
    volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))
    privileged := true
    injectPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: podName,
            Labels: map[string]string{
                "role": config.Prefix + "-injector",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    config.Prefix + "-injector",
                    Image:   BusyBoxImage,
                    Command: []string{"/bin/sh"},
                    // Write the content and make it world-readable so the
                    // client pod can read it regardless of its UID.
                    Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      volMountName,
                            MountPath: "/mnt",
                        },
                    },
                    // NOTE(review): runs privileged — presumably some volume
                    // plugins need it to write to the mount; confirm.
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &privileged,
                    },
                },
            },
            // The pod runs its write once to completion; never restart it.
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name:         volMountName,
                    VolumeSource: volume,
                },
            },
            NodeName:     config.ClientNodeName,
            NodeSelector: config.NodeSelector,
        },
    }
    // Best-effort cleanup even if creation or the wait below fails.
    // NOTE(review): this does not wait for the pod to disappear, so a
    // follow-up pod could race with the unmount — confirm against callers.
    defer func() {
        podClient.Delete(podName, nil)
    }()
    injectPod, err := podClient.Create(injectPod)
    ExpectNoError(err, "Failed to create injector pod: %v", err)
    err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
    Expect(err).NotTo(HaveOccurred())
}
// CreateGCEVolume provisions a new GCE persistent disk (ext3, writable) and
// returns a PersistentVolumeSource referencing it plus the disk name.
func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
    pdName, err := CreatePDWithRetry()
    ExpectNoError(err)
    source := &v1.PersistentVolumeSource{
        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
            PDName:   pdName,
            FSType:   "ext3",
            ReadOnly: false,
        },
    }
    return source, pdName
}
Wait for first pod to terminate in local pv test
local plugin will skip setting fsGroup if volume is mounted by other pods
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting simple 'index.html' file.
* Then it uses appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed, exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With server outside of Kubernetes (Cinder, ...)
* Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/
package framework
import (
"fmt"
"strconv"
"time"
"k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/rand"
clientset "k8s.io/client-go/kubernetes"
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
    // Decimal (SI) size multipliers.
    Kb int64 = 1000
    Mb int64 = 1000 * Kb
    Gb int64 = 1000 * Mb
    Tb int64 = 1000 * Gb
    // Binary (IEC) size multipliers.
    KiB int64 = 1024
    MiB int64 = 1024 * KiB
    GiB int64 = 1024 * MiB
    TiB int64 = 1024 * GiB

    // Waiting period for volume server (Ceph, ...) to initialize itself.
    VolumeServerPodStartupTimeout = 3 * time.Minute

    // Waiting period for pod to be cleaned up and unmount its volumes so we
    // don't tear down containers with NFS/Ceph/Gluster server too early.
    PodCleanupTimeout = 20 * time.Second
)
// VolumeTestConfig is the configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
    Namespace string
    // Prefix of all pods. Typically the test name.
    Prefix string
    // Name of container image for the server pod.
    ServerImage string
    // Ports to export from the server pod. TCP only.
    ServerPorts []int
    // Commands to run in the container image.
    ServerCmds []string
    // Arguments to pass to the container image.
    ServerArgs []string
    // Volumes needed to be mounted to the server container from the host
    // map <host (source) path> -> <container (dst.) path>
    // if <host (source) path> is empty, mount a tmpfs emptydir
    ServerVolumes map[string]string
    // Message to wait for before starting clients
    ServerReadyMessage string
    // Wait for the pod to terminate successfully
    // False indicates that the pod is long running
    WaitForCompletion bool
    // ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
    ServerNodeName string
    // ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
    ClientNodeName string
    // NodeSelector to use in pod spec (server, client and injector pods).
    NodeSelector map[string]string
}
// VolumeTest contains a volume to mount into a client pod and its
// expected content. The file is checked at /opt/<i>/<File> in the client.
type VolumeTest struct {
    // Volume is the source mounted into the client pod.
    Volume v1.VolumeSource
    // File is the name of the file to check inside the mount.
    File string
    // ExpectedContent is the string that must appear in the file.
    ExpectedContent string
}
// NewNFSServer is the NFS-specific wrapper for CreateStorageServer.
// Optional args are passed through to the server container.
func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
    config = VolumeTestConfig{
        Namespace:          namespace,
        Prefix:             "nfs",
        ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
        ServerPorts:        []int{2049},
        ServerVolumes:      map[string]string{"": "/exports"},
        ServerReadyMessage: "NFS started",
    }
    if len(args) != 0 {
        config.ServerArgs = args
    }
    pod, ip = CreateStorageServer(cs, config)
    return config, pod, ip
}
// NewGlusterfsServer is the GlusterFS-specific wrapper for CreateStorageServer.
// Also creates the gluster endpoints object, which Gluster volume sources
// reference by name ("<prefix>-server").
func NewGlusterfsServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
    config = VolumeTestConfig{
        Namespace:   namespace,
        Prefix:      "gluster",
        ServerImage: imageutils.GetE2EImage(imageutils.VolumeGlusterServer),
        ServerPorts: []int{24007, 24008, 49152},
    }
    pod, ip = CreateStorageServer(cs, config)
    By("creating Gluster endpoints")
    // The endpoints object points clients at the server pod's IP on the
    // gluster management port (24007).
    endpoints := &v1.Endpoints{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Endpoints",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-server",
        },
        Subsets: []v1.EndpointSubset{
            {
                Addresses: []v1.EndpointAddress{
                    {
                        IP: ip,
                    },
                },
                Ports: []v1.EndpointPort{
                    {
                        Name:     "gluster",
                        Port:     24007,
                        Protocol: v1.ProtocolTCP,
                    },
                },
            },
        },
    }
    endpoints, err := cs.CoreV1().Endpoints(namespace).Create(endpoints)
    Expect(err).NotTo(HaveOccurred(), "failed to create endpoints for Gluster server")
    return config, pod, ip
}
// NewISCSIServer is the iSCSI-specific wrapper for CreateStorageServer.
func NewISCSIServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, ip string) {
    config = VolumeTestConfig{
        Namespace:   namespace,
        Prefix:      "iscsi",
        ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
        ServerPorts: []int{3260},
        ServerVolumes: map[string]string{
            // iSCSI container needs to insert modules from the host
            "/lib/modules": "/lib/modules",
        },
        ServerReadyMessage: "Configuration restored from /etc/target/saveconfig.json",
    }
    pod, ip = CreateStorageServer(cs, config)
    return config, pod, ip
}
// NewRBDServer is the CephRBD-specific wrapper for CreateStorageServer.
// Besides the server pod, it also creates the secret holding the Ceph client
// key that RBD volume sources need, and returns that secret to the caller.
func NewRBDServer(cs clientset.Interface, namespace string) (config VolumeTestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
    config = VolumeTestConfig{
        Namespace:   namespace,
        Prefix:      "rbd",
        ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
        ServerPorts: []int{6789},
        ServerVolumes: map[string]string{
            // The RBD server needs to insert kernel modules from the host.
            "/lib/modules": "/lib/modules",
        },
        ServerReadyMessage: "Ceph is ready",
    }
    pod, ip = CreateStorageServer(cs, config)
    // create secrets for the server
    secret = &v1.Secret{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Secret",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-secret",
        },
        Data: map[string][]byte{
            // from test/images/volumes-tester/rbd/keyring
            "key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
        },
        Type: "kubernetes.io/rbd",
    }
    secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret)
    if err != nil {
        Failf("Failed to create secrets for Ceph RBD: %v", err)
    }
    return config, pod, secret, ip
}
// CreateStorageServer wraps StartVolumeServer(): it starts the server pod
// described by config and returns the pod together with its IP address.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(cs clientset.Interface, config VolumeTestConfig) (pod *v1.Pod, ip string) {
    pod = StartVolumeServer(cs, config)
    Expect(pod).NotTo(BeNil(), "storage server pod should not be nil")
    ip = pod.Status.PodIP
    // An empty PodIP means the pod never became ready for traffic.
    Expect(len(ip)).NotTo(BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
    Logf("%s server pod IP address: %s", config.Prefix, ip)
    return pod, ip
}
// StartVolumeServer starts a container specified by config.ServerImage and
// exports all config.ServerPorts from it. The returned pod should be used to
// get the server IP address and create an appropriate VolumeSource.
//
// When config.WaitForCompletion is set, the server pod is expected to run to
// completion and is then deleted; in that case nil is returned.
func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
    podClient := client.CoreV1().Pods(config.Namespace)

    // One TCP ContainerPort per requested server port, named "<prefix>-<i>".
    portCount := len(config.ServerPorts)
    serverPodPorts := make([]v1.ContainerPort, portCount)
    for i := 0; i < portCount; i++ {
        portName := fmt.Sprintf("%s-%d", config.Prefix, i)
        serverPodPorts[i] = v1.ContainerPort{
            Name:          portName,
            ContainerPort: int32(config.ServerPorts[i]),
            Protocol:      v1.ProtocolTCP,
        }
    }

    // Build volumes/mounts from config.ServerVolumes; an empty source path
    // means "mount a tmpfs emptydir", otherwise a hostPath is used.
    volumeCount := len(config.ServerVolumes)
    volumes := make([]v1.Volume, volumeCount)
    mounts := make([]v1.VolumeMount, volumeCount)
    i := 0
    for src, dst := range config.ServerVolumes {
        mountName := fmt.Sprintf("path%d", i)
        volumes[i].Name = mountName
        if src == "" {
            volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
        } else {
            volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
                Path: src,
            }
        }
        mounts[i].Name = mountName
        mounts[i].ReadOnly = false
        mounts[i].MountPath = dst
        i++
    }

    serverPodName := fmt.Sprintf("%s-server", config.Prefix)
    By(fmt.Sprint("creating ", serverPodName, " pod"))
    privileged := new(bool)
    *privileged = true

    // One-shot servers must not be restarted after they exit successfully.
    restartPolicy := v1.RestartPolicyAlways
    if config.WaitForCompletion {
        restartPolicy = v1.RestartPolicyNever
    }
    serverPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: serverPodName,
            Labels: map[string]string{
                "role": serverPodName,
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:  serverPodName,
                    Image: config.ServerImage,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: privileged,
                    },
                    Command:      config.ServerCmds,
                    Args:         config.ServerArgs,
                    Ports:        serverPodPorts,
                    VolumeMounts: mounts,
                },
            },
            Volumes:       volumes,
            RestartPolicy: restartPolicy,
            NodeName:      config.ServerNodeName,
            NodeSelector:  config.NodeSelector,
        },
    }

    var pod *v1.Pod
    serverPod, err := podClient.Create(serverPod)
    // ok if the server pod already exists. TODO: make this controllable by callers
    if err != nil {
        if apierrs.IsAlreadyExists(err) {
            Logf("Ignore \"already-exists\" error, re-get pod...")
            By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
            serverPod, err = podClient.Get(serverPodName, metav1.GetOptions{})
            ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
            pod = serverPod
        } else {
            ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
        }
    }
    if config.WaitForCompletion {
        // The pod runs to completion and is removed; 'pod' stays nil here.
        ExpectNoError(WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
        ExpectNoError(podClient.Delete(serverPod.Name, nil))
    } else {
        ExpectNoError(WaitForPodRunningInNamespace(client, serverPod))
        if pod == nil {
            By(fmt.Sprintf("locating the %q server pod", serverPodName))
            pod, err = podClient.Get(serverPodName, metav1.GetOptions{})
            ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
        }
    }
    // Bug fix: guard against nil 'pod'. With WaitForCompletion set, the pod
    // has already finished and been deleted, so dereferencing pod.Namespace
    // below would panic.
    if config.ServerReadyMessage != "" && pod != nil {
        _, err := LookForStringInLog(pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
        ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
    }
    return pod
}
// CleanUpVolumeServer is the cleanup wrapper for a volume server created
// without a secret; it simply delegates with a nil secret.
func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) {
    CleanUpVolumeServerWithSecret(f, serverPod, nil)
}
// CleanUpVolumeServerWithSecret deletes the server pod created by a
// CreateStorageServer function, plus its secret when one was created.
// Failures are logged, not asserted, so cleanup always proceeds.
func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) {
    cs := f.ClientSet
    ns := f.Namespace
    if secret != nil {
        Logf("Deleting server secret %q...", secret.Name)
        if err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}); err != nil {
            Logf("Delete secret failed: %v", err)
        }
    }
    Logf("Deleting server pod %q...", serverPod.Name)
    if err := DeletePodWithWait(f, cs, serverPod); err != nil {
        Logf("Server pod delete failed: %v", err)
    }
}
// VolumeTestCleanup cleans both server and client pods.
func VolumeTestCleanup(f *Framework, config VolumeTestConfig) {
    By(fmt.Sprint("cleaning the environment after ", config.Prefix))
    // Recover from assertion failures so cleanup can safely run in a defer.
    defer GinkgoRecover()
    cs := f.ClientSet
    err := DeletePodWithWaitByName(f, cs, config.Prefix+"-client", config.Namespace)
    Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-client", config.Namespace)
    // A server pod exists only for containerized servers (ServerImage set).
    if config.ServerImage != "" {
        err := DeletePodWithWaitByName(f, cs, config.Prefix+"-server", config.Namespace)
        Expect(err).To(BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
    }
}
// TestVolumeClient starts a client pod using the given VolumeSources (exported
// by StartVolumeServer()) and checks that the pod sees the expected data, e.g.
// from the server pod. Multiple VolumeTests can be specified to mount multiple
// volumes to a single pod. When fsGroup is non-nil it is applied to the pod's
// security context and verified; when fsType is non-empty it is checked
// against /proc/mounts for the first volume.
func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGroup *int64, fsType string, tests []VolumeTest) {
    By(fmt.Sprint("starting ", config.Prefix, " client"))
    // Short grace period: the busybox loop below needs no graceful shutdown.
    var gracePeriod int64 = 1
    clientPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-client",
            Labels: map[string]string{
                "role": config.Prefix + "-client",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:       config.Prefix + "-client",
                    Image:      BusyBoxImage,
                    WorkingDir: "/opt",
                    // An imperative and easily debuggable container which reads vol contents for
                    // us to scan in the tests or by eye.
                    // We expect that /opt is empty in the minimal containers which we use in this test.
                    Command: []string{
                        "/bin/sh",
                        "-c",
                        "while true ; do cat /opt/0/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
                    },
                    VolumeMounts: []v1.VolumeMount{},
                },
            },
            TerminationGracePeriodSeconds: &gracePeriod,
            SecurityContext: &v1.PodSecurityContext{
                SELinuxOptions: &v1.SELinuxOptions{
                    Level: "s0:c0,c1",
                },
            },
            Volumes:      []v1.Volume{},
            NodeName:     config.ClientNodeName,
            NodeSelector: config.NodeSelector,
        },
    }
    podsNamespacer := client.CoreV1().Pods(config.Namespace)
    clientPod.Spec.SecurityContext.FSGroup = fsGroup
    // Mount the i-th test volume at /opt/<i>.
    for i, test := range tests {
        volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
        clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
            Name:      volumeName,
            MountPath: fmt.Sprintf("/opt/%d", i),
        })
        clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
            Name:         volumeName,
            VolumeSource: test.Volume,
        })
    }
    clientPod, err := podsNamespacer.Create(clientPod)
    if err != nil {
        Failf("Failed to create %s pod: %v", clientPod.Name, err)
    }
    ExpectNoError(WaitForPodRunningInNamespace(client, clientPod))
    By("Checking that text file contents are perfect.")
    for i, test := range tests {
        fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"cat", fileName}, test.ExpectedContent, time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file %s.", fileName)
    }
    if fsGroup != nil {
        By("Checking fsGroup is correct.")
        // The gid must show up in the long listing of the first mount point.
        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"ls", "-ld", "/opt/0"}, strconv.Itoa(int(*fsGroup)), time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: getting the right privileges in the file %v", int(*fsGroup))
    }
    if fsType != "" {
        By("Checking fsType is correct.")
        _, err = LookForStringInPodExec(config.Namespace, clientPod.Name, []string{"grep", " /opt/0 ", "/proc/mounts"}, fsType, time.Minute)
        Expect(err).NotTo(HaveOccurred(), "failed: getting the right fsType %s", fsType)
    }
}
// InjectHtml inserts index.html with the given content into the given volume.
// It does so by starting an auxiliary pod which writes the file there.
// The volume must be writable.
func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
    By(fmt.Sprint("starting ", config.Prefix, " injector"))
    podClient := client.CoreV1().Pods(config.Namespace)
    // Random suffixes keep pod/volume names unique across repeated injections.
    podName := fmt.Sprintf("%s-injector-%s", config.Prefix, rand.String(4))
    volMountName := fmt.Sprintf("%s-volume-%s", config.Prefix, rand.String(4))
    privileged := true
    injectPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: podName,
            Labels: map[string]string{
                "role": config.Prefix + "-injector",
            },
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name:    config.Prefix + "-injector",
                    Image:   BusyBoxImage,
                    Command: []string{"/bin/sh"},
                    // Write the content and make it world-readable so the
                    // client pod can read it regardless of its UID.
                    Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      volMountName,
                            MountPath: "/mnt",
                        },
                    },
                    // NOTE(review): runs privileged — presumably some volume
                    // plugins need it to write to the mount; confirm.
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &privileged,
                    },
                },
            },
            // The pod runs its write once to completion; never restart it.
            RestartPolicy: v1.RestartPolicyNever,
            Volumes: []v1.Volume{
                {
                    Name:         volMountName,
                    VolumeSource: volume,
                },
            },
            NodeName:     config.ClientNodeName,
            NodeSelector: config.NodeSelector,
        },
    }
    // Cleanup waits until the injector pod is really gone, so its volume is
    // unmounted before the caller mounts the same volume elsewhere.
    defer func() {
        podClient.Delete(podName, nil)
        err := waitForPodNotFoundInNamespace(client, podName, injectPod.Namespace, PodDeleteTimeout)
        Expect(err).NotTo(HaveOccurred())
    }()
    injectPod, err := podClient.Create(injectPod)
    ExpectNoError(err, "Failed to create injector pod: %v", err)
    err = WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
    Expect(err).NotTo(HaveOccurred())
}
// CreateGCEVolume creates a fresh GCE PD and wraps it in a writable ext3
// PersistentVolumeSource; the disk name is returned alongside for cleanup.
func CreateGCEVolume() (*v1.PersistentVolumeSource, string) {
    name, err := CreatePDWithRetry()
    ExpectNoError(err)
    disk := &v1.GCEPersistentDiskVolumeSource{
        PDName:   name,
        FSType:   "ext3",
        ReadOnly: false,
    }
    return &v1.PersistentVolumeSource{GCEPersistentDisk: disk}, name
}
|
package jenkins
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/wait"
exutil "github.com/openshift/origin/test/extended/util"
)
// JenkinsRef represents a Jenkins instance running on an OpenShift server.
type JenkinsRef struct {
    oc *exutil.CLI
    // host and port of the Jenkins service (cluster IP).
    host string
    port string
    // The namespace in which the Jenkins server is running
    namespace string
    // password of the "admin" user, used for basic auth on every request.
    password string
}
// FlowDefinition can be marshalled into XML to represent a Jenkins workflow
// job definition (the root <flow-definition> element).
type FlowDefinition struct {
    XMLName          xml.Name `xml:"flow-definition"`
    Plugin           string   `xml:"plugin,attr"`
    KeepDependencies bool     `xml:"keepDependencies"`
    Definition       Definition
}
// Definition is part of a FlowDefinition; Script holds the pipeline DSL text.
type Definition struct {
    XMLName xml.Name `xml:"definition"`
    Class   string   `xml:"class,attr"`
    Plugin  string   `xml:"plugin,attr"`
    Script  string   `xml:"script"`
}
// ginkgolog writes a simple, newline-terminated entry to the GinkgoWriter.
func ginkgolog(format string, a ...interface{}) {
    msg := fmt.Sprintf(format, a...)
    fmt.Fprintln(g.GinkgoWriter, msg)
}
// NewRef creates a jenkins reference from an OC client by looking up the
// "jenkins" service's cluster IP / first port and the admin password.
func NewRef(oc *exutil.CLI) *JenkinsRef {
    g.By("get ip and port for jenkins service")
    serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    g.By("get admin password")
    password := GetAdminPassword(oc)
    o.Expect(password).ShouldNot(o.BeEmpty())
    j := &JenkinsRef{
        oc:        oc,
        host:      serviceIP,
        port:      port,
        namespace: oc.Namespace(),
        password:  password,
    }
    return j
}
// Namespace returns the namespace the Jenkins server runs in.
func (j *JenkinsRef) Namespace() string {
    return j.namespace
}
// BuildURI builds a URI for the Jenkins server from a resource-path format
// string and its arguments.
func (j *JenkinsRef) BuildURI(resourcePathFormat string, a ...interface{}) string {
    return fmt.Sprintf("http://%s:%v/%s", j.host, j.port, fmt.Sprintf(resourcePathFormat, a...))
}
// GetResource submits a GET request (basic-auth as "admin") to this Jenkins
// server. Returns a response body and status code or an error.
func (j *JenkinsRef) GetResource(resourcePathFormat string, a ...interface{}) (string, int, error) {
    uri := j.BuildURI(resourcePathFormat, a...)
    ginkgolog("Retrieving Jenkins resource: %q", uri)
    req, err := http.NewRequest("GET", uri, nil)
    if err != nil {
        return "", 0, fmt.Errorf("Unable to build request for uri %q: %v", uri, err)
    }
    // Disable connection reuse to avoid spurious EOFs on sequential requests:
    // http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
    req.Close = true
    req.SetBasicAuth("admin", j.password)
    client := &http.Client{}
    resp, err := client.Do(req)
    if err != nil {
        return "", 0, fmt.Errorf("Unable to GET uri %q: %v", uri, err)
    }
    defer resp.Body.Close()
    status := resp.StatusCode
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return "", 0, fmt.Errorf("Error reading GET response %q: %v", uri, err)
    }
    return string(body), status, nil
}
// Post sends a POST (basic-auth as "admin") to the Jenkins server.
// Returns response body and status code or an error.
func (j *JenkinsRef) Post(reqBody io.Reader, resourcePathFormat, contentType string, a ...interface{}) (string, int, error) {
    uri := j.BuildURI(resourcePathFormat, a...)
    req, err := http.NewRequest("POST", uri, reqBody)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    // Disable connection reuse to avoid spurious EOFs on sequential requests:
    // http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
    req.Close = true
    if reqBody != nil {
        req.Header.Set("Content-Type", contentType)
        req.Header.Del("Expect") // jenkins will return 417 if we have an expect hdr
    }
    req.SetBasicAuth("admin", j.password)
    client := &http.Client{}
    ginkgolog("Posting to Jenkins resource: %q", uri)
    resp, err := client.Do(req)
    if err != nil {
        return "", 0, fmt.Errorf("Error posting request to %q: %v", uri, err)
    }
    defer resp.Body.Close()
    status := resp.StatusCode
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        return "", 0, fmt.Errorf("Error reading Post response body %q: %v", uri, err)
    }
    return string(body), status, nil
}
// PostXML sends a POST to the Jenkins server with Content-Type
// "application/xml". If a body is specified, it should be XML.
// Returns response body and status code or an error.
func (j *JenkinsRef) PostXML(reqBody io.Reader, resourcePathFormat string, a ...interface{}) (string, int, error) {
    return j.Post(reqBody, resourcePathFormat, "application/xml", a...)
}
// GetResourceWithStatus repeatedly tries to GET a jenkins resource until it
// answers with one of the acceptable HTTP statuses. Polls every 10s for the
// specified duration; returns the last body/status, or an error on timeout.
func (j *JenkinsRef) GetResourceWithStatus(validStatusList []int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, int, error) {
    var retBody string
    var retStatus int
    err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
        body, status, err := j.GetResource(resourcePathFormat, a...)
        if err != nil {
            // Transient errors just trigger another poll.
            ginkgolog("Error accessing resource: %v", err)
            return false, nil
        }
        var found bool
        for _, s := range validStatusList {
            if status == s {
                found = true
                break
            }
        }
        if !found {
            // Fixed log message ("by recevied" -> "but received").
            ginkgolog("Expected http status [%v] during GET but received [%v]", validStatusList, status)
            return false, nil
        }
        retBody = body
        retStatus = status
        return true, nil
    })
    if err != nil {
        uri := j.BuildURI(resourcePathFormat, a...)
        return "", retStatus, fmt.Errorf("Error waiting for status %v from resource path %s: %v", validStatusList, uri, err)
    }
    return retBody, retStatus, nil
}
// WaitForContent waits for a particular HTTP status and HTML matching a
// particular pattern to be returned by this Jenkins server. An error is
// returned if the condition is not matched within the timeout period.
func (j *JenkinsRef) WaitForContent(verificationRegEx string, verificationStatus int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, error) {
    matchingContent := ""
    err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
        content, _, getErr := j.GetResourceWithStatus([]int{verificationStatus}, timeout, resourcePathFormat, a...)
        if getErr != nil {
            return false, nil
        }
        // No regex requested: any content with the right status matches.
        if len(verificationRegEx) == 0 {
            matchingContent = content
            return true, nil
        }
        if regexp.MustCompile(verificationRegEx).MatchString(content) {
            matchingContent = content
            return true, nil
        }
        ginkgolog("Content did not match verification regex %q:\n %v", verificationRegEx, content)
        return false, nil
    })
    if err != nil {
        uri := j.BuildURI(resourcePathFormat, a...)
        return "", fmt.Errorf("Error waiting for status %v and verification regex %q from resource path %s: %v", verificationStatus, verificationRegEx, uri, err)
    }
    return matchingContent, nil
}
// CreateItem submits XML to create a named item on the Jenkins server and
// asserts the server answered 200.
func (j *JenkinsRef) CreateItem(name string, itemDefXML string) {
    g.By(fmt.Sprintf("Creating new jenkins item: %s", name))
    _, status, err := j.PostXML(bytes.NewBufferString(itemDefXML), "CreateItem?name=%s", name)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    o.ExpectWithOffset(1, status).To(o.Equal(200))
}
// GetJobBuildNumber returns the current buildNumber on the named project OR
// "new" if there are no builds against a job yet (Jenkins answers 404).
func (j *JenkinsRef) GetJobBuildNumber(name string, timeout time.Duration) (string, error) {
    body, status, err := j.GetResourceWithStatus([]int{200, 404}, timeout, "job/%s/lastBuild/buildNumber", name)
    switch {
    case err != nil:
        return "", err
    case status == 200:
        return body, nil
    default:
        return "new", nil
    }
}
// StartJob triggers a named Jenkins job. The job can be monitored with the
// returned JobMon, which remembers the pre-trigger build number so the new
// build can be distinguished from the previous one.
func (j *JenkinsRef) StartJob(jobName string) *JobMon {
    lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    jmon := &JobMon{
        j:               j,
        lastBuildNumber: lastBuildNumber,
        buildNumber:     "",
        jobName:         jobName,
    }
    ginkgolog("Current timestamp for [%s]: %q", jobName, jmon.lastBuildNumber)
    g.By(fmt.Sprintf("Starting jenkins job: %s", jobName))
    // Jenkins answers 201 Created when the build is queued.
    _, status, err := j.PostXML(nil, "job/%s/build?delay=0sec", jobName)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    o.ExpectWithOffset(1, status).To(o.Equal(201))
    return jmon
}
// ReadJenkinsJobUsingVars returns the content of a Jenkins job XML file.
// Instances of the string "PROJECT_NAME" are replaced with the specified
// namespace. Variables named in the vars map will also be replaced with their
// corresponding value.
func (j *JenkinsRef) ReadJenkinsJobUsingVars(filename, namespace string, vars map[string]string) string {
    pre := exutil.FixturePath("testdata", "jenkins-plugin", filename)
    // The substituted copy is written to the artifact dir for debugging.
    post := exutil.ArtifactPath(filename)
    if vars == nil {
        vars = map[string]string{}
    }
    vars["PROJECT_NAME"] = namespace
    err := exutil.VarSubOnFile(pre, post, vars)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    data, err := ioutil.ReadFile(post)
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    return string(data)
}
// ReadJenkinsJob returns the content of a Jenkins job XML file. Instances of
// the string "PROJECT_NAME" are replaced with the specified namespace; no
// other variables are substituted.
func (j *JenkinsRef) ReadJenkinsJob(filename, namespace string) string {
    return j.ReadJenkinsJobUsingVars(filename, namespace, nil)
}
// BuildDSLJob returns an XML string defining a Jenkins workflow/pipeline DSL
// job. Instances of the string "PROJECT_NAME" are replaced with the specified
// namespace.
func (j *JenkinsRef) BuildDSLJob(namespace string, scriptLines ...string) (string, error) {
    script := strings.Replace(strings.Join(scriptLines, "\n"), "PROJECT_NAME", namespace, -1)
    flowDef := FlowDefinition{
        Plugin: "workflow-job@2.7",
        Definition: Definition{
            Class:  "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition",
            Plugin: "workflow-cps@2.18",
            Script: script,
        },
    }
    output, err := xml.MarshalIndent(flowDef, " ", " ")
    ginkgolog("Formulated DSL Project XML:\n%s\n\n", output)
    return string(output), err
}
// GetJobConsoleLogs returns the console logs of a particular buildNumber,
// waiting up to 10 minutes for a 200 response.
func (j *JenkinsRef) GetJobConsoleLogs(jobName, buildNumber string) (string, error) {
    return j.WaitForContent("", 200, 10*time.Minute, "job/%s/%s/consoleText", jobName, buildNumber)
}
// GetLastJobConsoleLogs returns the console logs of the last build associated
// with a Jenkins job.
func (j *JenkinsRef) GetLastJobConsoleLogs(jobName string) (string, error) {
    return j.GetJobConsoleLogs(jobName, "lastBuild")
}
// GetAdminPassword returns the JENKINS_PASSWORD value configured on the
// "jenkins" deployment config, or "password" when none is set.
func GetAdminPassword(oc *exutil.CLI) string {
    envs, err := oc.Run("set").Args("env", "dc/jenkins", "--list").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    kvs := strings.Split(envs, "\n")
    for _, kv := range kvs {
        if strings.HasPrefix(kv, "JENKINS_PASSWORD=") {
            // Bug fix: split only on the first '=' so passwords that
            // themselves contain '=' are not truncated.
            s := strings.SplitN(kv, "=", 2)
            fmt.Fprintf(g.GinkgoWriter, "\nJenkins admin password %s\n", s[1])
            return s[1]
        }
    }
    return "password"
}
// FindJenkinsPod finds the single pod running Jenkins; fails the test if the
// "jenkins" deploymentconfig has no pods or more than one.
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
    pods, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
    o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
    if pods == nil || pods.Items == nil {
        g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
    }
    o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1))
    return &pods.Items[0]
}
Fix 404 error in Jenkins plugin extended tests
package jenkins
import (
"bytes"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"regexp"
"strings"
"time"
g "github.com/onsi/ginkgo"
o "github.com/onsi/gomega"
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/wait"
exutil "github.com/openshift/origin/test/extended/util"
)
// JenkinsRef represents a Jenkins instance running on an OpenShift server.
type JenkinsRef struct {
    oc *exutil.CLI
    // host and port of the Jenkins service (cluster IP).
    host string
    port string
    // The namespace in which the Jenkins server is running
    namespace string
    // password of the "admin" user, used for basic auth on every request.
    password string
}
// FlowDefinition can be marshalled into XML to represent a Jenkins workflow
// job definition (the root <flow-definition> element).
type FlowDefinition struct {
    XMLName          xml.Name `xml:"flow-definition"`
    Plugin           string   `xml:"plugin,attr"`
    KeepDependencies bool     `xml:"keepDependencies"`
    Definition       Definition
}
// Definition is part of a FlowDefinition; Script holds the pipeline DSL text.
type Definition struct {
    XMLName xml.Name `xml:"definition"`
    Class   string   `xml:"class,attr"`
    Plugin  string   `xml:"plugin,attr"`
    Script  string   `xml:"script"`
}
// ginkgolog writes a simple, newline-terminated entry to the GinkgoWriter.
func ginkgolog(format string, a ...interface{}) {
    entry := fmt.Sprintf(format, a...)
    fmt.Fprintln(g.GinkgoWriter, entry)
}
// NewRef creates a jenkins reference from an OC client by looking up the
// "jenkins" service's cluster IP / first port and the admin password.
func NewRef(oc *exutil.CLI) *JenkinsRef {
    g.By("get ip and port for jenkins service")
    serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output()
    o.Expect(err).NotTo(o.HaveOccurred())
    g.By("get admin password")
    password := GetAdminPassword(oc)
    o.Expect(password).ShouldNot(o.BeEmpty())
    j := &JenkinsRef{
        oc:        oc,
        host:      serviceIP,
        port:      port,
        namespace: oc.Namespace(),
        password:  password,
    }
    return j
}
// Namespace returns the Jenkins namespace (the CLI namespace captured when
// the JenkinsRef was constructed).
func (j *JenkinsRef) Namespace() string {
return j.namespace
}
// BuildURI expands resourcePathFormat with its arguments and joins the result
// onto the server's "http://host:port/" base to form a full request URI.
func (j *JenkinsRef) BuildURI(resourcePathFormat string, a ...interface{}) string {
	return fmt.Sprintf("http://%s:%v/%s", j.host, j.port, fmt.Sprintf(resourcePathFormat, a...))
}
// GetResource submits a GET request to this Jenkins server.
// Returns a response body and status code or an error.
func (j *JenkinsRef) GetResource(resourcePathFormat string, a ...interface{}) (string, int, error) {
	uri := j.BuildURI(resourcePathFormat, a...)
	ginkgolog("Retrieving Jenkins resource: %q", uri)

	req, err := http.NewRequest("GET", uri, nil)
	if err != nil {
		return "", 0, fmt.Errorf("Unable to build request for uri %q: %v", uri, err)
	}
	// Disable connection reuse to avoid intermittent EOFs on successive requests:
	// http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
	req.Close = true
	req.SetBasicAuth("admin", j.password)

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return "", 0, fmt.Errorf("Unable to GET uri %q: %v", uri, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", 0, fmt.Errorf("Error reading GET response %q: %v", uri, err)
	}
	return string(body), resp.StatusCode, nil
}
// Post sends a POST to the Jenkins server. Returns response body and status code or an error.
func (j *JenkinsRef) Post(reqBody io.Reader, resourcePathFormat, contentType string, a ...interface{}) (string, int, error) {
	uri := j.BuildURI(resourcePathFormat, a...)

	req, err := http.NewRequest("POST", uri, reqBody)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	// Disable connection reuse to avoid intermittent EOFs on successive requests:
	// http://stackoverflow.com/questions/17714494/golang-http-request-results-in-eof-errors-when-making-multiple-requests-successi
	req.Close = true
	if reqBody != nil {
		req.Header.Set("Content-Type", contentType)
		// jenkins will return 417 if we have an expect hdr
		req.Header.Del("Expect")
	}
	req.SetBasicAuth("admin", j.password)

	ginkgolog("Posting to Jenkins resource: %q", uri)
	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return "", 0, fmt.Errorf("Error posting request to %q: %v", uri, err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", 0, fmt.Errorf("Error reading Post response body %q: %v", uri, err)
	}
	return string(body), resp.StatusCode, nil
}
// PostXML sends a POST to the Jenkins server. If a body is specified, it should be XML.
// Returns response body and status code or an error.
// It simply delegates to Post with the "application/xml" content type.
func (j *JenkinsRef) PostXML(reqBody io.Reader, resourcePathFormat string, a ...interface{}) (string, int, error) {
return j.Post(reqBody, resourcePathFormat, "application/xml", a...)
}
// GetResourceWithStatus repeatedly tries to GET a jenkins resource with an acceptable
// HTTP status. Retries for the specified duration, polling every 10 seconds.
// Returns the body and status of the first acceptable response, or an error
// (with the last observed status) when the timeout elapses.
func (j *JenkinsRef) GetResourceWithStatus(validStatusList []int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, int, error) {
	var retBody string
	var retStatus int
	err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
		body, status, err := j.GetResource(resourcePathFormat, a...)
		if err != nil {
			// Transient failure: log and keep polling.
			ginkgolog("Error accessing resource: %v", err)
			return false, nil
		}
		var found bool
		for _, s := range validStatusList {
			if status == s {
				found = true
				break
			}
		}
		if !found {
			// Fixed log text; it previously read "during GET by recevied".
			ginkgolog("Expected http status [%v] during GET but received [%v]", validStatusList, status)
			return false, nil
		}
		retBody = body
		retStatus = status
		return true, nil
	})
	if err != nil {
		uri := j.BuildURI(resourcePathFormat, a...)
		return "", retStatus, fmt.Errorf("Error waiting for status %v from resource path %s: %v", validStatusList, uri, err)
	}
	return retBody, retStatus, nil
}
// WaitForContent waits for a particular HTTP status and HTML matching a particular
// pattern to be returned by this Jenkins server. An error will be returned
// if the condition is not matched within the timeout period.
func (j *JenkinsRef) WaitForContent(verificationRegEx string, verificationStatus int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, error) {
	matchingContent := ""
	err := wait.Poll(10*time.Second, timeout, func() (bool, error) {
		content, _, getErr := j.GetResourceWithStatus([]int{verificationStatus}, timeout, resourcePathFormat, a...)
		if getErr != nil {
			return false, nil
		}
		// An empty regex means any body with the right status is acceptable.
		if len(verificationRegEx) == 0 {
			matchingContent = content
			return true, nil
		}
		if regexp.MustCompile(verificationRegEx).MatchString(content) {
			matchingContent = content
			return true, nil
		}
		ginkgolog("Content did not match verification regex %q:\n %v", verificationRegEx, content)
		return false, nil
	})
	if err != nil {
		uri := j.BuildURI(resourcePathFormat, a...)
		return "", fmt.Errorf("Error waiting for status %v and verification regex %q from resource path %s: %v", verificationStatus, verificationRegEx, uri, err)
	}
	return matchingContent, nil
}
// CreateItem submits XML to create a named item on the Jenkins server.
// It fails the spec unless Jenkins answers 200.
func (j *JenkinsRef) CreateItem(name string, itemDefXML string) {
	g.By(fmt.Sprintf("Creating new jenkins item: %s", name))
	payload := bytes.NewBufferString(itemDefXML)
	_, status, err := j.PostXML(payload, "createItem?name=%s", name)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	o.ExpectWithOffset(1, status).To(o.Equal(200))
}
// GetJobBuildNumber returns the current buildNumber on the named project OR "new" if
// there are no builds against a job yet.
func (j *JenkinsRef) GetJobBuildNumber(name string, timeout time.Duration) (string, error) {
	body, status, err := j.GetResourceWithStatus([]int{200, 404}, timeout, "job/%s/lastBuild/buildNumber", name)
	switch {
	case err != nil:
		return "", err
	case status == 200:
		return body, nil
	default:
		// 404 from lastBuild: the job has never been built.
		return "new", nil
	}
}
// StartJob triggers a named Jenkins job. The job can be monitored with the
// returned object.
func (j *JenkinsRef) StartJob(jobName string) *JobMon {
	lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	monitor := &JobMon{
		j:               j,
		lastBuildNumber: lastBuildNumber,
		buildNumber:     "",
		jobName:         jobName,
	}

	ginkgolog("Current timestamp for [%s]: %q", jobName, monitor.lastBuildNumber)
	g.By(fmt.Sprintf("Starting jenkins job: %s", jobName))
	// Jenkins answers 201 Created when the build is queued successfully.
	_, status, err := j.PostXML(nil, "job/%s/build?delay=0sec", jobName)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	o.ExpectWithOffset(1, status).To(o.Equal(201))

	return monitor
}
// ReadJenkinsJobUsingVars returns the content of a Jenkins job XML file. Instances of the
// string "PROJECT_NAME" are replaced with the specified namespace.
// Variables named in the vars map will also be replaced with their
// corresponding value.
func (j *JenkinsRef) ReadJenkinsJobUsingVars(filename, namespace string, vars map[string]string) string {
	src := exutil.FixturePath("testdata", "jenkins-plugin", filename)
	dst := exutil.ArtifactPath(filename)

	substitutions := vars
	if substitutions == nil {
		substitutions = map[string]string{}
	}
	substitutions["PROJECT_NAME"] = namespace

	err := exutil.VarSubOnFile(src, dst, substitutions)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())

	data, err := ioutil.ReadFile(dst)
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	return string(data)
}
// ReadJenkinsJob returns the content of a Jenkins job XML file. Instances of the
// string "PROJECT_NAME" are replaced with the specified namespace.
// Convenience wrapper around ReadJenkinsJobUsingVars with no extra variables.
func (j *JenkinsRef) ReadJenkinsJob(filename, namespace string) string {
return j.ReadJenkinsJobUsingVars(filename, namespace, nil)
}
// BuildDSLJob returns an XML string defining a Jenkins workflow/pipeline DSL job. Instances of the
// string "PROJECT_NAME" are replaced with the specified namespace.
func (j *JenkinsRef) BuildDSLJob(namespace string, scriptLines ...string) (string, error) {
	script := strings.Replace(strings.Join(scriptLines, "\n"), "PROJECT_NAME", namespace, -1)
	flowDef := FlowDefinition{
		Plugin: "workflow-job@2.7",
		Definition: Definition{
			Class:  "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition",
			Plugin: "workflow-cps@2.18",
			Script: script,
		},
	}
	output, err := xml.MarshalIndent(flowDef, " ", " ")
	ginkgolog("Formulated DSL Project XML:\n%s\n\n", output)
	return string(output), err
}
// GetJobConsoleLogs returns the console logs of a particular buildNumber.
// It waits up to 10 minutes for the consoleText endpoint to answer 200.
func (j *JenkinsRef) GetJobConsoleLogs(jobName, buildNumber string) (string, error) {
return j.WaitForContent("", 200, 10*time.Minute, "job/%s/%s/consoleText", jobName, buildNumber)
}
// GetLastJobConsoleLogs returns the last build associated with a Jenkins job.
// It uses Jenkins' "lastBuild" alias rather than a concrete build number.
func (j *JenkinsRef) GetLastJobConsoleLogs(jobName string) (string, error) {
return j.GetJobConsoleLogs(jobName, "lastBuild")
}
// GetAdminPassword returns the Jenkins admin password taken from the
// JENKINS_PASSWORD environment variable of the "jenkins" deployment config.
// If the variable is not present it falls back to the default "password".
func GetAdminPassword(oc *exutil.CLI) string {
	envs, err := oc.Run("set").Args("env", "dc/jenkins", "--list").Output()
	o.Expect(err).NotTo(o.HaveOccurred())
	for _, kv := range strings.Split(envs, "\n") {
		if strings.HasPrefix(kv, "JENKINS_PASSWORD=") {
			// TrimPrefix keeps the complete value even when the password
			// itself contains '='; strings.Split(kv, "=")[1] truncated it.
			password := strings.TrimPrefix(kv, "JENKINS_PASSWORD=")
			fmt.Fprintf(g.GinkgoWriter, "\nJenkins admin password %s\n", password)
			return password
		}
	}
	return "password"
}
// FindJenkinsPod locates the single pod backing the "jenkins" deployment
// config in the CLI's current namespace; it fails the spec when no pods match
// or when more than one is running.
func FindJenkinsPod(oc *exutil.CLI) *kapi.Pod {
	podList, err := exutil.GetDeploymentConfigPods(oc, "jenkins")
	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
	if podList == nil || podList.Items == nil {
		g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace())
	}
	o.ExpectWithOffset(1, len(podList.Items)).To(o.Equal(1))
	return &podList.Items[0]
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package io
// Simple byte buffer for marshaling data.
import (
"io";
"os";
)
// TODO(r): Do better memory management.
// bytecopy copies count bytes from src starting at soff into dst starting at
// doff. It is equivalent to the builtin copy on the corresponding sub-slices,
// which replaces the former hand-written byte loop.
func bytecopy(dst []byte, doff int, src []byte, soff int, count int) {
	copy(dst[doff:doff+count], src[soff:soff+count])
}
// A ByteBuffer is a simple implementation of the io.Read and io.Write interfaces
// connected to a buffer of bytes.
// The zero value for ByteBuffer is an empty buffer ready to use.
type ByteBuffer struct {
buf []byte;
off int; // Read from here
len int; // Write to here
// cap records the allocated capacity of buf; kept alongside buf by Write's
// growth logic.
cap int;
}
// Reset resets the buffer so it has no content, keeping the allocated
// storage for reuse.
func (b *ByteBuffer) Reset() {
	b.len = 0
	b.off = 0
}
// Write appends the contents of p to the buffer. The return
// value is the length of p; err is always nil.
func (b *ByteBuffer) Write(p []byte) (n int, err *os.Error) {
	plen := len(p)
	// First write: allocate room for the data plus 1k of slack.
	if len(b.buf) == 0 {
		b.cap = plen + 1024
		b.buf = make([]byte, b.cap)
		b.len = 0
	}
	// Grow geometrically when the incoming bytes would overflow capacity.
	if b.len+plen > b.cap {
		b.cap = 2 * (b.cap + plen)
		grown := make([]byte, b.cap)
		bytecopy(grown, 0, b.buf, 0, b.len)
		b.buf = grown
	}
	bytecopy(b.buf, b.len, p, 0, plen)
	b.len += plen
	return plen, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value is the number of bytes read; err is always nil.
func (b *ByteBuffer) Read(p []byte) (n int, err *os.Error) {
	if len(b.buf) == 0 {
		return 0, nil
	}
	if b.off == b.len {
		// Fully drained: recycle the buffer for the next writer.
		b.Reset()
		return 0, nil
	}
	n = len(p)
	if avail := b.len - b.off; n > avail {
		n = avail
	}
	bytecopy(p, 0, b.buf, b.off, n)
	b.off += n
	return n, nil
}
// Len returns the length of the underlying buffer.
// Concretely this is b.len, the write offset: the total number of bytes
// written so far, not the allocated capacity.
func (b *ByteBuffer) Len() int {
return b.len
}
// Off returns the location within the buffer of the next byte to be read.
func (b *ByteBuffer) Off() int {
return b.off
}
// Data returns the contents of the unread portion of the buffer,
// i.e. the bytes between the read offset and the write offset.
func (b *ByteBuffer) Data() []byte {
return b.buf[b.off:b.len]
}
// NewByteBufferFromArray creates and initializes a new ByteBuffer
// with buf as its initial contents.
func NewByteBufferFromArray(buf []byte) *ByteBuffer {
	return &ByteBuffer{
		buf: buf,
		off: 0,
		len: len(buf),
		cap: len(buf),
	}
}
Extremely minor fix to ByteBuffer.
R=r
APPROVED=r
DELTA=1 (0 added, 0 deleted, 1 changed)
OCL=27123
CL=27130
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package io
// Simple byte buffer for marshaling data.
import (
"io";
"os";
)
// TODO(r): Do better memory management.
// bytecopy copies count bytes from src starting at soff into dst starting at
// doff. It is equivalent to the builtin copy on the corresponding sub-slices,
// which replaces the former hand-written byte loop.
func bytecopy(dst []byte, doff int, src []byte, soff int, count int) {
	copy(dst[doff:doff+count], src[soff:soff+count])
}
// A ByteBuffer is a simple implementation of the io.Read and io.Write interfaces
// connected to a buffer of bytes.
// The zero value for ByteBuffer is an empty buffer ready to use.
type ByteBuffer struct {
buf []byte;
off int; // Read from here
len int; // Write to here
// cap records the allocated capacity of buf; kept alongside buf by Write's
// growth logic.
cap int;
}
// Reset resets the buffer so it has no content, keeping the allocated
// storage for reuse.
func (b *ByteBuffer) Reset() {
	b.len = 0
	b.off = 0
}
// Write appends the contents of p to the buffer. The return
// value is the length of p; err is always nil.
func (b *ByteBuffer) Write(p []byte) (n int, err *os.Error) {
	plen := len(p)
	// First write: allocate room for the data plus 1k of slack.
	if len(b.buf) == 0 {
		b.cap = plen + 1024
		b.buf = make([]byte, b.cap)
		b.len = 0
	}
	// Grow geometrically when the incoming bytes would overflow capacity.
	if b.len+plen > b.cap {
		b.cap = 2 * (b.cap + plen)
		grown := make([]byte, b.cap)
		bytecopy(grown, 0, b.buf, 0, b.len)
		b.buf = grown
	}
	bytecopy(b.buf, b.len, p, 0, plen)
	b.len += plen
	return plen, nil
}
// Read reads the next len(p) bytes from the buffer or until the buffer
// is drained. The return value is the number of bytes read; err is always nil.
func (b *ByteBuffer) Read(p []byte) (n int, err *os.Error) {
	if len(b.buf) == 0 {
		return 0, nil
	}
	if b.off == b.len {
		// Fully drained: recycle the buffer for the next writer.
		b.Reset()
		return 0, nil
	}
	n = len(p)
	if avail := b.len - b.off; n > avail {
		n = avail
	}
	bytecopy(p, 0, b.buf, b.off, n)
	b.off += n
	return n, nil
}
// Len returns the length of the underlying buffer.
// Concretely this is b.len, the write offset: the total number of bytes
// written so far, not the allocated capacity.
func (b *ByteBuffer) Len() int {
return b.len
}
// Off returns the location within the buffer of the next byte to be read.
func (b *ByteBuffer) Off() int {
return b.off
}
// Data returns the contents of the unread portion of the buffer,
// i.e. the bytes between the read offset and the write offset.
func (b *ByteBuffer) Data() []byte {
return b.buf[b.off:b.len]
}
// NewByteBufferFromArray creates and initializes a new ByteBuffer
// with buf as its initial contents.
func NewByteBufferFromArray(buf []byte) *ByteBuffer {
	return &ByteBuffer{
		buf: buf,
		off: 0,
		len: len(buf),
		cap: len(buf),
	}
}
|
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcjson
// GetTransactionDetailsResult models the details data from the gettransaction command.
//
// This models the "short" version of the ListTransactionsResult type, which
// excludes fields common to the transaction. These common fields are instead
// part of the GetTransactionResult.
type GetTransactionDetailsResult struct {
Account string `json:"account"`
Address string `json:"address,omitempty"`
Amount float64 `json:"amount"`
Category string `json:"category"`
InvolvesWatchOnly bool `json:"involveswatchonly,omitempty"`
// Fee is a pointer so an absent fee can be distinguished from a zero fee.
Fee *float64 `json:"fee,omitempty"`
Vout uint32 `json:"vout"`
}
// GetTransactionResult models the data from the gettransaction command.
type GetTransactionResult struct {
Amount float64 `json:"amount"`
Fee float64 `json:"fee,omitempty"`
Confirmations int64 `json:"confirmations"`
BlockHash string `json:"blockhash"`
BlockIndex int64 `json:"blockindex"`
BlockTime int64 `json:"blocktime"`
TxID string `json:"txid"`
WalletConflicts []string `json:"walletconflicts"`
Time int64 `json:"time"`
TimeReceived int64 `json:"timereceived"`
// Details holds per-address entries; see GetTransactionDetailsResult.
Details []GetTransactionDetailsResult `json:"details"`
Hex string `json:"hex"`
}
// InfoWalletResult models the data returned by the wallet server getinfo
// command.
type InfoWalletResult struct {
Version int32 `json:"version"`
ProtocolVersion int32 `json:"protocolversion"`
WalletVersion int32 `json:"walletversion"`
Balance float64 `json:"balance"`
Blocks int32 `json:"blocks"`
TimeOffset int64 `json:"timeoffset"`
Connections int32 `json:"connections"`
Proxy string `json:"proxy"`
Difficulty float64 `json:"difficulty"`
TestNet bool `json:"testnet"`
KeypoolOldest int64 `json:"keypoololdest"`
KeypoolSize int32 `json:"keypoolsize"`
UnlockedUntil int64 `json:"unlocked_until"`
PaytxFee float64 `json:"paytxfee"`
RelayFee float64 `json:"relayfee"`
Errors string `json:"errors"`
}
// ListTransactionsResult models the data from the listtransactions command.
//
// BlockHeight and Label are optional (pointer + omitempty) so the type stays
// backward compatible with servers older than Bitcoin Core 0.20.0, which do
// not emit those fields. This matches the updated definition used elsewhere
// in this codebase.
type ListTransactionsResult struct {
	Abandoned         bool     `json:"abandoned"`
	Account           string   `json:"account"`
	Address           string   `json:"address,omitempty"`
	Amount            float64  `json:"amount"`
	BIP125Replaceable string   `json:"bip125-replaceable,omitempty"`
	BlockHash         string   `json:"blockhash,omitempty"`
	BlockHeight       *int32   `json:"blockheight,omitempty"`
	BlockIndex        *int64   `json:"blockindex,omitempty"`
	BlockTime         int64    `json:"blocktime,omitempty"`
	Category          string   `json:"category"`
	Confirmations     int64    `json:"confirmations"`
	Fee               *float64 `json:"fee,omitempty"`
	Generated         bool     `json:"generated,omitempty"`
	InvolvesWatchOnly bool     `json:"involveswatchonly,omitempty"`
	Label             *string  `json:"label,omitempty"`
	Time              int64    `json:"time"`
	TimeReceived      int64    `json:"timereceived"`
	Trusted           bool     `json:"trusted"`
	TxID              string   `json:"txid"`
	Vout              uint32   `json:"vout"`
	WalletConflicts   []string `json:"walletconflicts"`
	Comment           string   `json:"comment,omitempty"`
	OtherAccount      string   `json:"otheraccount,omitempty"`
}
// ListReceivedByAccountResult models the data from the listreceivedbyaccount
// command.
type ListReceivedByAccountResult struct {
Account string `json:"account"`
Amount float64 `json:"amount"`
Confirmations uint64 `json:"confirmations"`
}
// ListReceivedByAddressResult models the data from the listreceivedbyaddress
// command.
type ListReceivedByAddressResult struct {
Account string `json:"account"`
Address string `json:"address"`
Amount float64 `json:"amount"`
Confirmations uint64 `json:"confirmations"`
TxIDs []string `json:"txids,omitempty"`
InvolvesWatchonly bool `json:"involvesWatchonly,omitempty"`
}
// ListSinceBlockResult models the data from the listsinceblock command.
type ListSinceBlockResult struct {
Transactions []ListTransactionsResult `json:"transactions"`
LastBlock string `json:"lastblock"`
}
// ListUnspentResult models a successful response from the listunspent request.
type ListUnspentResult struct {
TxID string `json:"txid"`
Vout uint32 `json:"vout"`
Address string `json:"address"`
Account string `json:"account"`
ScriptPubKey string `json:"scriptPubKey"`
// RedeemScript is only present for P2SH-style outputs the wallet knows about.
RedeemScript string `json:"redeemScript,omitempty"`
Amount float64 `json:"amount"`
Confirmations int64 `json:"confirmations"`
Spendable bool `json:"spendable"`
}
// SignRawTransactionError models the data that contains script verification
// errors from the signrawtransaction request.
type SignRawTransactionError struct {
TxID string `json:"txid"`
Vout uint32 `json:"vout"`
ScriptSig string `json:"scriptSig"`
Sequence uint32 `json:"sequence"`
Error string `json:"error"`
}
// SignRawTransactionResult models the data from the signrawtransaction
// command.
type SignRawTransactionResult struct {
Hex string `json:"hex"`
Complete bool `json:"complete"`
// Errors lists per-input script verification failures, when any occurred.
Errors []SignRawTransactionError `json:"errors,omitempty"`
}
// ValidateAddressWalletResult models the data returned by the wallet server
// validateaddress command.
type ValidateAddressWalletResult struct {
IsValid bool `json:"isvalid"`
Address string `json:"address,omitempty"`
IsMine bool `json:"ismine,omitempty"`
IsWatchOnly bool `json:"iswatchonly,omitempty"`
IsScript bool `json:"isscript,omitempty"`
PubKey string `json:"pubkey,omitempty"`
IsCompressed bool `json:"iscompressed,omitempty"`
Account string `json:"account,omitempty"`
Addresses []string `json:"addresses,omitempty"`
Hex string `json:"hex,omitempty"`
Script string `json:"script,omitempty"`
SigsRequired int32 `json:"sigsrequired,omitempty"`
}
// GetBestBlockResult models the data from the getbestblock command.
type GetBestBlockResult struct {
Hash string `json:"hash"`
Height int32 `json:"height"`
}
// BalanceDetailsResult models the details data from the `getbalances` command.
type BalanceDetailsResult struct {
Trusted float64 `json:"trusted"`
UntrustedPending float64 `json:"untrusted_pending"`
Immature float64 `json:"immature"`
// Used is a pointer so an absent value can be distinguished from zero.
Used *float64 `json:"used"`
}
// GetBalancesResult models the data returned from the getbalances command.
type GetBalancesResult struct {
Mine BalanceDetailsResult `json:"mine"`
WatchOnly *BalanceDetailsResult `json:"watchonly"`
}
// ImportMultiResults is a slice that models the result of the importmulti command.
//
// Each item in the slice contains the execution result corresponding to the input
// requests of type btcjson.ImportMultiRequest, passed to the ImportMulti[Async]
// function.
type ImportMultiResults []struct {
Success bool `json:"success"`
Error *RPCError `json:"error,omitempty"`
Warnings *[]string `json:"warnings,omitempty"`
}
btcjson: update ListTransactionsResult for Bitcoin 0.20.0
This only adds new fields as optional, in order to make this change
backwards compatible with older versions of Bitcoin Core.
// Copyright (c) 2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcjson
// GetTransactionDetailsResult models the details data from the gettransaction command.
//
// This models the "short" version of the ListTransactionsResult type, which
// excludes fields common to the transaction. These common fields are instead
// part of the GetTransactionResult.
type GetTransactionDetailsResult struct {
Account string `json:"account"`
Address string `json:"address,omitempty"`
Amount float64 `json:"amount"`
Category string `json:"category"`
InvolvesWatchOnly bool `json:"involveswatchonly,omitempty"`
// Fee is a pointer so an absent fee can be distinguished from a zero fee.
Fee *float64 `json:"fee,omitempty"`
Vout uint32 `json:"vout"`
}
// GetTransactionResult models the data from the gettransaction command.
type GetTransactionResult struct {
Amount float64 `json:"amount"`
Fee float64 `json:"fee,omitempty"`
Confirmations int64 `json:"confirmations"`
BlockHash string `json:"blockhash"`
BlockIndex int64 `json:"blockindex"`
BlockTime int64 `json:"blocktime"`
TxID string `json:"txid"`
WalletConflicts []string `json:"walletconflicts"`
Time int64 `json:"time"`
TimeReceived int64 `json:"timereceived"`
// Details holds per-address entries; see GetTransactionDetailsResult.
Details []GetTransactionDetailsResult `json:"details"`
Hex string `json:"hex"`
}
// InfoWalletResult models the data returned by the wallet server getinfo
// command.
type InfoWalletResult struct {
Version int32 `json:"version"`
ProtocolVersion int32 `json:"protocolversion"`
WalletVersion int32 `json:"walletversion"`
Balance float64 `json:"balance"`
Blocks int32 `json:"blocks"`
TimeOffset int64 `json:"timeoffset"`
Connections int32 `json:"connections"`
Proxy string `json:"proxy"`
Difficulty float64 `json:"difficulty"`
TestNet bool `json:"testnet"`
KeypoolOldest int64 `json:"keypoololdest"`
KeypoolSize int32 `json:"keypoolsize"`
UnlockedUntil int64 `json:"unlocked_until"`
PaytxFee float64 `json:"paytxfee"`
RelayFee float64 `json:"relayfee"`
Errors string `json:"errors"`
}
// ListTransactionsResult models the data from the listtransactions command.
type ListTransactionsResult struct {
Abandoned bool `json:"abandoned"`
Account string `json:"account"`
Address string `json:"address,omitempty"`
Amount float64 `json:"amount"`
BIP125Replaceable string `json:"bip125-replaceable,omitempty"`
BlockHash string `json:"blockhash,omitempty"`
// BlockHeight was added in Bitcoin Core 0.20.0; pointer + omitempty keeps
// the type backward compatible with older servers that omit the field.
BlockHeight *int32 `json:"blockheight,omitempty"`
BlockIndex *int64 `json:"blockindex,omitempty"`
BlockTime int64 `json:"blocktime,omitempty"`
Category string `json:"category"`
Confirmations int64 `json:"confirmations"`
Fee *float64 `json:"fee,omitempty"`
Generated bool `json:"generated,omitempty"`
InvolvesWatchOnly bool `json:"involveswatchonly,omitempty"`
// Label was also added in Bitcoin Core 0.20.0 and is optional for the same
// backward-compatibility reason.
Label *string `json:"label,omitempty"`
Time int64 `json:"time"`
TimeReceived int64 `json:"timereceived"`
Trusted bool `json:"trusted"`
TxID string `json:"txid"`
Vout uint32 `json:"vout"`
WalletConflicts []string `json:"walletconflicts"`
Comment string `json:"comment,omitempty"`
OtherAccount string `json:"otheraccount,omitempty"`
}
// ListReceivedByAccountResult models the data from the listreceivedbyaccount
// command.
type ListReceivedByAccountResult struct {
Account string `json:"account"`
Amount float64 `json:"amount"`
Confirmations uint64 `json:"confirmations"`
}
// ListReceivedByAddressResult models the data from the listreceivedbyaddress
// command.
type ListReceivedByAddressResult struct {
Account string `json:"account"`
Address string `json:"address"`
Amount float64 `json:"amount"`
Confirmations uint64 `json:"confirmations"`
TxIDs []string `json:"txids,omitempty"`
InvolvesWatchonly bool `json:"involvesWatchonly,omitempty"`
}
// ListSinceBlockResult models the data from the listsinceblock command.
type ListSinceBlockResult struct {
Transactions []ListTransactionsResult `json:"transactions"`
LastBlock string `json:"lastblock"`
}
// ListUnspentResult models a successful response from the listunspent request.
type ListUnspentResult struct {
TxID string `json:"txid"`
Vout uint32 `json:"vout"`
Address string `json:"address"`
Account string `json:"account"`
ScriptPubKey string `json:"scriptPubKey"`
// RedeemScript is only present for P2SH-style outputs the wallet knows about.
RedeemScript string `json:"redeemScript,omitempty"`
Amount float64 `json:"amount"`
Confirmations int64 `json:"confirmations"`
Spendable bool `json:"spendable"`
}
// SignRawTransactionError models the data that contains script verification
// errors from the signrawtransaction request.
type SignRawTransactionError struct {
TxID string `json:"txid"`
Vout uint32 `json:"vout"`
ScriptSig string `json:"scriptSig"`
Sequence uint32 `json:"sequence"`
Error string `json:"error"`
}
// SignRawTransactionResult models the data from the signrawtransaction
// command.
type SignRawTransactionResult struct {
Hex string `json:"hex"`
Complete bool `json:"complete"`
// Errors lists per-input script verification failures, when any occurred.
Errors []SignRawTransactionError `json:"errors,omitempty"`
}
// ValidateAddressWalletResult models the data returned by the wallet server
// validateaddress command.
type ValidateAddressWalletResult struct {
IsValid bool `json:"isvalid"`
Address string `json:"address,omitempty"`
IsMine bool `json:"ismine,omitempty"`
IsWatchOnly bool `json:"iswatchonly,omitempty"`
IsScript bool `json:"isscript,omitempty"`
PubKey string `json:"pubkey,omitempty"`
IsCompressed bool `json:"iscompressed,omitempty"`
Account string `json:"account,omitempty"`
Addresses []string `json:"addresses,omitempty"`
Hex string `json:"hex,omitempty"`
Script string `json:"script,omitempty"`
SigsRequired int32 `json:"sigsrequired,omitempty"`
}
// GetBestBlockResult models the data from the getbestblock command.
type GetBestBlockResult struct {
Hash string `json:"hash"`
Height int32 `json:"height"`
}
// BalanceDetailsResult models the details data from the `getbalances` command.
type BalanceDetailsResult struct {
Trusted float64 `json:"trusted"`
UntrustedPending float64 `json:"untrusted_pending"`
Immature float64 `json:"immature"`
// Used is a pointer so an absent value can be distinguished from zero.
Used *float64 `json:"used"`
}
// GetBalancesResult models the data returned from the getbalances command.
type GetBalancesResult struct {
Mine BalanceDetailsResult `json:"mine"`
WatchOnly *BalanceDetailsResult `json:"watchonly"`
}
// ImportMultiResults is a slice that models the result of the importmulti command.
//
// Each item in the slice contains the execution result corresponding to the input
// requests of type btcjson.ImportMultiRequest, passed to the ImportMulti[Async]
// function.
type ImportMultiResults []struct {
Success bool `json:"success"`
Error *RPCError `json:"error,omitempty"`
Warnings *[]string `json:"warnings,omitempty"`
}
|
package vxlan
import (
//"fmt"
//"strings"
//"time"
"net"
"strconv"
log "github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/network"
//"github.com/samalba/dockerclient"
//"github.com/davecgh/go-spew/spew"
"github.com/vishvananda/netlink"
)
// Driver implements the libnetwork remote driver for vxlan-backed networks.
// It embeds network.Driver and tracks per-network link state.
type Driver struct {
network.Driver
// networks maps libnetwork NetworkID -> state created in CreateNetwork.
networks map[string]*NetworkState
}
// NetworkState is filled in at network creation time
// it contains state that we wish to keep for each network
type NetworkState struct {
// Bridge and VXLan are the netlink handles for the links created by
// CreateNetwork; DeleteNetwork removes them again.
Bridge *netlink.Bridge
VXLan *netlink.Vxlan
// Gateway is the gateway address recorded from the IPAM data.
Gateway string
IPv4Data []*network.IPAMData
IPv6Data []*network.IPAMData
}
// NewDriver constructs a Driver with an empty network-state table.
func NewDriver() (*Driver, error) {
	return &Driver{networks: map[string]*NetworkState{}}, nil
}
// CreateNetwork creates the bridge and vxlan links for a libnetwork network,
// applies any generic link options, assigns gateway addresses to the bridge,
// and records the resulting state for later Join/Delete calls.
//
// Fixes over the previous revision:
//   - the default vxlan link name was "br_"+id, identical to the bridge's
//     default, so LinkAdd of the second link always failed with a name
//     collision; the vxlan default is now "vx_"+id.
//   - errors from netlink.AddrAdd are no longer silently discarded.
func (d *Driver) CreateNetwork(r *network.CreateNetworkRequest) error {
	log.Debugf("Create network request: %+v", r)
	netID := r.NetworkID

	// Default link names are derived from the first 12 chars of the network ID.
	bridgeName := "br_" + netID[0:12]
	vxlanName := "vx_" + netID[0:12]

	// Explicit names from the generic options take precedence over defaults.
	for k, v := range r.Options {
		if k == "com.docker.network.generic" {
			if genericOpts, ok := v.(map[string]interface{}); ok {
				for key, val := range genericOpts {
					if key == "vxlanName" {
						vxlanName = val.(string)
					}
					if key == "bridgeName" {
						bridgeName = val.(string)
					}
				}
			}
		}
	}

	// Create links.
	bridge := &netlink.Bridge{
		LinkAttrs: netlink.LinkAttrs{Name: bridgeName},
	}
	vxlan := &netlink.Vxlan{
		LinkAttrs: netlink.LinkAttrs{Name: vxlanName},
	}

	// Apply the remaining link options (MTU, VNI, addresses, timers, ...).
	if err := parseLinkOptions(r, bridge, vxlan); err != nil {
		return err
	}

	// Delete links if they already exist from a previous run; errors here
	// just mean the links did not exist, so they are deliberately ignored.
	netlink.LinkDel(bridge)
	netlink.LinkDel(vxlan)

	// Add links and wire them together.
	if err := netlink.LinkAdd(bridge); err != nil {
		return err
	}
	if err := netlink.LinkAdd(vxlan); err != nil {
		return err
	}
	// Enslave the vxlan device to the bridge.
	if err := netlink.LinkSetMaster(vxlan, bridge); err != nil {
		return err
	}
	// Bring interfaces up.
	if err := netlink.LinkSetUp(bridge); err != nil {
		return err
	}
	if err := netlink.LinkSetUp(vxlan); err != nil {
		return err
	}

	// Store interfaces to be used later.
	ns := &NetworkState{
		VXLan:    vxlan,
		Bridge:   bridge,
		IPv4Data: r.IPv4Data,
		IPv6Data: r.IPv6Data,
	}

	// Add gateway IPs to the bridge. IPv6 is processed first: if both are
	// included, the IPv4 gateway is the one that remains in ns.Gateway,
	// because JoinResponse can only include one Gateway.
	for i := range r.IPv6Data {
		gatewayIP, err := netlink.ParseAddr(r.IPv6Data[i].Gateway)
		if err != nil {
			return err
		}
		ns.Gateway = r.IPv6Data[i].Gateway
		// Previously this error was silently dropped.
		if err := netlink.AddrAdd(bridge, gatewayIP); err != nil {
			return err
		}
	}
	for i := range r.IPv4Data {
		gatewayIP, err := netlink.ParseAddr(r.IPv4Data[i].Gateway)
		if err != nil {
			return err
		}
		ns.Gateway = r.IPv4Data[i].Gateway
		if err := netlink.AddrAdd(bridge, gatewayIP); err != nil {
			return err
		}
	}

	d.networks[netID] = ns
	return nil
}

// parseLinkOptions applies the "com.docker.network.generic" options from r to
// the bridge and vxlan link attributes. Unknown keys are ignored; a malformed
// value aborts with the conversion error.
func parseLinkOptions(r *network.CreateNetworkRequest, bridge *netlink.Bridge, vxlan *netlink.Vxlan) error {
	for k, v := range r.Options {
		if k != "com.docker.network.generic" {
			continue
		}
		genericOpts, ok := v.(map[string]interface{})
		if !ok {
			continue
		}
		for key, val := range genericOpts {
			log.Debugf("Libnetwork Opts Sent: [ %s ] Value: [ %s ]", key, val)
			var err error
			switch key {
			case "vxlanMTU":
				vxlan.LinkAttrs.MTU, err = strconv.Atoi(val.(string))
			case "bridgeMTU":
				bridge.LinkAttrs.MTU, err = strconv.Atoi(val.(string))
			case "vxlanHardwareAddr":
				vxlan.LinkAttrs.HardwareAddr, err = net.ParseMAC(val.(string))
			case "bridgeHardwareAddr":
				bridge.LinkAttrs.HardwareAddr, err = net.ParseMAC(val.(string))
			case "vxlanTxQLen":
				vxlan.LinkAttrs.TxQLen, err = strconv.Atoi(val.(string))
			case "bridgeTxQLen":
				bridge.LinkAttrs.TxQLen, err = strconv.Atoi(val.(string))
			case "VxlanId":
				vxlan.VxlanId, err = strconv.Atoi(val.(string))
			case "VtepDev":
				var vtepDev netlink.Link
				vtepDev, err = netlink.LinkByName(val.(string))
				if err == nil {
					vxlan.VtepDevIndex = vtepDev.Attrs().Index
				}
			case "SrcAddr":
				vxlan.SrcAddr = net.ParseIP(val.(string))
			case "Group":
				vxlan.Group = net.ParseIP(val.(string))
			case "TTL":
				vxlan.TTL, err = strconv.Atoi(val.(string))
			case "TOS":
				vxlan.TOS, err = strconv.Atoi(val.(string))
			case "Learning":
				vxlan.Learning, err = strconv.ParseBool(val.(string))
			case "Proxy":
				vxlan.Proxy, err = strconv.ParseBool(val.(string))
			case "RSC":
				vxlan.RSC, err = strconv.ParseBool(val.(string))
			case "L2miss":
				vxlan.L2miss, err = strconv.ParseBool(val.(string))
			case "L3miss":
				vxlan.L3miss, err = strconv.ParseBool(val.(string))
			case "NoAge":
				vxlan.NoAge, err = strconv.ParseBool(val.(string))
			case "GBP":
				vxlan.GBP, err = strconv.ParseBool(val.(string))
			case "Age":
				vxlan.Age, err = strconv.Atoi(val.(string))
			case "Limit":
				vxlan.Limit, err = strconv.Atoi(val.(string))
			case "Port":
				vxlan.Port, err = strconv.Atoi(val.(string))
			case "PortLow":
				vxlan.PortLow, err = strconv.Atoi(val.(string))
			case "PortHigh":
				vxlan.PortHigh, err = strconv.Atoi(val.(string))
			}
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// DeleteNetwork tears down the vxlan and bridge interfaces created
// for the network and forgets its state.
func (d *Driver) DeleteNetwork(r *network.DeleteNetworkRequest) error {
	netID := r.NetworkID
	ns, ok := d.networks[netID]
	if !ok {
		// Unknown network: nothing to tear down. The original code
		// dereferenced the nil map entry and panicked here.
		return nil
	}
	if err := netlink.LinkDel(ns.VXLan); err != nil {
		return err
	}
	if err := netlink.LinkDel(ns.Bridge); err != nil {
		return err
	}
	// Drop the entry so the map does not grow without bound.
	delete(d.networks, netID)
	return nil
}
// CreateEndpoint is a no-op for this driver: all endpoint plumbing
// happens later, in Join. The request is only logged for debugging.
func (d *Driver) CreateEndpoint(r *network.CreateEndpointRequest) error {
	log.Debugf("Create endpoint request: %+v", r)
	return nil
}
// DeleteEndpoint is a no-op counterpart to CreateEndpoint; the veth
// created in Join is not tracked here. The request is logged only.
func (d *Driver) DeleteEndpoint(r *network.DeleteEndpointRequest) error {
	log.Debugf("Delete endpoint request: %+v", r)
	return nil
}
// EndpointInfo reports no extra information about an endpoint; a
// non-nil but empty value map is returned.
func (d *Driver) EndpointInfo(r *network.InfoRequest) (*network.InfoResponse, error) {
	info := make(map[string]string)
	return &network.InfoResponse{Value: info}, nil
}
// Join creates a veth pair for the sandbox: the host end is attached
// to the network's bridge and the peer is handed to the container
// (renamed to DstPrefix + index by libnetwork). On failure after the
// pair is created, the pair is deleted so a retry can recreate it.
func (d *Driver) Join(r *network.JoinRequest) (*network.JoinResponse, error) {
	// create and attach local name to the bridge
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5]},
		PeerName:  "ethc" + r.EndpointID[:5],
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Errorf("failed to create the veth pair named: [ %v ] error: [ %s ] ", veth, err)
		return nil, err
	}
	// bring up the veth pair
	if err := netlink.LinkSetUp(veth); err != nil {
		log.Warnf("Error enabling Veth local iface: [ %v ]", veth)
		// The original code leaked the pair here; remove it.
		netlink.LinkDel(veth)
		return nil, err
	}
	// NOTE(review): assumes CreateNetwork already ran for this
	// NetworkID; a missing entry panics here — confirm libnetwork
	// guarantees that ordering.
	bridge := d.networks[r.NetworkID].Bridge
	// add veth to bridge
	if err := netlink.LinkSetMaster(veth, bridge); err != nil {
		netlink.LinkDel(veth) // avoid leaking the pair
		return nil, err
	}
	// SrcName gets renamed to DstPrefix + ID on the container iface
	res := &network.JoinResponse{
		InterfaceName: network.InterfaceName{
			SrcName:   veth.PeerName,
			DstPrefix: "eth",
		},
		Gateway: d.networks[r.NetworkID].Gateway,
	}
	log.Debugf("Join endpoint %s:%s to %s", r.NetworkID, r.EndpointID, r.SandboxKey)
	return res, nil
}
fix int name
package vxlan
import (
//"fmt"
//"strings"
//"time"
"net"
"strconv"
log "github.com/Sirupsen/logrus"
"github.com/docker/go-plugins-helpers/network"
//"github.com/samalba/dockerclient"
//"github.com/davecgh/go-spew/spew"
"github.com/vishvananda/netlink"
)
// Driver implements the libnetwork remote network driver API for
// vxlan-backed networks. It embeds network.Driver for the calls it
// does not implement itself and keeps per-network state in networks,
// keyed by libnetwork's NetworkID.
type Driver struct {
	network.Driver
	networks map[string]*NetworkState
}
// NetworkState is filled in at network creation time.
// It contains the state that we wish to keep for each network.
type NetworkState struct {
	Bridge   *netlink.Bridge // host bridge carrying the gateway address
	VXLan    *netlink.Vxlan  // vxlan interface enslaved to Bridge
	Gateway  string          // gateway handed to containers on Join
	IPv4Data []*network.IPAMData
	IPv6Data []*network.IPAMData
}
// NewDriver returns a Driver with an empty network table, ready to be
// registered with the plugin handler.
func NewDriver() (*Driver, error) {
	return &Driver{networks: map[string]*NetworkState{}}, nil
}
// CreateNetwork creates a bridge and a vxlan interface for the new
// network, applies user-supplied interface options, enslaves the
// vxlan to the bridge, brings both links up and assigns the gateway
// address(es). The resulting state is stored in d.networks for use
// by Join and DeleteNetwork.
func (d *Driver) CreateNetwork(r *network.CreateNetworkRequest) error {
	log.Debugf("Create network request: %+v", r)
	netID := r.NetworkID
	var err error

	// Default interface names derive from the network ID; they may be
	// overridden through the generic options below.
	bridgeName := "br_" + netID[:12]
	vxlanName := "vx_" + netID[:12]

	// First pass over the options: pick up only the interface names,
	// because the links must be constructed with their final names.
	for k, v := range r.Options {
		if k != "com.docker.network.generic" {
			continue
		}
		genericOpts, ok := v.(map[string]interface{})
		if !ok {
			continue
		}
		for key, val := range genericOpts {
			switch key {
			case "vxlanName":
				vxlanName = val.(string)
			case "bridgeName":
				bridgeName = val.(string)
			}
		}
	}

	// create links
	bridge := &netlink.Bridge{
		LinkAttrs: netlink.LinkAttrs{Name: bridgeName},
	}
	vxlan := &netlink.Vxlan{
		LinkAttrs: netlink.LinkAttrs{Name: vxlanName},
	}

	// Second pass: apply the remaining interface options. Each case
	// assigns into the shared err; the single check after the switch
	// replaces the per-key checks of the original if-chain (err is
	// always nil on entry to an iteration because we return on the
	// first failure).
	for k, v := range r.Options {
		if k != "com.docker.network.generic" {
			continue
		}
		genericOpts, ok := v.(map[string]interface{})
		if !ok {
			continue
		}
		for key, val := range genericOpts {
			log.Debugf("Libnetwork Opts Sent: [ %s ] Value: [ %s ]", key, val)
			switch key {
			case "vxlanMTU":
				vxlan.LinkAttrs.MTU, err = strconv.Atoi(val.(string))
			case "bridgeMTU":
				bridge.LinkAttrs.MTU, err = strconv.Atoi(val.(string))
			case "vxlanHardwareAddr":
				vxlan.LinkAttrs.HardwareAddr, err = net.ParseMAC(val.(string))
			case "bridgeHardwareAddr":
				bridge.LinkAttrs.HardwareAddr, err = net.ParseMAC(val.(string))
			case "vxlanTxQLen":
				vxlan.LinkAttrs.TxQLen, err = strconv.Atoi(val.(string))
			case "bridgeTxQLen":
				bridge.LinkAttrs.TxQLen, err = strconv.Atoi(val.(string))
			case "VxlanId":
				vxlan.VxlanId, err = strconv.Atoi(val.(string))
			case "VtepDev":
				var vtepDev netlink.Link
				vtepDev, err = netlink.LinkByName(val.(string))
				if err == nil {
					vxlan.VtepDevIndex = vtepDev.Attrs().Index
				}
			case "SrcAddr":
				// NOTE(review): net.ParseIP returns nil on a bad
				// address; the original did not validate this either.
				vxlan.SrcAddr = net.ParseIP(val.(string))
			case "Group":
				vxlan.Group = net.ParseIP(val.(string))
			case "TTL":
				vxlan.TTL, err = strconv.Atoi(val.(string))
			case "TOS":
				vxlan.TOS, err = strconv.Atoi(val.(string))
			case "Learning":
				vxlan.Learning, err = strconv.ParseBool(val.(string))
			case "Proxy":
				vxlan.Proxy, err = strconv.ParseBool(val.(string))
			case "RSC":
				vxlan.RSC, err = strconv.ParseBool(val.(string))
			case "L2miss":
				vxlan.L2miss, err = strconv.ParseBool(val.(string))
			case "L3miss":
				vxlan.L3miss, err = strconv.ParseBool(val.(string))
			case "NoAge":
				vxlan.NoAge, err = strconv.ParseBool(val.(string))
			case "GBP":
				vxlan.GBP, err = strconv.ParseBool(val.(string))
			case "Age":
				vxlan.Age, err = strconv.Atoi(val.(string))
			case "Limit":
				vxlan.Limit, err = strconv.Atoi(val.(string))
			case "Port":
				vxlan.Port, err = strconv.Atoi(val.(string))
			case "PortLow":
				vxlan.PortLow, err = strconv.Atoi(val.(string))
			case "PortHigh":
				vxlan.PortHigh, err = strconv.Atoi(val.(string))
			}
			if err != nil {
				return err
			}
		}
	}
	// Done parsing options

	// Links may survive from an earlier run: delete them first,
	// deliberately ignoring errors.
	netlink.LinkDel(bridge)
	netlink.LinkDel(vxlan)
	// add links
	if err = netlink.LinkAdd(bridge); err != nil {
		return err
	}
	if err = netlink.LinkAdd(vxlan); err != nil {
		return err
	}
	// add vxlan to bridge
	if err = netlink.LinkSetMaster(vxlan, bridge); err != nil {
		return err
	}
	// bring interfaces up
	if err = netlink.LinkSetUp(bridge); err != nil {
		return err
	}
	if err = netlink.LinkSetUp(vxlan); err != nil {
		return err
	}
	// store interfaces to be used later
	ns := &NetworkState{
		VXLan:    vxlan,
		Bridge:   bridge,
		IPv4Data: r.IPv4Data,
		IPv6Data: r.IPv6Data,
	}
	// Add IPs to interfaces. IPv6 is processed first: if both are
	// included, the IPv4 gateway is the one that remains, because a
	// JoinResponse can only carry one Gateway.
	for i := range r.IPv6Data {
		gatewayIP, err := netlink.ParseAddr(r.IPv6Data[i].Gateway)
		if err != nil {
			return err
		}
		ns.Gateway = r.IPv6Data[i].Gateway
		// NOTE(review): AddrAdd errors were ignored in the original;
		// behavior kept, but a failure leaves the bridge without a
		// gateway address.
		netlink.AddrAdd(bridge, gatewayIP)
	}
	for i := range r.IPv4Data {
		gatewayIP, err := netlink.ParseAddr(r.IPv4Data[i].Gateway)
		if err != nil {
			return err
		}
		ns.Gateway = r.IPv4Data[i].Gateway
		netlink.AddrAdd(bridge, gatewayIP)
	}
	d.networks[netID] = ns
	return nil
}
// DeleteNetwork tears down the vxlan and bridge interfaces created
// for the network and forgets its state.
func (d *Driver) DeleteNetwork(r *network.DeleteNetworkRequest) error {
	netID := r.NetworkID
	ns, ok := d.networks[netID]
	if !ok {
		// Unknown network: nothing to tear down. The original code
		// dereferenced the nil map entry and panicked here.
		return nil
	}
	if err := netlink.LinkDel(ns.VXLan); err != nil {
		return err
	}
	if err := netlink.LinkDel(ns.Bridge); err != nil {
		return err
	}
	// Drop the entry so the map does not grow without bound.
	delete(d.networks, netID)
	return nil
}
// CreateEndpoint is a no-op for this driver: all endpoint plumbing
// happens later, in Join. The request is only logged for debugging.
func (d *Driver) CreateEndpoint(r *network.CreateEndpointRequest) error {
	log.Debugf("Create endpoint request: %+v", r)
	return nil
}
// DeleteEndpoint is a no-op counterpart to CreateEndpoint; the veth
// created in Join is not tracked here. The request is logged only.
func (d *Driver) DeleteEndpoint(r *network.DeleteEndpointRequest) error {
	log.Debugf("Delete endpoint request: %+v", r)
	return nil
}
// EndpointInfo reports no extra information about an endpoint; a
// non-nil but empty value map is returned.
func (d *Driver) EndpointInfo(r *network.InfoRequest) (*network.InfoResponse, error) {
	info := make(map[string]string)
	return &network.InfoResponse{Value: info}, nil
}
// Join creates a veth pair for the sandbox: the host end is attached
// to the network's bridge and the peer is handed to the container
// (renamed to DstPrefix + index by libnetwork). On failure after the
// pair is created, the pair is deleted so a retry can recreate it.
func (d *Driver) Join(r *network.JoinRequest) (*network.JoinResponse, error) {
	// create and attach local name to the bridge
	veth := &netlink.Veth{
		LinkAttrs: netlink.LinkAttrs{Name: "veth_" + r.EndpointID[:5]},
		PeerName:  "ethc" + r.EndpointID[:5],
	}
	if err := netlink.LinkAdd(veth); err != nil {
		log.Errorf("failed to create the veth pair named: [ %v ] error: [ %s ] ", veth, err)
		return nil, err
	}
	// bring up the veth pair
	if err := netlink.LinkSetUp(veth); err != nil {
		log.Warnf("Error enabling Veth local iface: [ %v ]", veth)
		// The original code leaked the pair here; remove it.
		netlink.LinkDel(veth)
		return nil, err
	}
	// NOTE(review): assumes CreateNetwork already ran for this
	// NetworkID; a missing entry panics here — confirm libnetwork
	// guarantees that ordering.
	bridge := d.networks[r.NetworkID].Bridge
	// add veth to bridge
	if err := netlink.LinkSetMaster(veth, bridge); err != nil {
		netlink.LinkDel(veth) // avoid leaking the pair
		return nil, err
	}
	// SrcName gets renamed to DstPrefix + ID on the container iface
	res := &network.JoinResponse{
		InterfaceName: network.InterfaceName{
			SrcName:   veth.PeerName,
			DstPrefix: "eth",
		},
		Gateway: d.networks[r.NetworkID].Gateway,
	}
	log.Debugf("Join endpoint %s:%s to %s", r.NetworkID, r.EndpointID, r.SandboxKey)
	return res, nil
}
|
// Copyright © 2017 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"time"
"google.golang.org/grpc/grpclog"
"github.com/franela/goreq"
"github.com/ellcrys/cocoon/core/api/cmd"
"github.com/ellcrys/cocoon/core/common"
"github.com/ellcrys/cocoon/core/config"
logging "github.com/op/go-logging"
)
var log *logging.Logger
// init wires up logging before main runs: the application logger is
// configured, goreq gets a 5-second connect timeout, and gRPC's own
// logging is silenced unless ENABLE_GRPC_LOG is set.
func init() {
	config.ConfigureLogger()
	log = logging.MustGetLogger("main")
	goreq.SetConnectTimeout(5 * time.Second)
	if os.Getenv("ENABLE_GRPC_LOG") == "" {
		gl := common.GLogger{}
		gl.Disable(true, true)
		grpclog.SetLogger(&gl)
	}
}
// main executes the API root command; any error is logged and the
// process exits with a non-zero status. The commented-out profiling
// scaffolding that used to live here was dead code and is removed.
func main() {
	if err := cmd.RootCmd.Execute(); err != nil {
		log.Error(err.Error())
		os.Exit(-1)
	}
}
unnecessary
package main
import (
"os"
"time"
"google.golang.org/grpc/grpclog"
"github.com/ellcrys/cocoon/core/api/cmd"
"github.com/ellcrys/cocoon/core/common"
"github.com/ellcrys/cocoon/core/config"
"github.com/franela/goreq"
logging "github.com/op/go-logging"
)
var log *logging.Logger
// init runs before main: it configures the application logger, sets a
// 5-second connect timeout for goreq, and disables gRPC's internal
// logging unless the ENABLE_GRPC_LOG environment variable is set.
func init() {
	config.ConfigureLogger()
	log = logging.MustGetLogger("main")
	goreq.SetConnectTimeout(5 * time.Second)
	if os.Getenv("ENABLE_GRPC_LOG") == "" {
		gl := common.GLogger{}
		gl.Disable(true, true)
		grpclog.SetLogger(&gl)
	}
}
// main runs the API root command and exits non-zero on failure.
func main() {
	err := cmd.RootCmd.Execute()
	if err == nil {
		return
	}
	log.Error(err.Error())
	os.Exit(-1)
}
|
// Copyright 2017, Kerby Shedden and the Muscato contributors.
// Muscato (Multi-Genome Scalable Alignment Tool) is a software tool
// for matching large collections of reads into large collections of
// target sequence (e.g. transcript sequences).
//
// Muscato uses a two-stage approach. First, high-entropy
// subsequences of the reads are used to produce Bloom filter sketches
// of the read collection. These sketches are used to identify
// candidate matches. For example, if three offsets are chosen at
// positions 0, 20, and 40 of the reads, then three Bloom filter
// sketches are constructed. Then, every window in the target
// sequence collection is queried against these sketches, identifying
// a set of candidate matches. In the next step, for every
// subsequence appearing at each read offset, all reads and all genes
// containing the subsequence are assessed for pairwise similarity,
// and read/target pairs showing sufficiently high similarity are
// retained.
//
// This script is the entry point for the Muscato tool. Normally,
// this is the only script that will be run directly. It calls the
// other Muscato scripts in turn.
//
// Muscato can be invoked either using a configuration file in JSON
// format, or using command-line flags. A typical invocation using
// flags is:
//
// muscato --ResultsFileName=results.txt --ReadFileName=reads.fastq --GeneFileName=genes.txt.sz --GeneIdFileName=genes_ids.txt.sz
// --Windows=0,20,40,60,80 --WindowWidth=15 --BloomSize=4000000000 --NumHash=20 --PMatch=0.96 --MinDinuc=5 --MinReadLength=50
// --MaxMatches=1000000 --MaxMergeProcs=5 --MaxReadLength=300 --MatchMode=best --MMTol=2
//
// To use a JSON config file, create a file with the flag information in JSON format, e.g.
//
// {"ResultsFileName": "results.txt", "MaxReadLength": 300, ...}
//
// Then provide the configuration file path when invoking Muscato, e.g.
//
// muscato --ConfigFileName=config.json
//
// Note that before running muscato, it is necessary to produce a
// processed version of the target sequence data. This can be done
// using the muscato_prep_targets tool, invoked as follows.
//
// muscato_prep_targets genes.fasta
//
// See utils/Config.go for the full set of configuration parameters.
//
// Muscato generates a number of intermediate files and logs that by
// default are placed into the directory tmp/#####, where ##### is a
// generated number. This temporary directory can be deleted after a
// successful run if desired. The log files in the tmp directory may
// contain useful information for troubleshooting.
//
// Since Muscato uses Unix-style FIFOs for interprocess communication,
// it can only be run on Unix-like systems at present. For the same
// reason, Muscato may not be runnable from AFS or NFS implementations
// that do not support FIFOs.
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/golang/snappy"
"github.com/kshedden/muscato/utils"
"github.com/scipipe/scipipe"
"github.com/willf/bloom"
"golang.org/x/sys/unix"
)
// Package-level state shared by the pipeline stages below.
var (
	startpoint     int           // stage to restart from (StartPoint flag)
	configFilePath string        // JSON config path handed to each subcommand
	config         *utils.Config // parsed configuration
	basename       string
	tmpdir         string // workspace for intermediate files
	pipedir        string // directory holding the FIFOs between stages
	logger         *log.Logger
	// Flag for setting the tmp file location for sorting.
	sortTmpFlag string
)

// Arguments passed to every external `sort` invocation: a 2 GB
// in-memory buffer and 8-way parallelism.
const (
	sortbuf string = "-S 2G"
	sortpar string = "--parallel=8"
)
// pipename returns a fresh random path inside pipedir, suitable for
// creating a FIFO.
func pipename() string {
	return path.Join(pipedir, fmt.Sprintf("%09d", rand.Int63()%1e9))
}
// pipefromsz creates a fifo and starts a background sztool process
// decompressing the given snappy file into it. The fifo path is
// returned; reading from it yields the decompressed stream. A
// panic in the background goroutine aborts the whole program, which
// is the intended failure mode for this pipeline.
func pipefromsz(fname string) string {
	// Seed with time and pid so concurrent runs pick different fifo
	// names. UnixNano is already an int64; the original wrapped the
	// sum in a redundant int64() conversion.
	rand.Seed(time.Now().UnixNano() + int64(os.Getpid()))
	// Retry a few times in case of a name collision.
	for k := 0; k < 10; k++ {
		name := pipename()
		err := unix.Mkfifo(name, 0755)
		if err != nil {
			// Report and try another name (was: print(fmt.Sprintf(...))).
			fmt.Fprintf(os.Stderr, "%v\n", err)
			continue
		}
		go func() {
			cmd := exec.Command("sztool", "-d", fname, name)
			cmd.Env = os.Environ()
			cmd.Stderr = os.Stderr
			if err := cmd.Run(); err != nil {
				panic(err)
			}
		}()
		return name
	}
	panic("unable to create pipe")
}
// prepReads runs muscato_prep_reads and pipes its output through an
// external sort, then merges consecutive identical read sequences:
// for each distinct sequence it writes one snappy-compressed line of
// the form "seq\tcount\tname1;name2;..." to reads_sorted.txt.sz in
// tmpdir. Panics on any failure (this stage cannot be recovered).
func prepReads() {
	logger.Printf("starting prepReads")
	logger.Printf("Running command: 'muscato_prep_reads %s'", configFilePath)
	cmd0 := exec.Command("muscato_prep_reads", configFilePath)
	cmd0.Env = os.Environ()
	cmd0.Stderr = os.Stderr
	// Sort the prepared reads so duplicates become adjacent.
	cmd1 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag)
	cmd1.Env = os.Environ()
	cmd1.Stderr = os.Stderr
	var err error
	// Chain cmd0 -> cmd1; we read cmd1's stdout below.
	cmd1.Stdin, err = cmd0.StdoutPipe()
	if err != nil {
		panic(err)
	}
	pip, err := cmd1.StdoutPipe()
	if err != nil {
		panic(err)
	}
	cmds := []*exec.Cmd{cmd0, cmd1}
	for _, cmd := range cmds {
		err = cmd.Start()
		if err != nil {
			panic(err)
		}
	}
	scanner := bufio.NewScanner(pip)
	// Allow lines up to 1 MB (default scanner buffer is much smaller).
	buf := make([]byte, 1024*1024)
	scanner.Buffer(buf, len(buf))
	// File for sequences
	outname := path.Join(tmpdir, "reads_sorted.txt.sz")
	logger.Printf("Writing sequences to %s", outname)
	fid, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	defer fid.Close()
	wtr := snappy.NewBufferedWriter(fid)
	defer wtr.Close()
	// Get the first line
	if !scanner.Scan() {
		logger.Printf("no input")
		panic("no input (is the read file empty?)")
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Each input line is "sequence name"; accumulate names of
	// duplicate sequences until the sequence changes.
	fields := strings.Fields(scanner.Text())
	seq := fields[0]
	name := []string{fields[1]}
	n := 1
	nseq := 0
	// dowrite emits one merged record. Name lists longer than 1000
	// characters are truncated with an ellipsis. Note it closes over
	// the outer err deliberately.
	dowrite := func(seq string, name []string, n int) {
		xn := strings.Join(name, ";")
		if len(xn) > 1000 {
			xn = xn[0:995]
			xn += "..."
		}
		nseq++
		_, err = wtr.Write([]byte(seq))
		if err != nil {
			panic(err)
		}
		_, err = wtr.Write([]byte("\t"))
		if err != nil {
			panic(err)
		}
		s := fmt.Sprintf("%d\t%s\n", n, xn)
		_, err = wtr.Write([]byte(s))
		if err != nil {
			panic(err)
		}
	}
	for scanner.Scan() {
		line := scanner.Text()
		fields1 := strings.Fields(line)
		seq1 := fields1[0]
		name1 := fields1[1]
		// Same sequence as the current run: just extend it.
		if strings.Compare(seq, seq1) == 0 {
			n++
			name = append(name, name1)
			continue
		}
		// Sequence changed: flush the finished run and start a new one.
		dowrite(seq, name, n)
		seq = seq1
		name = name[0:1]
		name[0] = name1
		n = 1
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Read to EOF before calling wait.
	dowrite(seq, name, n)
	for _, cmd := range cmds {
		if err := cmd.Wait(); err != nil {
			log.Fatal(err)
		}
	}
	logger.Printf(fmt.Sprintf("Wrote %d read sequences", nseq))
	logger.Printf("prepReads done")
}
// windowReads runs the muscato_window_reads subcommand, which extracts
// the configured windows from each read. Panics on failure.
func windowReads() {
	logger.Printf("starting windowReads")
	logger.Printf("Running command: 'muscato_window_reads %s'\n", configFilePath)
	cmd := exec.Command("muscato_window_reads", configFilePath)
	cmd.Env = os.Environ()
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
	logger.Printf("windowReads done")
}
// sortWindows sorts each per-window file win_<k>.txt.sz by its first
// field, writing win_<k>_sorted.txt.sz. Decompression is done through
// a fifo (pipefromsz); the sort output is recompressed with sztool.
func sortWindows() {
	logger.Printf("starting sortWindows")
	for k := 0; k < len(config.Windows); k++ {
		f := fmt.Sprintf("win_%d.txt.sz", k)
		fname := path.Join(tmpdir, f)
		// Fifo delivering the decompressed window file.
		pname1 := pipefromsz(fname)
		cmd1 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-k1", pname1)
		cmd1.Env = os.Environ()
		cmd1.Stderr = os.Stderr
		fname = strings.Replace(fname, ".txt.sz", "_sorted.txt.sz", 1)
		// Recompress sort's stdout into the _sorted file.
		cmd2 := exec.Command("sztool", "-c", "-", fname)
		cmd2.Env = os.Environ()
		cmd2.Stderr = os.Stderr
		var err error
		cmd2.Stdin, err = cmd1.StdoutPipe()
		if err != nil {
			panic(err)
		}
		cmds := []*exec.Cmd{cmd2, cmd1}
		for _, cmd := range cmds {
			err := cmd.Start()
			if err != nil {
				panic(err)
			}
		}
		// Order is important here, need to wait on cmd2
		// before waiting on cmd1.
		for _, cmd := range cmds {
			err := cmd.Wait()
			if err != nil {
				panic(err)
			}
		}
	}
	logger.Printf("sortWindows done")
}
// screen runs the muscato_screen subcommand, which queries target
// windows against the Bloom filter sketches. Panics on failure.
func screen() {
	logger.Printf("Starting screening")
	logger.Printf("Running command: 'muscato_screen %s'\n", configFilePath)
	cmd := exec.Command("muscato_screen", configFilePath)
	cmd.Env = os.Environ()
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
	logger.Printf("screening done")
}
// sortBloom sorts each Bloom-match file bmatch_<k>.txt.sz by its first
// field into smatch_<k>.txt.sz, decompressing through a fifo and
// recompressing the sorted output with sztool.
func sortBloom() {
	logger.Printf("starting sortBloom")
	for k := range config.Windows {
		f := fmt.Sprintf("bmatch_%d.txt.sz", k)
		fname := path.Join(tmpdir, f)
		// Fifo delivering the decompressed bmatch file.
		pname1 := pipefromsz(fname)
		cmd1 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-k1", pname1)
		cmd1.Env = os.Environ()
		cmd1.Stderr = os.Stderr
		f = fmt.Sprintf("smatch_%d.txt.sz", k)
		fname = path.Join(tmpdir, f)
		// Recompress sort's stdout into the smatch file.
		cmd2 := exec.Command("sztool", "-c", "-", fname)
		cmd2.Env = os.Environ()
		cmd2.Stderr = os.Stderr
		var err error
		cmd2.Stdin, err = cmd1.StdoutPipe()
		if err != nil {
			panic(err)
		}
		// Order matters here
		cmds := []*exec.Cmd{cmd2, cmd1}
		for _, cmd := range cmds {
			err := cmd.Start()
			if err != nil {
				panic(err)
			}
		}
		// Order is important, must wait on cmd2 first
		for _, cmd := range cmds {
			err := cmd.Wait()
			if err != nil {
				panic(err)
			}
		}
	}
	logger.Printf("sortBloom done")
}
// confirm runs one muscato_confirm process per window, in batches of
// at most config.MaxMergeProcs concurrent processes: each batch is
// started, then fully waited on, before the next batch begins.
func confirm() {
	logger.Printf("starting match confirmation")
	nwin := len(config.Windows)
	for fp := 0; fp < nwin; {
		// Size of this batch: the remaining windows, capped at
		// MaxMergeProcs.
		batch := config.MaxMergeProcs
		if rem := nwin - fp; batch > rem {
			batch = rem
		}
		if batch <= 0 {
			break
		}
		running := make([]*exec.Cmd, 0, batch)
		for k := fp; k < fp+batch; k++ {
			logger.Printf("Starting a round of confirmation processes")
			logger.Printf("Running command: 'muscato_confirm %s %d'\n", configFilePath, k)
			cmd := exec.Command("muscato_confirm", configFilePath, fmt.Sprintf("%d", k))
			cmd.Env = os.Environ()
			cmd.Stderr = os.Stderr
			if err := cmd.Start(); err != nil {
				panic(err)
			}
			running = append(running, cmd)
		}
		for _, cmd := range running {
			if err := cmd.Wait(); err != nil {
				panic(err)
			}
		}
		fp += batch
	}
	logger.Printf("match confirmation done")
}
// writebest receives a block of candidate-match lines for one read
// (lines), pre-split into fields (bfr). It finds the smallest
// mismatch count in the block and writes to wtr every line whose
// mismatch count is within mmtol of that minimum. ibuf is reusable
// scratch space for the per-line mismatch counts; the (possibly
// regrown) buffer is returned so the caller can pass it back in.
func writebest(lines []string, bfr [][]string, wtr io.Writer, ibuf []int, mmtol int) []int {
	// The mismatch count (nmiss) lives in field 3 of each line.
	ibuf = ibuf[:0]
	best := -1
	for _, fields := range bfr {
		nmiss, err := strconv.Atoi(fields[3])
		if err != nil {
			panic(err)
		}
		if best < 0 || nmiss < best {
			best = nmiss
		}
		ibuf = append(ibuf, nmiss)
	}
	// Emit every line within tolerance of the best fit.
	for i, line := range lines {
		if ibuf[i] > best+mmtol {
			continue
		}
		if _, err := wtr.Write([]byte(line + "\n")); err != nil {
			panic(err)
		}
	}
	return ibuf
}
// combineWindows merges the per-window rmatch files into a single
// deduplicated matches.txt.sz. All rmatch streams are fed into one
// `sort -u`; its output is grouped by read, and for each read only
// the matches within MMTol mismatches of the best match are kept
// (see writebest).
func combineWindows() {
	logger.Printf("starting combineWindows")
	mmtol := config.MMTol
	// Pipe everything into one sort/unique
	c0 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-u", "-")
	c0.Env = os.Environ()
	c0.Stderr = os.Stderr
	cmds := []*exec.Cmd{c0}
	// The sorted results go to disk
	outname := path.Join(tmpdir, "matches.txt.sz")
	out, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	wtr := snappy.NewBufferedWriter(out)
	// TODO: Add Bloom filter here to screen out duplicates
	// One sztool decompressor per window; their outputs are
	// concatenated into c0's stdin via MultiReader.
	var fd []io.Reader
	for j := 0; j < len(config.Windows); j++ {
		f := fmt.Sprintf("rmatch_%d.txt.sz", j)
		fname := path.Join(tmpdir, f)
		c := exec.Command("sztool", "-d", fname)
		c.Env = os.Environ()
		c.Stderr = os.Stderr
		cmds = append(cmds, c)
		p, err := c.StdoutPipe()
		if err != nil {
			panic(err)
		}
		fd = append(fd, p)
	}
	c0.Stdin = io.MultiReader(fd...)
	da, err := c0.StdoutPipe()
	if err != nil {
		panic(err)
	}
	for _, c := range cmds {
		err := c.Start()
		if err != nil {
			panic(err)
		}
	}
	// Taking all matches for the same read, retain only those
	// with nmiss equal to at most one greater than the lowest
	// nmiss.
	// sem is a 1-slot handshake so the Wait loop below cannot race
	// ahead of the reader; with the reader currently synchronous it
	// is effectively a no-op but kept for the goroutine variant.
	sem := make(chan bool, 1)
	sem <- true
	// DEBUG used to be go func()
	func() {
		scanner := bufio.NewScanner(da)
		var lines []string
		var fields [][]string
		var ibuf []int
		// current is the read id of the block being accumulated.
		var current string
		for scanner.Scan() {
			line := scanner.Text()
			field := strings.Fields(line)
			// Add to the current block.
			if current == "" || field[0] == current {
				lines = append(lines, line)
				fields = append(fields, field)
				current = field[0]
				continue
			}
			// Process a block
			ibuf = writebest(lines, fields, wtr, ibuf, mmtol)
			// Start a new block with the line just read.
			lines = lines[0:0]
			lines = append(lines, line)
			fields = fields[0:0]
			fields = append(fields, field)
			current = field[0]
		}
		if err := scanner.Err(); err == nil {
			// Process the final block if possible
			writebest(lines, fields, wtr, ibuf, mmtol)
		} else {
			// Should never get here, but just in case log
			// the error but don't try to process the
			// remaining lines which may be corrupted.
			logger.Printf("%v", err)
		}
		<-sem
	}()
	// OK to call Wait, done reading.
	for _, c := range cmds {
		err := c.Wait()
		if err != nil {
			panic(err)
		}
	}
	sem <- true
	wtr.Close()
	out.Close()
	logger.Printf("combineWindows done")
}
// sortByGeneId re-sorts matches.txt.sz by gene id (field 5) into
// matches_sg.txt.sz, chaining sztool decompress -> sort -> sztool
// compress through pipes.
func sortByGeneId() {
	logger.Printf("starting sortByGeneid")
	inname := path.Join(tmpdir, "matches.txt.sz")
	outname := path.Join(tmpdir, "matches_sg.txt.sz")
	// Sort by gene number
	cmd1 := exec.Command("sztool", "-d", inname)
	cmd1.Env = os.Environ()
	cmd1.Stderr = os.Stderr
	// k5 is position of gene id
	cmd2 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-k5", "-")
	cmd2.Env = os.Environ()
	cmd2.Stderr = os.Stderr
	var err error
	cmd2.Stdin, err = cmd1.StdoutPipe()
	if err != nil {
		panic(err)
	}
	// Recompress the sorted stream.
	cmd3 := exec.Command("sztool", "-c", "-", outname)
	cmd3.Env = os.Environ()
	cmd3.Stderr = os.Stderr
	cmd3.Stdin, err = cmd2.StdoutPipe()
	if err != nil {
		panic(err)
	}
	// Order matters
	cmds := []*exec.Cmd{cmd3, cmd2, cmd1}
	for _, c := range cmds {
		err := c.Start()
		if err != nil {
			panic(err)
		}
	}
	// Call Wait from end to beginning of chained commands
	for _, c := range cmds {
		err := c.Wait()
		if err != nil {
			panic(err)
		}
	}
	logger.Printf("sortbyGeneId done")
}
// joinGeneNames attaches gene names to the gene-sorted matches: it
// joins matches_sg.txt.sz (field 5 = gene id) with the gene id file,
// drops the now-redundant join key column, and writes the compressed
// result to matches_sn.txt.sz. The steps are wired as a scipipe
// workflow communicating through static fifo paths in pipedir.
func joinGeneNames() {
	logger.Printf("starting joinGeneNames")
	// Decompress matches
	ma := scipipe.NewProc("ma", fmt.Sprintf("sztool -d %s > {os:ma}", path.Join(tmpdir, "matches_sg.txt.sz")))
	ma.SetPathStatic("ma", path.Join(pipedir, "jgn_ma.txt"))
	// Decompress gene ids
	gn := scipipe.NewProc("gn", fmt.Sprintf("sztool -d %s > {os:gn}", config.GeneIdFileName))
	gn.SetPathStatic("gn", path.Join(pipedir, "jgn_gn.txt"))
	// Join genes and matches
	jo := scipipe.NewProc("jo", "join -1 5 -2 1 -t'\t' {i:mx} {i:gx} > {os:jx}")
	jo.SetPathStatic("jx", path.Join(pipedir, "jgn_joined.txt"))
	// Cut out unwanted column
	ct := scipipe.NewProc("ct", "cut -d'\t' -f 1 --complement {i:jy} > {os:co}")
	ct.SetPathStatic("co", path.Join(pipedir, "jgn_cut.txt"))
	// Compress the result
	sz := scipipe.NewProc("sz", fmt.Sprintf("sztool -c {i:zi} %s", path.Join(tmpdir, "matches_sn.txt.sz")))
	// Wire the processes: ma/gn feed the join, whose output is cut,
	// then compressed.
	jo.In("mx").Connect(ma.Out("ma"))
	jo.In("gx").Connect(gn.Out("gn"))
	ct.In("jy").Connect(jo.Out("jx"))
	sz.In("zi").Connect(ct.Out("co"))
	wf := scipipe.NewWorkflow("jgn")
	wf.AddProcs(ma, gn, jo, ct, sz)
	wf.SetDriver(sz)
	wf.Run()
	logger.Printf("joinGeneNames done")
}
// joinReadNames attaches the original read names to the matches: it
// sorts matches_sn.txt.sz by read sequence and joins it against the
// sorted reads file, writing the final output to
// config.ResultsFileName via a scipipe workflow.
func joinReadNames() {
	logger.Printf("starting joinReadNames")
	// The workflow hangs if the results file already exists, so
	// remove it.
	_, err := os.Stat(config.ResultsFileName)
	if err == nil {
		err := os.Remove(config.ResultsFileName)
		if err != nil {
			panic(err)
		}
	} else if os.IsNotExist(err) {
		// do nothing
	} else {
		panic(err)
	}
	// Decompress matches
	ma := scipipe.NewProc("ma", fmt.Sprintf("sztool -d %s > {os:ma}",
		path.Join(tmpdir, "matches_sn.txt.sz")))
	ma.SetPathStatic("ma", path.Join(pipedir, "jrn_ma.txt"))
	// Decompress sorted reads
	rd := scipipe.NewProc("rd", fmt.Sprintf("sztool -d %s > {os:rd}",
		path.Join(tmpdir, "reads_sorted.txt.sz")))
	rd.SetPathStatic("rd", path.Join(pipedir, "jrn_rd.txt"))
	// Sort the matches
	sm := scipipe.NewProc("sm", fmt.Sprintf("sort %s %s -k1 %s {i:in} > {os:sort}", sortbuf, sortpar, sortTmpFlag))
	sm.SetPathStatic("sort", path.Join(pipedir, "jrn_sort.txt"))
	// Join the sorted matches with the reads
	jo := scipipe.NewProc("jo", "join -1 1 -2 1 -t'\t' {i:srx} {i:rdx} > {o:out}")
	jo.SetPathStatic("out", config.ResultsFileName)
	snk := scipipe.NewSink("snk")
	// Connect the network
	sm.In("in").Connect(ma.Out("ma"))
	jo.In("srx").Connect(sm.Out("sort"))
	jo.In("rdx").Connect(rd.Out("rd"))
	snk.Connect(jo.Out("out"))
	wf := scipipe.NewWorkflow("jrn")
	wf.AddProcs(ma, rd, sm, jo)
	wf.SetDriver(snk)
	wf.Run()
	logger.Printf("joinReadNames done")
}
// setupLog creates muscato.log in the configured log directory and
// installs a time-stamped logger that writes to it.
func setupLog() {
	fid, err := os.Create(path.Join(config.LogDir, "muscato.log"))
	if err != nil {
		panic(err)
	}
	logger = log.New(fid, "", log.Ltime)
}
// saveConfig writes the active configuration as JSON into the log
// directory and points configFilePath at that copy, so every
// subcommand reads exactly the configuration used by this run.
func saveConfig(config *utils.Config) {
	outpath := path.Join(config.LogDir, "config.json")
	fid, err := os.Create(outpath)
	if err != nil {
		panic(err)
	}
	defer fid.Close()
	if err := json.NewEncoder(fid).Encode(config); err != nil {
		panic(err)
	}
	configFilePath = outpath
}
// handleArgs defines and parses the command-line flags, then merges
// them into the package-level config: a JSON config file (if given)
// is loaded first, and any explicitly-set flag overrides the value
// from the file. Zero/empty flag values are treated as "not set".
// Fixes the "hashses" typo in the NumHash usage string.
func handleArgs() {

	ConfigFileName := flag.String("ConfigFileName", "", "JSON file containing configuration parameters")
	ReadFileName := flag.String("ReadFileName", "", "Sequencing read file (fastq format)")
	GeneFileName := flag.String("GeneFileName", "", "Gene file name (processed form)")
	GeneIdFileName := flag.String("GeneIdFileName", "", "Gene ID file name (processed form)")
	ResultsFileName := flag.String("ResultsFileName", "", "File name for results")
	WindowsRaw := flag.String("Windows", "", "Starting position of each window")
	WindowWidth := flag.Int("WindowWidth", 0, "Width of each window")
	BloomSize := flag.Int("BloomSize", 0, "Size of Bloom filter, in bits")
	NumHash := flag.Int("NumHash", 0, "Number of hashes")
	PMatch := flag.Float64("PMatch", 0, "Required proportion of matching positions")
	MinDinuc := flag.Int("MinDinuc", 0, "Minimum number of dinucleotides to check for match")
	TempDir := flag.String("TempDir", "", "Workspace for temporary files")
	MinReadLength := flag.Int("MinReadLength", 0, "Reads shorter than this length are skipped")
	MaxReadLength := flag.Int("MaxReadLength", 0, "Reads longer than this length are truncated")
	MaxMatches := flag.Int("MaxMatches", 0, "Return no more than this number of matches per window")
	MaxMergeProcs := flag.Int("MaxMergeProcs", 0, "Run this number of merge processes concurrently")
	MMTol := flag.Int("MMTol", 0, "Number of mismatches allowed above best fit")
	StartPoint := flag.Int("StartPoint", 0, "Restart at a given point in the procedure")
	MatchMode := flag.String("MatchMode", "", "'first' (retain first matches meeting criteria) or 'best' (returns best matches meeting criteria)")
	NoCleanTmp := flag.Bool("NoCleanTmp", false, "Leave temporary files in TempDir")

	flag.Parse()

	// Start from the config file when given, otherwise from an empty
	// configuration; flags below override file values.
	if *ConfigFileName != "" {
		config = utils.ReadConfig(*ConfigFileName)
	} else {
		config = new(utils.Config)
	}

	if *ReadFileName != "" {
		config.ReadFileName = *ReadFileName
	}
	if *GeneFileName != "" {
		config.GeneFileName = *GeneFileName
	}
	if *GeneIdFileName != "" {
		config.GeneIdFileName = *GeneIdFileName
	}
	if *WindowWidth != 0 {
		config.WindowWidth = *WindowWidth
	}
	if *BloomSize != 0 {
		config.BloomSize = uint64(*BloomSize)
	}
	if *NumHash != 0 {
		config.NumHash = *NumHash
	}
	if *PMatch != 0 {
		config.PMatch = *PMatch
	}
	if *MinDinuc != 0 {
		config.MinDinuc = *MinDinuc
	}
	if *TempDir != "" {
		config.TempDir = *TempDir
	}
	if *MinReadLength != 0 {
		config.MinReadLength = *MinReadLength
	}
	if *MaxReadLength != 0 {
		config.MaxReadLength = *MaxReadLength
	}
	if *MaxMatches != 0 {
		config.MaxMatches = *MaxMatches
	}
	if *MaxMergeProcs != 0 {
		config.MaxMergeProcs = *MaxMergeProcs
	}
	if *MatchMode != "" {
		config.MatchMode = *MatchMode
	}
	if *MMTol != 0 {
		config.MMTol = *MMTol
	}
	if *ResultsFileName != "" {
		config.ResultsFileName = *ResultsFileName
	}
	if *NoCleanTmp {
		config.NoCleanTmp = true
	}

	// ResultsFileName is required (no sensible default here; checkArgs
	// handles the config-file path separately).
	if config.ResultsFileName == "" {
		print("ResultsFileName must be specified. Run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}

	startpoint = *StartPoint

	// Windows is given as a comma-separated list of integer offsets.
	if *WindowsRaw != "" {
		toks := strings.Split(*WindowsRaw, ",")
		var itoks []int
		for _, x := range toks {
			y, err := strconv.Atoi(x)
			if err != nil {
				panic(err)
			}
			itoks = append(itoks, y)
		}
		config.Windows = itoks
	}
}
// checkArgs validates the merged configuration, exiting with a message
// when a required value is missing and filling in documented defaults
// for optional values.  Fix: the help-hint messages previously read
// "'muscato --help for more information" with an unbalanced quote.
func checkArgs() {
	if config.ReadFileName == "" {
		os.Stderr.WriteString("ReadFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.GeneFileName == "" {
		os.Stderr.WriteString("GeneFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.GeneIdFileName == "" {
		os.Stderr.WriteString("GeneIdFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.ResultsFileName == "" {
		// Defensive default; handleArgs already exits when this is empty.
		config.ResultsFileName = "results.txt"
		os.Stderr.WriteString("ResultsFileName not provided, defaulting to 'results.txt'\n\n")
	}
	if len(config.Windows) == 0 {
		os.Stderr.WriteString("Windows not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.WindowWidth == 0 {
		os.Stderr.WriteString("WindowWidth not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.BloomSize == 0 {
		os.Stderr.WriteString("BloomSize not provided, defaulting to 4 billion.\n\n")
		config.BloomSize = 4 * 1000 * 1000 * 1000
	}
	if config.NumHash == 0 {
		os.Stderr.WriteString("NumHash not provided, defaulting to 20.\n\n")
		config.NumHash = 20
	}
	if config.PMatch == 0 {
		os.Stderr.WriteString("PMatch not provided, defaulting to 1.\n\n")
		config.PMatch = 1
	}
	if config.MaxReadLength == 0 {
		os.Stderr.WriteString("MaxReadLength not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.MaxMatches == 0 {
		os.Stderr.WriteString("MaxMatches not provided, defaulting to 1 million\n\n")
		config.MaxMatches = 1000 * 1000
	}
	if config.MaxMergeProcs == 0 {
		os.Stderr.WriteString("MaxMergeProcs not provided, defaulting to 3\n\n")
		config.MaxMergeProcs = 3
	}
	if !strings.HasSuffix(config.ReadFileName, ".fastq") {
		msg := fmt.Sprintf("Warning: %s may not be a fastq file, continuing anyway\n\n",
			config.ReadFileName)
		os.Stderr.WriteString(msg)
	}
	if config.MatchMode == "" {
		os.Stderr.WriteString("MatchMode not provided, defaulting to 'best'\n")
		config.MatchMode = "best"
	}
}
func setupEnvs() {
err := os.Setenv("LC_ALL", "C")
if err != nil {
panic(err)
}
home := os.Getenv("HOME")
gopath := path.Join(home, "go")
err = os.Setenv("GOPATH", gopath)
if err != nil {
panic(err)
}
err = os.Setenv("PATH", os.Getenv("PATH")+":"+home+"/go/bin")
if err != nil {
panic(err)
}
}
// makeTemp creates the directories for temporary files, pipes, logs,
// and sort scratch space, recording their locations in the package
// variables (tmpdir, pipedir, sortTmpFlag) and in config.
func makeTemp() {
	if config.TempDir == "" {
		// No TempDir configured: create a fresh random directory
		// beneath muscato_tmp.
		err := os.MkdirAll("muscato_tmp", 0755)
		if err != nil {
			panic(err)
		}
		tmpdir, err = ioutil.TempDir("muscato_tmp", "")
		if err != nil {
			panic(err)
		}
		config.TempDir = tmpdir
	} else {
		tmpdir = config.TempDir
		err := os.MkdirAll(tmpdir, 0755)
		if err != nil {
			panic(err)
		}
	}
	// The directory where all pipes are written.
	pipedir = path.Join("/tmp/muscato/pipes", path.Base(tmpdir))
	err := os.MkdirAll(pipedir, 0755)
	if err != nil {
		panic(err)
	}
	if config.LogDir == "" {
		logdir := path.Join("muscato_logs", path.Base(tmpdir))
		err := os.MkdirAll(logdir, 0755)
		if err != nil {
			panic(err)
		}
		config.LogDir = logdir
	}
	// Configure the temporary directory for sort.
	sortTmpFlag = path.Join(tmpdir, "sort")
	err = os.MkdirAll(sortTmpFlag, 0755)
	if err != nil {
		panic(err)
	}
	// sortTmpFlag doubles as the literal flag string passed to GNU sort.
	sortTmpFlag = "--temporary-directory=" + sortTmpFlag
}
// writeNonMatch emits the reads that were never matched.  It loads the
// first column of the results file into a Bloom filter, then re-reads
// the sorted reads and writes every read whose sequence is not in the
// filter to a fastq file named <results>.nonmatch.<ext>.fastq next to
// the results file, with placeholder '!' quality scores.
// NOTE(review): Bloom-filter false positives can cause a small number
// of unmatched reads to be silently omitted.
func writeNonMatch() {
	logger.Print("Starting writeNonMatch")
	// Reader for the match file
	inf, err := os.Open(config.ResultsFileName)
	if err != nil {
		panic(err)
	}
	defer inf.Close()
	// Build a bloom filter based on the matched sequences
	// (4e9 bits = ~500 MB resident).
	billion := uint(1000 * 1000 * 1000)
	bf := bloom.New(4*billion, 5)
	scanner := bufio.NewScanner(inf)
	scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
	for scanner.Scan() {
		f := bytes.Fields(scanner.Bytes())
		bf.Add(f[0])
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Open the nonmatch output file,
	// e.g. results.txt -> results.nonmatch.txt.fastq
	a, b := path.Split(config.ResultsFileName)
	c := strings.Split(b, ".")
	d := c[len(c)-1]
	c[len(c)-1] = "nonmatch"
	c = append(c, d+".fastq")
	outname := path.Join(a, strings.Join(c, "."))
	out, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	wtr := bufio.NewWriter(out)
	defer wtr.Flush()
	// Check each read to see if it was matched.
	rfname := path.Join(tmpdir, "reads_sorted.txt.sz")
	inf, err = os.Open(rfname)
	if err != nil {
		panic(err)
	}
	defer inf.Close()
	rdr := snappy.NewReader(inf)
	scanner = bufio.NewScanner(rdr)
	// NOTE(review): this scanner's Err() is never checked after the loop.
	for scanner.Scan() {
		// Fields: presumably f[0]=sequence, f[1]=count, f[2]=names —
		// confirm against the writer of reads_sorted.txt.sz.
		f := bytes.Fields(scanner.Bytes())
		if !bf.Test(f[0]) {
			// NOTE(review): fastq name lines conventionally begin with
			// '@'; none is written here — confirm consumers accept this.
			_, err := wtr.Write(f[2])
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n"))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write(f[0])
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n+\n"))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write(bytes.Repeat([]byte{'!'}, len(f[0])))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n"))
			if err != nil {
				panic(err)
			}
		}
	}
	logger.Printf("writeNonMatch done")
}
func run() {
if startpoint <= 0 {
prepReads()
}
if startpoint <= 1 {
windowReads()
}
if startpoint <= 2 {
sortWindows()
}
if startpoint <= 3 {
screen()
}
if startpoint <= 4 {
sortBloom()
}
if startpoint <= 5 {
confirm()
}
if startpoint <= 6 {
combineWindows()
}
if startpoint <= 7 {
sortByGeneId()
}
if startpoint <= 8 {
joinGeneNames()
}
if startpoint <= 9 {
joinReadNames()
}
if startpoint <= 10 {
writeNonMatch()
}
}
// clean removes the pipe directory and, unless NoCleanTmp is set, the
// temporary file directory.  Failures are logged but never fatal.
func clean() {
	logger.Printf("Removing pipes...")
	if err := os.RemoveAll(pipedir); err != nil {
		logger.Print("Can't remove pipes:")
		logger.Print(err)
		logger.Printf("Continuing anyway...\n")
	}
	if config.NoCleanTmp {
		return
	}
	logger.Printf("Removing temporary files...")
	if err := os.RemoveAll(tmpdir); err != nil {
		logger.Print("Can't remove temporary files:")
		logger.Print(err)
		logger.Printf("Continuing anyway...\n")
	}
}
// main wires together the full Muscato pipeline: merge and validate
// configuration, prepare environment and directories, run all stages,
// then clean up.
func main() {
	handleArgs()
	checkArgs()
	setupEnvs()
	// makeTemp must run before saveConfig/setupLog: both write into LogDir.
	makeTemp()
	saveConfig(config)
	setupLog()
	logger.Printf("Storing temporary files in %s", tmpdir)
	logger.Printf("Storing log files in %s", config.LogDir)
	run()
	clean()
	logger.Printf("All done, exiting")
}
more use of scipipe
// Copyright 2017, Kerby Shedden and the Muscato contributors.
// Muscato (Multi-Genome Scalable Alignment Tool) is a software tool
// for matching large collections of reads into large collections of
// target sequence (e.g. transcript sequences).
//
// Muscato uses a two-stage approach. First, high-entropy
// subsequences of the reads are used to produce Bloom filter sketches
// of the read collection. These sketches are used to identify
// candidate matches. For example, if three offsets are chosen at
// positions 0, 20, and 40 of the reads, then three Bloom filter
// sketches are constructed. Then, every window in the target
// sequence collection is queried against these sketches, identifying
// a set of candidate matches. In the next step, for every
// subsequence appearing at each read offset, all reads and all genes
// containing the subsequence are assessed for pairwise similarity,
// and read/target pairs showing sufficiently high similarity are
// retained.
//
// This script is the entry point for the Muscato tool. Normally,
// this is the only script that will be run directly. It calls the
// other Muscato scripts in turn.
//
// Muscato can be invoked either using a configuration file in JSON
// format, or using command-line flags. A typical invocation using
// flags is:
//
// muscato --ResultsFileName=results.txt --ReadFileName=reads.fastq --GeneFileName=genes.txt.sz --GeneIdFileName=genes_ids.txt.sz
// --Windows=0,20,40,60,80 --WindowWidth=15 --BloomSize=4000000000 --NumHash=20 --PMatch=0.96 --MinDinuc=5 --MinReadLength=50
// --MaxMatches=1000000 --MaxMergeProcs=5 --MaxReadLength=300 --MatchMode=best --MMTol=2
//
// To use a JSON config file, create a file with the flag information in JSON format, e.g.
//
// {"ResultsFileName": "results.txt", "MaxReadLength": 300, ...}
//
// Then provide the configuration file path when invoking Muscato, e.g.
//
// muscato --ConfigFileName=config.json
//
// Note that before running muscato, it is necessary to produce a
// processed version of the target sequence data. This can be done
// using the muscato_prep_targets tool, invoked as follows.
//
// muscato_prep_targets genes.fasta
//
// See utils/Config.go for the full set of configuration parameters.
//
// Muscato generates a number of intermediate files and logs that by
// default are placed into the directory tmp/#####, where ##### is a
// generated number. This temporary directory can be deleted after a
// successful run if desired. The log files in the tmp directory may
// contain useful information for troubleshooting.
//
// Since Muscato uses Unix-style FIFOs for interprocess communication,
// it can only be run on Unix-like systems at present. For the same
// reason, Muscato may not be runnable from AFS or NFS implementations
// that do not support FIFOs.
package main
import (
"bufio"
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"math/rand"
"os"
"os/exec"
"path"
"strconv"
"strings"
"time"
"github.com/golang/snappy"
"github.com/google/uuid"
"github.com/kshedden/muscato/utils"
"github.com/scipipe/scipipe"
"github.com/willf/bloom"
"golang.org/x/sys/unix"
)
var (
	// configFilePath is the JSON config written by saveConfig; it is
	// handed to every helper program.
	configFilePath string
	// config holds the merged (file + flags) run configuration.
	config *utils.Config
	// basename — NOTE(review): appears unused in this chunk; confirm
	// before removing.
	basename string
	// pipedir holds the FIFOs used for inter-process communication.
	pipedir string
	logger *log.Logger
	// Flag for setting the tmp file location for sorting.
	sortTmpFlag string
)
const (
	// Arguments for GNU sort: 2 GB memory buffer, 8 parallel threads.
	// NOTE(review): each value is both interpolated into shell command
	// strings (where the space splits flag and argument) and passed as
	// a single argv element to exec.Command — verify GNU sort accepts
	// "-S 2G" as one token in the latter case.
	sortbuf string = "-S 2G"
	sortpar string = "--parallel=8"
)
// pipename returns a fresh random 9-digit path inside the pipe
// directory, suitable for use as a FIFO name.
func pipename() string {
	return path.Join(pipedir, fmt.Sprintf("%09d", rand.Int63()%1e9))
}
// pipefromsz creates a fifo and starts decompressing the given snappy
// file into it.  The fifo path is returned immediately; sztool runs in
// a background goroutine that panics if decompression fails.
func pipefromsz(fname string) string {
	rand.Seed(int64(time.Now().UnixNano() + int64(os.Getpid())))
	// Retry a few times in case a randomly chosen fifo name collides.
	for k := 0; k < 10; k++ {
		name := pipename()
		err := unix.Mkfifo(name, 0755)
		if err == nil {
			go func() {
				cmd := exec.Command("sztool", "-d", fname, name)
				cmd.Env = os.Environ()
				cmd.Stderr = os.Stderr
				err := cmd.Run()
				if err != nil {
					panic(err)
				}
			}()
			return name
		}
		print(fmt.Sprintf("%v\n", err))
	}
	panic("unable to create pipe")
}
// prepReads runs 'muscato_prep_reads | sort' over the configured read
// file and collapses consecutive runs of identical sequences into one
// record per sequence of the form "sequence<TAB>count<TAB>name1;...".
// The result is written snappy-compressed to reads_sorted.txt.sz in
// the temp directory.
func prepReads() {
	logger.Printf("Starting prepReads")
	logger.Printf("Running command: 'muscato_prep_reads %s'", configFilePath)
	cmd0 := exec.Command("muscato_prep_reads", configFilePath)
	cmd0.Env = os.Environ()
	cmd0.Stderr = os.Stderr
	cmd1 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag)
	cmd1.Env = os.Environ()
	cmd1.Stderr = os.Stderr
	var err error
	// Chain: muscato_prep_reads stdout -> sort stdin; sort's stdout is
	// consumed by the scanner below.
	cmd1.Stdin, err = cmd0.StdoutPipe()
	if err != nil {
		panic(err)
	}
	pip, err := cmd1.StdoutPipe()
	if err != nil {
		panic(err)
	}
	cmds := []*exec.Cmd{cmd0, cmd1}
	for _, cmd := range cmds {
		err = cmd.Start()
		if err != nil {
			panic(err)
		}
	}
	scanner := bufio.NewScanner(pip)
	buf := make([]byte, 1024*1024)
	scanner.Buffer(buf, len(buf))
	// File for sequences
	outname := path.Join(config.TempDir, "reads_sorted.txt.sz")
	logger.Printf("Writing sequences to %s", outname)
	fid, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	defer fid.Close()
	wtr := snappy.NewBufferedWriter(fid)
	defer wtr.Close()
	// Get the first line
	if !scanner.Scan() {
		logger.Printf("no input")
		panic("no input (is the read file empty?)")
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Each input line is "sequence name"; identical sequences are
	// adjacent because the stream is sorted.
	fields := strings.Fields(scanner.Text())
	seq := fields[0]
	name := []string{fields[1]}
	n := 1
	nseq := 0
	// dowrite emits one collapsed record.  Note it assigns the
	// enclosing err variable; joined name lists longer than 1000 bytes
	// are truncated with a "..." suffix.
	dowrite := func(seq string, name []string, n int) {
		xn := strings.Join(name, ";")
		if len(xn) > 1000 {
			xn = xn[0:995]
			xn += "..."
		}
		nseq++
		_, err = wtr.Write([]byte(seq))
		if err != nil {
			panic(err)
		}
		_, err = wtr.Write([]byte("\t"))
		if err != nil {
			panic(err)
		}
		s := fmt.Sprintf("%d\t%s\n", n, xn)
		_, err = wtr.Write([]byte(s))
		if err != nil {
			panic(err)
		}
	}
	for scanner.Scan() {
		line := scanner.Text()
		fields1 := strings.Fields(line)
		seq1 := fields1[0]
		name1 := fields1[1]
		if strings.Compare(seq, seq1) == 0 {
			// Same sequence as the current run: accumulate.
			n++
			name = append(name, name1)
			continue
		}
		// Sequence changed: flush the finished run and start a new one.
		dowrite(seq, name, n)
		seq = seq1
		name = name[0:1]
		name[0] = name1
		n = 1
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Read to EOF before calling wait.
	dowrite(seq, name, n)
	for _, cmd := range cmds {
		if err := cmd.Wait(); err != nil {
			log.Fatal(err)
		}
	}
	logger.Printf(fmt.Sprintf("Wrote %d read sequences", nseq))
	logger.Printf("prepReads done")
}
// windowReads runs the muscato_window_reads helper with the saved
// configuration file, panicking if the command fails.
func windowReads() {
	logger.Printf("starting windowReads")
	logger.Printf("Running command: 'muscato_window_reads %s'\n", configFilePath)
	c := exec.Command("muscato_window_reads", configFilePath)
	c.Env = os.Environ()
	c.Stderr = os.Stderr
	if err := c.Run(); err != nil {
		panic(err)
	}
	logger.Printf("windowReads done")
}
// sortWindows sorts each per-window file (win_<k>.txt.sz) by its first
// field into win_<k>_sorted.txt.sz.  Each window gets a small scipipe
// workflow — decompress -> sort -> recompress — connected via FIFOs in
// pipedir.
func sortWindows() {
	logger.Printf("starting sortWindows")
	for k := 0; k < len(config.Windows); k++ {
		logger.Printf("sortWindows %d...", k)
		// Decompress matches
		fn := path.Join(config.TempDir, fmt.Sprintf("win_%d.txt.sz", k))
		dc := scipipe.NewProc("dc", fmt.Sprintf("sztool -d %s > {os:dx}", fn))
		dc.SetPathStatic("dx", path.Join(pipedir, fmt.Sprintf("sw_dc_%d", k)))
		// Sort the matches
		sc := fmt.Sprintf("sort %s %s -k1 %s {i:in} > {o:sort}", sortbuf, sortpar, sortTmpFlag)
		sm := scipipe.NewProc("sm", sc)
		logger.Printf(sc)
		sm.SetPathStatic("sort", path.Join(pipedir, fmt.Sprintf("sw_sort_%d", k)))
		// Compress results
		fn = strings.Replace(fn, ".txt.sz", "_sorted.txt.sz", 1)
		rc := scipipe.NewProc("rc", fmt.Sprintf("sztool -c {i:ins} %s", fn))
		// Connect the network
		sm.In("in").Connect(dc.Out("dx"))
		rc.In("ins").Connect(sm.Out("sort"))
		wf := scipipe.NewWorkflow("sw")
		wf.AddProcs(dc, sm, rc)
		wf.SetDriver(rc)
		wf.Run()
		logger.Printf("done\n")
	}
	logger.Printf("sortWindows done")
}
// screen runs the muscato_screen helper with the saved configuration
// file, panicking if the command fails.
func screen() {
	logger.Printf("Starting screening")
	logger.Printf("Running command: 'muscato_screen %s'\n", configFilePath)
	c := exec.Command("muscato_screen", configFilePath)
	c.Env = os.Environ()
	c.Stderr = os.Stderr
	if err := c.Run(); err != nil {
		panic(err)
	}
	logger.Printf("Screening done")
}
// sortBloom sorts each window's Bloom candidate file (bmatch_<k>.txt.sz)
// by its first field into smatch_<k>.txt.sz, using a decompress ->
// sort -> recompress scipipe workflow per window.
func sortBloom() {
	logger.Printf("Starting sortBloom")
	for k := range config.Windows {
		logger.Printf("sortBloom %d...", k)
		// Decompress matches
		fn := path.Join(config.TempDir, fmt.Sprintf("bmatch_%d.txt.sz", k))
		dc := scipipe.NewProc("dc", fmt.Sprintf("sztool -d %s > {os:dx}", fn))
		dc.SetPathStatic("dx", path.Join(pipedir, fmt.Sprintf("sb_dc_%d", k)))
		// Sort the matches
		c := fmt.Sprintf("sort %s %s -k1 %s {i:in} > {os:sort}", sortbuf, sortpar, sortTmpFlag)
		logger.Printf(c)
		sm := scipipe.NewProc("sm", c)
		sm.SetPathStatic("sort", path.Join(pipedir, fmt.Sprintf("sb_sort_%d", k)))
		// Compress results
		fn = path.Join(config.TempDir, fmt.Sprintf("smatch_%d.txt.sz", k))
		rc := scipipe.NewProc("rc", fmt.Sprintf("sztool -c {i:ins} %s", fn))
		// Connect the network
		sm.In("in").Connect(dc.Out("dx"))
		rc.In("ins").Connect(sm.Out("sort"))
		wf := scipipe.NewWorkflow("sb")
		wf.AddProcs(dc, sm, rc)
		wf.SetDriver(rc)
		wf.Run()
		logger.Printf("done")
	}
	logger.Printf("sortBloom done")
}
// confirm runs the muscato_confirm helper for every window, at most
// MaxMergeProcs processes at a time.  Each round starts up to nproc
// processes and waits for all of them before starting the next round.
// Fix: the "Starting a round" message was previously logged once per
// process (inside the inner loop) rather than once per round.
func confirm() {
	logger.Printf("starting match confirmation")
	fp := 0
	for {
		// Size of this round: no more processes than remaining windows.
		nproc := config.MaxMergeProcs
		if nproc > len(config.Windows)-fp {
			nproc = len(config.Windows) - fp
		}
		if nproc == 0 {
			break
		}
		logger.Printf("Starting a round of confirmation processes")
		var cmds []*exec.Cmd
		for k := fp; k < fp+nproc; k++ {
			logger.Printf("Running command: 'muscato_confirm %s %d'\n", configFilePath, k)
			cmd := exec.Command("muscato_confirm", configFilePath, fmt.Sprintf("%d", k))
			cmd.Env = os.Environ()
			cmd.Stderr = os.Stderr
			err := cmd.Start()
			if err != nil {
				panic(err)
			}
			cmds = append(cmds, cmd)
		}
		// Wait for the whole round before launching more processes.
		for _, cmd := range cmds {
			err := cmd.Wait()
			if err != nil {
				panic(err)
			}
		}
		fp += nproc
	}
	logger.Printf("match confirmation done")
}
// writebest accepts a set of lines (lines), which have also been
// broken into fields (bfr). Every line represents a candidate match.
// The matches with at most mmtol more matches than the best match are
// written to the io writer (wtr). ibuf is provided workspace.
func writebest(lines []string, bfr [][]string, wtr io.Writer, ibuf []int, mmtol int) []int {
// Find the best fit, determine the number of mismatches for each sequence.
ibuf = ibuf[0:0]
best := -1
for _, x := range bfr {
y, err := strconv.Atoi(x[3]) // 3 is position of nmiss
if err != nil {
panic(err)
}
if best == -1 || y < best {
best = y
}
ibuf = append(ibuf, y)
}
// Output the sequences with acceptable number of mismatches.
for i, x := range lines {
if ibuf[i] <= best+mmtol {
_, err := wtr.Write([]byte(x))
if err != nil {
panic(err)
}
_, err = wtr.Write([]byte("\n"))
if err != nil {
panic(err)
}
}
}
return ibuf
}
// combineWindows merges the per-window confirmed-match files
// (rmatch_<k>.txt.sz) into one sorted, de-duplicated stream, and for
// each read keeps only the matches whose mismatch count is within
// MMTol of that read's best match.  Output is written snappy-compressed
// to matches.txt.sz.
func combineWindows() {
	logger.Printf("starting combineWindows")
	mmtol := config.MMTol
	// Pipe everything into one sort/unique
	c0 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-u", "-")
	c0.Env = os.Environ()
	c0.Stderr = os.Stderr
	cmds := []*exec.Cmd{c0}
	// The sorted results go to disk
	outname := path.Join(config.TempDir, "matches.txt.sz")
	out, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	wtr := snappy.NewBufferedWriter(out)
	// TODO: Add Bloom filter here to screen out duplicates
	var fd []io.Reader
	// One decompressor per window; their outputs are concatenated into
	// the sort's stdin via io.MultiReader below.
	for j := 0; j < len(config.Windows); j++ {
		f := fmt.Sprintf("rmatch_%d.txt.sz", j)
		fname := path.Join(config.TempDir, f)
		c := exec.Command("sztool", "-d", fname)
		c.Env = os.Environ()
		c.Stderr = os.Stderr
		cmds = append(cmds, c)
		p, err := c.StdoutPipe()
		if err != nil {
			panic(err)
		}
		fd = append(fd, p)
	}
	c0.Stdin = io.MultiReader(fd...)
	da, err := c0.StdoutPipe()
	if err != nil {
		panic(err)
	}
	for _, c := range cmds {
		err := c.Start()
		if err != nil {
			panic(err)
		}
	}
	// Taking all matches for the same read, retain only those
	// with nmiss equal to at most one greater than the lowest
	// nmiss.
	// sem is a one-slot semaphore left over from when the consumer
	// below ran in its own goroutine; it still guards the final
	// "consumer finished" handshake before closing the writer.
	sem := make(chan bool, 1)
	sem <- true
	// DEBUG used to be go func()
	func() {
		scanner := bufio.NewScanner(da)
		var lines []string
		var fields [][]string
		var ibuf []int
		var current string
		for scanner.Scan() {
			line := scanner.Text()
			field := strings.Fields(line)
			// Add to the current block.
			if current == "" || field[0] == current {
				lines = append(lines, line)
				fields = append(fields, field)
				current = field[0]
				continue
			}
			// Process a block
			ibuf = writebest(lines, fields, wtr, ibuf, mmtol)
			lines = lines[0:0]
			lines = append(lines, line)
			fields = fields[0:0]
			fields = append(fields, field)
			current = field[0]
		}
		if err := scanner.Err(); err == nil {
			// Process the final block if possible
			writebest(lines, fields, wtr, ibuf, mmtol)
		} else {
			// Should never get here, but just in case log
			// the error but don't try to process the
			// remaining lines which may be corrupted.
			logger.Printf("%v", err)
		}
		<-sem
	}()
	// OK to call Wait, done reading.
	for _, c := range cmds {
		err := c.Wait()
		if err != nil {
			panic(err)
		}
	}
	sem <- true
	wtr.Close()
	out.Close()
	logger.Printf("combineWindows done")
}
// sortByGeneId re-sorts the combined matches by gene id (sort key 5),
// producing matches_sg.txt.sz via the pipeline
// 'sztool -d | sort -k5 | sztool -c'.
func sortByGeneId() {
	logger.Printf("starting sortByGeneid")
	inname := path.Join(config.TempDir, "matches.txt.sz")
	outname := path.Join(config.TempDir, "matches_sg.txt.sz")
	// Sort by gene number
	cmd1 := exec.Command("sztool", "-d", inname)
	cmd1.Env = os.Environ()
	cmd1.Stderr = os.Stderr
	// k5 is position of gene id
	cmd2 := exec.Command("sort", sortbuf, sortpar, sortTmpFlag, "-k5", "-")
	cmd2.Env = os.Environ()
	cmd2.Stderr = os.Stderr
	var err error
	cmd2.Stdin, err = cmd1.StdoutPipe()
	if err != nil {
		panic(err)
	}
	cmd3 := exec.Command("sztool", "-c", "-", outname)
	cmd3.Env = os.Environ()
	cmd3.Stderr = os.Stderr
	cmd3.Stdin, err = cmd2.StdoutPipe()
	if err != nil {
		panic(err)
	}
	// Order matters
	cmds := []*exec.Cmd{cmd3, cmd2, cmd1}
	for _, c := range cmds {
		err := c.Start()
		if err != nil {
			panic(err)
		}
	}
	// Call Wait from end to beginning of chained commands
	for _, c := range cmds {
		err := c.Wait()
		if err != nil {
			panic(err)
		}
	}
	logger.Printf("sortbyGeneId done")
}
// joinGeneNames attaches gene names to each match by joining
// matches_sg.txt.sz (field 5 = gene id) with the gene id file, cutting
// the redundant join-key column, and compressing the result to
// matches_sn.txt.sz.
func joinGeneNames() {
	logger.Printf("starting joinGeneNames")
	// Decompress matches
	ma := scipipe.NewProc("ma", fmt.Sprintf("sztool -d %s > {os:ma}", path.Join(config.TempDir, "matches_sg.txt.sz")))
	ma.SetPathStatic("ma", path.Join(pipedir, "jgn_ma.txt"))
	// Decompress gene ids
	gn := scipipe.NewProc("gn", fmt.Sprintf("sztool -d %s > {os:gn}", config.GeneIdFileName))
	gn.SetPathStatic("gn", path.Join(pipedir, "jgn_gn.txt"))
	// Join genes and matches
	jo := scipipe.NewProc("jo", "join -1 5 -2 1 -t'\t' {i:mx} {i:gx} > {os:jx}")
	jo.SetPathStatic("jx", path.Join(pipedir, "jgn_joined.txt"))
	// Cut out unwanted column
	ct := scipipe.NewProc("ct", "cut -d'\t' -f 1 --complement {i:jy} > {os:co}")
	ct.SetPathStatic("co", path.Join(pipedir, "jgn_cut.txt"))
	// Compress the result
	sz := scipipe.NewProc("sz", fmt.Sprintf("sztool -c {i:zi} %s", path.Join(config.TempDir, "matches_sn.txt.sz")))
	jo.In("mx").Connect(ma.Out("ma"))
	jo.In("gx").Connect(gn.Out("gn"))
	ct.In("jy").Connect(jo.Out("jx"))
	sz.In("zi").Connect(ct.Out("co"))
	wf := scipipe.NewWorkflow("jgn")
	wf.AddProcs(ma, gn, jo, ct, sz)
	wf.SetDriver(sz)
	wf.Run()
	logger.Printf("joinGeneNames done")
}
// joinReadNames joins the gene-annotated matches back to the original
// read names (from reads_sorted.txt.sz) and writes the final
// tab-delimited results to config.ResultsFileName.
func joinReadNames() {
	logger.Printf("starting joinReadNames")
	// The workflow hangs if the results file already exists, so
	// remove it.
	_, err := os.Stat(config.ResultsFileName)
	if err == nil {
		err := os.Remove(config.ResultsFileName)
		if err != nil {
			panic(err)
		}
	} else if os.IsNotExist(err) {
		// do nothing
	} else {
		panic(err)
	}
	// Decompress matches
	ma := scipipe.NewProc("ma", fmt.Sprintf("sztool -d %s > {os:ma}",
		path.Join(config.TempDir, "matches_sn.txt.sz")))
	ma.SetPathStatic("ma", path.Join(pipedir, "jrn_ma.txt"))
	// Decompress sorted reads
	rd := scipipe.NewProc("rd", fmt.Sprintf("sztool -d %s > {os:rd}",
		path.Join(config.TempDir, "reads_sorted.txt.sz")))
	rd.SetPathStatic("rd", path.Join(pipedir, "jrn_rd.txt"))
	// Sort the matches
	sm := scipipe.NewProc("sm", fmt.Sprintf("sort %s %s -k1 %s {i:in} > {os:sort}", sortbuf, sortpar, sortTmpFlag))
	sm.SetPathStatic("sort", path.Join(pipedir, "jrn_sort.txt"))
	// Join the sorted matches with the reads
	jo := scipipe.NewProc("jo", "join -1 1 -2 1 -t'\t' {i:srx} {i:rdx} > {o:out}")
	jo.SetPathStatic("out", config.ResultsFileName)
	snk := scipipe.NewSink("snk")
	// Connect the network
	sm.In("in").Connect(ma.Out("ma"))
	jo.In("srx").Connect(sm.Out("sort"))
	jo.In("rdx").Connect(rd.Out("rd"))
	snk.Connect(jo.Out("out"))
	wf := scipipe.NewWorkflow("jrn")
	wf.AddProcs(ma, rd, sm, jo)
	wf.SetDriver(snk)
	wf.Run()
	logger.Printf("joinReadNames done")
}
// setupLog creates muscato.log inside the log directory and installs a
// time-stamped logger on the package-level logger variable.
func setupLog() {
	fid, err := os.Create(path.Join(config.LogDir, "muscato.log"))
	if err != nil {
		panic(err)
	}
	logger = log.New(fid, "", log.Ltime)
}
// saveConfig writes the effective configuration, in JSON format, into
// the log directory and records its path in configFilePath so it can
// be passed to the helper programs.  Fix: the output path was
// previously computed twice with separate path.Join calls, risking
// silent divergence.
func saveConfig(config *utils.Config) {
	fname := path.Join(config.LogDir, "config.json")
	fid, err := os.Create(fname)
	if err != nil {
		panic(err)
	}
	defer fid.Close()
	enc := json.NewEncoder(fid)
	if err := enc.Encode(config); err != nil {
		panic(err)
	}
	configFilePath = fname
}
// handleArgs defines and parses the command-line flags, optionally
// loads a JSON configuration file, and overlays explicitly provided
// flags on top of the file's values.  Because the zero value ("" or 0)
// is the "not provided" sentinel, a flag cannot reset a config-file
// value back to zero/empty.  Fix: the NumHash usage string previously
// read "Number of hashses".
func handleArgs() {
	ConfigFileName := flag.String("ConfigFileName", "", "JSON file containing configuration parameters")
	ReadFileName := flag.String("ReadFileName", "", "Sequencing read file (fastq format)")
	GeneFileName := flag.String("GeneFileName", "", "Gene file name (processed form)")
	GeneIdFileName := flag.String("GeneIdFileName", "", "Gene ID file name (processed form)")
	ResultsFileName := flag.String("ResultsFileName", "", "File name for results")
	WindowsRaw := flag.String("Windows", "", "Starting position of each window")
	WindowWidth := flag.Int("WindowWidth", 0, "Width of each window")
	BloomSize := flag.Int("BloomSize", 0, "Size of Bloom filter, in bits")
	NumHash := flag.Int("NumHash", 0, "Number of hashes")
	PMatch := flag.Float64("PMatch", 0, "Required proportion of matching positions")
	MinDinuc := flag.Int("MinDinuc", 0, "Minimum number of dinucleotides to check for match")
	TempDir := flag.String("TempDir", "", "Workspace for temporary files")
	MinReadLength := flag.Int("MinReadLength", 0, "Reads shorter than this length are skipped")
	MaxReadLength := flag.Int("MaxReadLength", 0, "Reads longer than this length are truncated")
	MaxMatches := flag.Int("MaxMatches", 0, "Return no more than this number of matches per window")
	MaxMergeProcs := flag.Int("MaxMergeProcs", 0, "Run this number of merge processes concurrently")
	MMTol := flag.Int("MMTol", 0, "Number of mismatches allowed above best fit")
	MatchMode := flag.String("MatchMode", "", "'first' (retain first matches meeting criteria) or 'best' (returns best matches meeting criteria)")
	NoCleanTmp := flag.Bool("NoCleanTmp", false, "Leave temporary files in TempDir")
	flag.Parse()
	// Start from the config file if given, otherwise an empty config.
	if *ConfigFileName != "" {
		config = utils.ReadConfig(*ConfigFileName)
	} else {
		config = new(utils.Config)
	}
	// Explicit flags override config-file values.
	if *ReadFileName != "" {
		config.ReadFileName = *ReadFileName
	}
	if *GeneFileName != "" {
		config.GeneFileName = *GeneFileName
	}
	if *GeneIdFileName != "" {
		config.GeneIdFileName = *GeneIdFileName
	}
	if *WindowWidth != 0 {
		config.WindowWidth = *WindowWidth
	}
	if *BloomSize != 0 {
		config.BloomSize = uint64(*BloomSize)
	}
	if *NumHash != 0 {
		config.NumHash = *NumHash
	}
	if *PMatch != 0 {
		config.PMatch = *PMatch
	}
	if *MinDinuc != 0 {
		config.MinDinuc = *MinDinuc
	}
	if *TempDir != "" {
		config.TempDir = *TempDir
	}
	if *MinReadLength != 0 {
		config.MinReadLength = *MinReadLength
	}
	if *MaxReadLength != 0 {
		config.MaxReadLength = *MaxReadLength
	}
	if *MaxMatches != 0 {
		config.MaxMatches = *MaxMatches
	}
	if *MaxMergeProcs != 0 {
		config.MaxMergeProcs = *MaxMergeProcs
	}
	if *MatchMode != "" {
		config.MatchMode = *MatchMode
	}
	if *MMTol != 0 {
		config.MMTol = *MMTol
	}
	if *ResultsFileName != "" {
		config.ResultsFileName = *ResultsFileName
	}
	if *NoCleanTmp {
		config.NoCleanTmp = true
	}
	if config.ResultsFileName == "" {
		print("ResultsFileName must be specified. Run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	// Windows is given as a comma-separated list of integer offsets.
	if *WindowsRaw != "" {
		toks := strings.Split(*WindowsRaw, ",")
		var itoks []int
		for _, x := range toks {
			y, err := strconv.Atoi(x)
			if err != nil {
				panic(err)
			}
			itoks = append(itoks, y)
		}
		config.Windows = itoks
	}
}
// checkArgs validates the merged configuration, exiting with a message
// when a required value is missing and filling in documented defaults
// for optional values.  Fix: the help-hint messages previously read
// "'muscato --help for more information" with an unbalanced quote.
func checkArgs() {
	if config.ReadFileName == "" {
		os.Stderr.WriteString("ReadFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.GeneFileName == "" {
		os.Stderr.WriteString("GeneFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.GeneIdFileName == "" {
		os.Stderr.WriteString("GeneIdFileName not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.ResultsFileName == "" {
		// Defensive default; handleArgs already exits when this is empty.
		config.ResultsFileName = "results.txt"
		os.Stderr.WriteString("ResultsFileName not provided, defaulting to 'results.txt'\n\n")
	}
	if len(config.Windows) == 0 {
		os.Stderr.WriteString("Windows not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.WindowWidth == 0 {
		os.Stderr.WriteString("WindowWidth not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.BloomSize == 0 {
		os.Stderr.WriteString("BloomSize not provided, defaulting to 4 billion.\n\n")
		config.BloomSize = 4 * 1000 * 1000 * 1000
	}
	if config.NumHash == 0 {
		os.Stderr.WriteString("NumHash not provided, defaulting to 20.\n\n")
		config.NumHash = 20
	}
	if config.PMatch == 0 {
		os.Stderr.WriteString("PMatch not provided, defaulting to 1.\n\n")
		config.PMatch = 1
	}
	if config.MaxReadLength == 0 {
		os.Stderr.WriteString("MaxReadLength not provided, run 'muscato --help' for more information.\n\n")
		os.Exit(1)
	}
	if config.MaxMatches == 0 {
		os.Stderr.WriteString("MaxMatches not provided, defaulting to 1 million\n\n")
		config.MaxMatches = 1000 * 1000
	}
	if config.MaxMergeProcs == 0 {
		os.Stderr.WriteString("MaxMergeProcs not provided, defaulting to 3\n\n")
		config.MaxMergeProcs = 3
	}
	if !strings.HasSuffix(config.ReadFileName, ".fastq") {
		msg := fmt.Sprintf("Warning: %s may not be a fastq file, continuing anyway\n\n",
			config.ReadFileName)
		os.Stderr.WriteString(msg)
	}
	if config.MatchMode == "" {
		os.Stderr.WriteString("MatchMode not provided, defaulting to 'best'\n")
		config.MatchMode = "best"
	}
}
func setupEnvs() {
err := os.Setenv("LC_ALL", "C")
if err != nil {
panic(err)
}
home := os.Getenv("HOME")
gopath := path.Join(home, "go")
err = os.Setenv("GOPATH", gopath)
if err != nil {
panic(err)
}
err = os.Setenv("PATH", os.Getenv("PATH")+":"+home+"/go/bin")
if err != nil {
panic(err)
}
}
// makeTemp creates the per-run directory layout for temporary files,
// pipes, sort scratch space, and logs.  A fresh UUID keys every
// directory so concurrent runs cannot collide.
func makeTemp() {
	// temp files, log files, etc. are stored in directories defined by this unique id.
	xuid, err := uuid.NewUUID()
	if err != nil {
		panic(err)
	}
	uid := xuid.String()
	if config.TempDir == "" {
		config.TempDir = path.Join("muscato_tmp", uid)
	} else {
		// Overwrite the provided TempDir with a subdirectory.
		config.TempDir = path.Join(config.TempDir, uid)
	}
	err = os.MkdirAll(config.TempDir, 0755)
	if err != nil {
		panic(err)
	}
	// The directory where all pipes are written, needs to be in a
	// filesystem that supports pipes..
	pipedir = path.Join("/tmp/muscato/pipes", uid)
	err = os.MkdirAll(pipedir, 0755)
	if err != nil {
		panic(err)
	}
	// Setup the directory for logging.
	if config.LogDir == "" {
		config.LogDir = "muscato_logs"
	}
	config.LogDir = path.Join(config.LogDir, uid)
	err = os.MkdirAll(config.LogDir, 0755)
	if err != nil {
		panic(err)
	}
	// Configure the temporary directory for sort.
	sortTmpFlag = path.Join(config.TempDir, "sort")
	err = os.MkdirAll(sortTmpFlag, 0755)
	if err != nil {
		panic(err)
	}
	// sortTmpFlag becomes the literal flag string handed to GNU sort.
	sortTmpFlag = "--temporary-directory=" + sortTmpFlag
}
// writeNonMatch emits the reads that were never matched.  It loads the
// first column of the results file into a Bloom filter, then re-reads
// the sorted reads and writes every read whose sequence is not in the
// filter to a fastq file named <results>.nonmatch.<ext>.fastq next to
// the results file, with placeholder '!' quality scores.
// NOTE(review): Bloom-filter false positives can cause a small number
// of unmatched reads to be silently omitted.
func writeNonMatch() {
	logger.Print("Starting writeNonMatch")
	// Reader for the match file
	inf, err := os.Open(config.ResultsFileName)
	if err != nil {
		panic(err)
	}
	defer inf.Close()
	// Build a bloom filter based on the matched sequences
	// (4e9 bits = ~500 MB resident).
	billion := uint(1000 * 1000 * 1000)
	bf := bloom.New(4*billion, 5)
	scanner := bufio.NewScanner(inf)
	scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
	for scanner.Scan() {
		f := bytes.Fields(scanner.Bytes())
		bf.Add(f[0])
	}
	if err := scanner.Err(); err != nil {
		panic(err)
	}
	// Open the nonmatch output file,
	// e.g. results.txt -> results.nonmatch.txt.fastq
	a, b := path.Split(config.ResultsFileName)
	c := strings.Split(b, ".")
	d := c[len(c)-1]
	c[len(c)-1] = "nonmatch"
	c = append(c, d+".fastq")
	outname := path.Join(a, strings.Join(c, "."))
	out, err := os.Create(outname)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	wtr := bufio.NewWriter(out)
	defer wtr.Flush()
	// Check each read to see if it was matched.
	rfname := path.Join(config.TempDir, "reads_sorted.txt.sz")
	inf, err = os.Open(rfname)
	if err != nil {
		panic(err)
	}
	defer inf.Close()
	rdr := snappy.NewReader(inf)
	scanner = bufio.NewScanner(rdr)
	// NOTE(review): this scanner's Err() is never checked after the loop.
	for scanner.Scan() {
		// Fields per prepReads: f[0]=sequence, f[1]=count, f[2]=names.
		f := bytes.Fields(scanner.Bytes())
		if !bf.Test(f[0]) {
			// NOTE(review): fastq name lines conventionally begin with
			// '@'; none is written here — confirm consumers accept this.
			_, err := wtr.Write(f[2])
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n"))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write(f[0])
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n+\n"))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write(bytes.Repeat([]byte{'!'}, len(f[0])))
			if err != nil {
				panic(err)
			}
			_, err = wtr.Write([]byte("\n"))
			if err != nil {
				panic(err)
			}
		}
	}
	logger.Printf("writeNonMatch done")
}
// run executes every stage of the Muscato pipeline, in order.
func run() {
	for _, step := range []func(){
		prepReads,
		windowReads,
		sortWindows,
		screen,
		sortBloom,
		confirm,
		combineWindows,
		sortByGeneId,
		joinGeneNames,
		joinReadNames,
		writeNonMatch,
	} {
		step()
	}
}
// clean removes the pipe directory and, unless NoCleanTmp is set, the
// temporary file directory.  Failures are logged but never fatal.
func clean() {
	logger.Printf("Removing pipes...")
	if err := os.RemoveAll(pipedir); err != nil {
		logger.Print("Can't remove pipes:")
		logger.Print(err)
		logger.Printf("Continuing anyway...\n")
	}
	if config.NoCleanTmp {
		return
	}
	logger.Printf("Removing temporary files...")
	if err := os.RemoveAll(config.TempDir); err != nil {
		logger.Print("Can't remove temporary files:")
		logger.Print(err)
		logger.Printf("Continuing anyway...\n")
	}
}
// main wires together the full Muscato pipeline: merge and validate
// configuration, prepare environment and directories, run all stages,
// then clean up.
func main() {
	handleArgs()
	checkArgs()
	setupEnvs()
	// makeTemp must run before saveConfig/setupLog: both write into LogDir.
	makeTemp()
	saveConfig(config)
	setupLog()
	logger.Printf("Storing temporary files in %s", config.TempDir)
	logger.Printf("Storing log files in %s", config.LogDir)
	run()
	clean()
	logger.Printf("All done, exiting")
}
|
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config,SharedImageGallery,SharedImageGalleryDestination,PlanInformation
package arm
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net"
"regexp"
"strings"
"time"
"github.com/hashicorp/packer/common/random"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/masterzen/winrm"
azcommon "github.com/hashicorp/packer/builder/azure/common"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/builder/azure/common/constants"
"github.com/hashicorp/packer/builder/azure/pkcs12"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/hcl2template"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"golang.org/x/crypto/ssh"
)
// Default values applied when the user omits the corresponding
// configuration option (see provideDefaultValues and the builder setup).
const (
	DefaultImageVersion                      = "latest"
	DefaultUserName                          = "packer"
	DefaultPrivateVirtualNetworkWithPublicIp = false
	DefaultVMSize                            = "Standard_A1"
	DefaultKeyVaultSKU                       = "standard"
)
// Validation patterns for Azure resource names; compiled into the package
// level regexps below.
const (
	// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#naming-rules-and-restrictions
	// Regular expressions in Go are not expressive enough, such that the regular expression returned by Azure
	// can be used (no backtracking).
	//
	//  -> ^[^_\W][\w-._]{0,79}(?<![-.])$
	//
	// This is not an exhaustive match, but it should be extremely close.
	validResourceGroupNameRe = "^[^_\\W][\\w-._\\(\\)]{0,89}$"
	validManagedDiskName     = "^[^_\\W][\\w-._)]{0,79}$"
	validResourceNamePrefix  = "^[^_\\W][\\w-._)]{0,10}$"
)
// Validation regexps, compiled once at package initialization and used by
// the parameter assertions during Prepare.
var (
	reCaptureContainerName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{2,62}$`)
	reCaptureNamePrefix    = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9_\-\.]{0,23}$`)
	reManagedDiskName      = regexp.MustCompile(validManagedDiskName)
	reResourceGroupName    = regexp.MustCompile(validResourceGroupNameRe)
	reSnapshotName         = regexp.MustCompile(`^[A-Za-z0-9_]{1,79}$`)
	reSnapshotPrefix       = regexp.MustCompile(`^[A-Za-z0-9_]{1,59}$`)
	reResourceNamePrefix   = regexp.MustCompile(validResourceNamePrefix)
)
// PlanInformation describes the Azure Marketplace purchase plan of the
// source image; populated from the `plan_info` configuration option.
type PlanInformation struct {
	// The plan name, required.
	PlanName string `mapstructure:"plan_name"`
	// The plan product, required.
	PlanProduct string `mapstructure:"plan_product"`
	// The plan publisher, required.
	PlanPublisher string `mapstructure:"plan_publisher"`
	// Promotion code accepted by some images, optional.
	PlanPromotionCode string `mapstructure:"plan_promotion_code"`
}
// SharedImageGallery identifies an existing Shared Image Gallery image to
// use as the build source; populated from the `shared_image_gallery`
// configuration option.
type SharedImageGallery struct {
	// Subscription containing the gallery.
	Subscription string `mapstructure:"subscription"`
	// Resource group containing the gallery.
	ResourceGroup string `mapstructure:"resource_group"`
	// Name of the gallery.
	GalleryName string `mapstructure:"gallery_name"`
	// Name of the image definition within the gallery.
	ImageName string `mapstructure:"image_name"`
	// Specify a specific version of an OS to boot from.
	// Defaults to latest. There may be a difference in versions available
	// across regions due to image synchronization latency. To ensure a consistent
	// version across regions set this value to one that is available in all
	// regions where you are deploying.
	ImageVersion string `mapstructure:"image_version" required:"false"`
}
// SharedImageGalleryDestination describes where the built managed image is
// published as a Shared Gallery Image version; populated from the
// `shared_image_gallery_destination` configuration option.
type SharedImageGalleryDestination struct {
	// Resource group of the destination gallery.
	SigDestinationResourceGroup string `mapstructure:"resource_group"`
	// Name of the destination gallery.
	SigDestinationGalleryName string `mapstructure:"gallery_name"`
	// Image definition name to publish under.
	SigDestinationImageName string `mapstructure:"image_name"`
	// Version to publish the image as.
	SigDestinationImageVersion string `mapstructure:"image_version"`
	// Regions the published image version is replicated to.
	SigDestinationReplicationRegions []string `mapstructure:"replication_regions"`
}
// Config is the complete configuration for the Azure ARM builder. It is
// decoded from the user's template by Prepare; lowercase fields are
// computed at runtime and never set by the user.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	// Authentication via OAUTH
	ClientConfig client.Config `mapstructure:",squash"`
	// If set with one or more resource ids of user assigned managed identities, they will be configured on the VM.
	// See [documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
	// for how to acquire tokens within the VM.
	// To assign a user assigned managed identity to a VM, the provided account or service principal must have [Managed Identity Operator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#managed-identity-operator)
	// and [Virtual Machine Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#virtual-machine-contributor) role assignments.
	UserAssignedManagedIdentities []string `mapstructure:"user_assigned_managed_identities" required:"false"`
	// VHD prefix.
	CaptureNamePrefix string `mapstructure:"capture_name_prefix"`
	// Destination container name.
	CaptureContainerName string `mapstructure:"capture_container_name"`
	// Use a [Shared Gallery
	// image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
	// as the source for this build. *VHD targets are incompatible with this
	// build type* - the target must be a *Managed Image*.
	//
	// "shared_image_gallery": {
	// "subscription": "00000000-0000-0000-0000-00000000000",
	// "resource_group": "ResourceGroup",
	// "gallery_name": "GalleryName",
	// "image_name": "ImageName",
	// "image_version": "1.0.0"
	// }
	// "managed_image_name": "TargetImageName",
	// "managed_image_resource_group_name": "TargetResourceGroup"
	SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery" required:"false"`
	// The name of the Shared Image Gallery under which the managed image will be published as Shared Gallery Image version.
	//
	// Following is an example.
	//
	// "shared_image_gallery_destination": {
	// "resource_group": "ResourceGroup",
	// "gallery_name": "GalleryName",
	// "image_name": "ImageName",
	// "image_version": "1.0.0",
	// "replication_regions": ["regionA", "regionB", "regionC"]
	// }
	// "managed_image_name": "TargetImageName",
	// "managed_image_resource_group_name": "TargetResourceGroup"
	SharedGalleryDestination SharedImageGalleryDestination `mapstructure:"shared_image_gallery_destination"`
	// How long to wait for an image to be published to the shared image
	// gallery before timing out. If your Packer build is failing on the
	// Publishing to Shared Image Gallery step with the error `Original Error:
	// context deadline exceeded`, but the image is present when you check your
	// Azure dashboard, then you probably need to increase this timeout from
	// its default of "60m" (valid time units include `s` for seconds, `m` for
	// minutes, and `h` for hours.)
	SharedGalleryTimeout time.Duration `mapstructure:"shared_image_gallery_timeout"`
	// The end of life date (2006-01-02T15:04:05.99Z) of the gallery Image Version. This property
	// can be used for decommissioning purposes.
	SharedGalleryImageVersionEndOfLifeDate string `mapstructure:"shared_gallery_image_version_end_of_life_date" required:"false"`
	// The number of replicas of the Image Version to be created per region. This
	// property would take effect for a region when regionalReplicaCount is not specified.
	// Replica count must be between 1 and 10.
	SharedGalleryImageVersionReplicaCount int32 `mapstructure:"shared_image_gallery_replica_count" required:"false"`
	// If set to true, Virtual Machines deployed from the latest version of the
	// Image Definition won't use this Image Version.
	SharedGalleryImageVersionExcludeFromLatest bool `mapstructure:"shared_gallery_image_version_exclude_from_latest" required:"false"`
	// Name of the publisher to use for your base image (Azure Marketplace Images only). See
	// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
	// for details.
	//
	// CLI example `az vm image list-publishers --location westus`
	ImagePublisher string `mapstructure:"image_publisher" required:"true"`
	// Name of the publisher's offer to use for your base image (Azure Marketplace Images only). See
	// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
	// for details.
	//
	// CLI example
	// `az vm image list-offers --location westus --publisher Canonical`
	ImageOffer string `mapstructure:"image_offer" required:"true"`
	// SKU of the image offer to use for your base image (Azure Marketplace Images only). See
	// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
	// for details.
	//
	// CLI example
	// `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer`
	ImageSku string `mapstructure:"image_sku" required:"true"`
	// Specify a specific version of an OS to boot from.
	// Defaults to `latest`. There may be a difference in versions available
	// across regions due to image synchronization latency. To ensure a consistent
	// version across regions set this value to one that is available in all
	// regions where you are deploying.
	//
	// CLI example
	// `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
	ImageVersion string `mapstructure:"image_version" required:"false"`
	// URL to a custom VHD to use for your base image. If this value is set,
	// image_publisher, image_offer, image_sku, or image_version should not be set.
	ImageUrl string `mapstructure:"image_url" required:"true"`
	// Name of a custom managed image to use for your base image. If this value is set, do
	// not set image_publisher, image_offer, image_sku, or image_version.
	// If this value is set, the option
	// `custom_managed_image_resource_group_name` must also be set. See
	// [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
	// to learn more about managed images.
	CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"true"`
	// Name of a custom managed image's resource group to use for your base image. If this
	// value is set, image_publisher, image_offer, image_sku, or image_version should not be set.
	// If this value is set, the option
	// `custom_managed_image_name` must also be set. See
	// [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
	// to learn more about managed images.
	CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"true"`
	// Resolved resource ID of the custom managed image; computed at runtime
	// outside this file — NOTE(review): confirm where it is populated.
	customManagedImageID string
	// Azure datacenter in which your VM will build.
	Location string `mapstructure:"location"`
	// Size of the VM used for building. This can be changed when you deploy a
	// VM from your VHD. See
	// [pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
	// information. Defaults to `Standard_A1`.
	//
	// CLI example `az vm list-sizes --location westus`
	VMSize string `mapstructure:"vm_size" required:"false"`
	// Specify the managed image resource group name where the result of the
	// Packer build will be saved. The resource group must already exist. If
	// this value is set, the value managed_image_name must also be set. See
	// documentation to learn more about managed images.
	ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
	// Specify the managed image name where the result of the Packer build will
	// be saved. The image name must not exist ahead of time, and will not be
	// overwritten. If this value is set, the value
	// managed_image_resource_group_name must also be set. See documentation to
	// learn more about managed images.
	ManagedImageName string `mapstructure:"managed_image_name"`
	// Specify the storage account
	// type for a managed image. Valid values are Standard_LRS and Premium_LRS.
	// The default is Standard_LRS.
	ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type" required:"false"`
	// Parsed form of ManagedImageStorageAccountType; defaulted to
	// Standard_LRS in provideDefaultValues.
	managedImageStorageAccountType compute.StorageAccountTypes
	// If
	// managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
	// is created with the same name as this value before the VM is captured.
	ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name" required:"false"`
	// If
	// managed_image_data_disk_snapshot_prefix is set, snapshot of the data
	// disk(s) is created with the same prefix as this value before the VM is
	// captured.
	ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix" required:"false"`
	// Store the image in zone-resilient storage. You need to create it in a
	// region that supports [availability
	// zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
	ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
	// Name/value pair tags to apply to every resource deployed i.e. Resource
	// Group, VM, NIC, VNET, Public IP, KeyVault, etc. The user can define up
	// to 15 tags. Tag names cannot exceed 512 characters, and tag values
	// cannot exceed 256 characters.
	AzureTags map[string]*string `mapstructure:"azure_tags" required:"false"`
	// Same as [`azure_tags`](#azure_tags) but defined as a singular repeatable block
	// containing a `name` and a `value` field. In HCL2 mode the
	// [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
	// will allow you to create those programatically.
	AzureTag hcl2template.NameValues `mapstructure:"azure_tag" required:"false"`
	// Resource group under which the final artifact will be stored.
	ResourceGroupName string `mapstructure:"resource_group_name"`
	// Storage account under which the final artifact will be stored.
	StorageAccount string `mapstructure:"storage_account"`
	// temporary name assigned to the VM. If this
	// value is not set, a random value will be assigned. Knowing the resource
	// group and VM name allows one to execute commands to update the VM during a
	// Packer build, e.g. attach a resource disk to the VM.
	TempComputeName string `mapstructure:"temp_compute_name" required:"false"`
	// name assigned to the temporary resource group created during the build.
	// If this value is not set, a random value will be assigned. This resource
	// group is deleted at the end of the build.
	TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
	// Specify an existing resource group to run the build in.
	BuildResourceGroupName string `mapstructure:"build_resource_group_name"`
	// Specify an existing key vault to use for uploading certificates to the
	// instance to connect.
	BuildKeyVaultName string `mapstructure:"build_key_vault_name"`
	// Specify the KeyVault SKU to create during the build. Valid values are
	// standard or premium. The default value is standard.
	BuildKeyVaultSKU string `mapstructure:"build_key_vault_sku"`
	// Blob endpoint of the storage account; computed at runtime outside this
	// file — NOTE(review): confirm where it is populated.
	storageAccountBlobEndpoint string
	// This value allows you to
	// set a virtual_network_name and obtain a public IP. If this value is not
	// set and virtual_network_name is defined Packer is only allowed to be
	// executed from a host on the same subnet / virtual network.
	PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip" required:"false"`
	// Use a pre-existing virtual network for the
	// VM. This option enables private communication with the VM, no public IP
	// address is used or provisioned (unless you set
	// private_virtual_network_with_public_ip).
	VirtualNetworkName string `mapstructure:"virtual_network_name" required:"false"`
	// If virtual_network_name is set,
	// this value may also be set. If virtual_network_name is set, and this
	// value is not set the builder attempts to determine the subnet to use with
	// the virtual network. If the subnet cannot be found, or it cannot be
	// disambiguated, this value should be set.
	VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name" required:"false"`
	// If virtual_network_name is
	// set, this value may also be set. If virtual_network_name is set, and
	// this value is not set the builder attempts to determine the resource group
	// containing the virtual network. If the resource group cannot be found, or
	// it cannot be disambiguated, this value should be set.
	VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
	// Specify a file containing custom data to inject into the cloud-init
	// process. The contents of the file are read and injected into the ARM
	// template. The custom data will be passed to cloud-init for processing at
	// the time of provisioning. See
	// [documentation](http://cloudinit.readthedocs.io/en/latest/topics/examples.html)
	// to learn more about custom data, and how it can be used to influence the
	// provisioning process.
	CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
	// Base64-encoded contents of CustomDataFile; set by setCustomData.
	customData string
	// Used for creating images from Marketplace images. Please refer to
	// [Deploy an image with Marketplace
	// terms](https://aka.ms/azuremarketplaceapideployment) for more details.
	// Not all Marketplace images support programmatic deployment, and support
	// is controlled by the image publisher.
	//
	// An example plan\_info object is defined below.
	//
	// ```json
	// {
	// "plan_info": {
	// "plan_name": "rabbitmq",
	// "plan_product": "rabbitmq",
	// "plan_publisher": "bitnami"
	// }
	// }
	// ```
	//
	// `plan_name` (string) - The plan name, required. `plan_product` (string) -
	// The plan product, required. `plan_publisher` (string) - The plan publisher,
	// required. `plan_promotion_code` (string) - Some images accept a promotion
	// code, optional.
	//
	// Images created from the Marketplace with `plan_info` **must** specify
	// `plan_info` whenever the image is deployed. The builder automatically adds
	// tags to the image to ensure this information is not lost. The following
	// tags are added.
	//
	// ```
	// 1. PlanName
	// 2. PlanProduct
	// 3. PlanPublisher
	// 4. PlanPromotionCode
	// ```
	//
	PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
	// The default PollingDuration for azure is 15mins, this property will override
	// that value. See [Azure DefaultPollingDuration](https://godoc.org/github.com/Azure/go-autorest/autorest#pkg-constants)
	// If your Packer build is failing on the
	// ARM deployment step with the error `Original Error:
	// context deadline exceeded`, then you probably need to increase this timeout from
	// its default of "15m" (valid time units include `s` for seconds, `m` for
	// minutes, and `h` for hours.)
	PollingDurationTimeout time.Duration `mapstructure:"polling_duration_timeout" required:"false"`
	// If either Linux or Windows is specified Packer will
	// automatically configure authentication credentials for the provisioned
	// machine. For Linux this configures an SSH authorized key. For Windows
	// this configures a WinRM certificate.
	OSType string `mapstructure:"os_type" required:"false"`
	// Specify the size of the OS disk in GB
	// (gigabytes). Values of zero or less than zero are ignored.
	OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
	// The size(s) of any additional hard disks for the VM in gigabytes. If
	// this is not specified then the VM will only contain an OS disk. The
	// number of additional disks and maximum size of a disk depends on the
	// configuration of your VM. See
	// [Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds)
	// or
	// [Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds)
	// for more information.
	//
	// For VHD builds the final artifacts will be named
	// `PREFIX-dataDisk-<n>.UUID.vhd` and stored in the specified capture
	// container along side the OS disk. The additional disks are included in
	// the deployment template `PREFIX-vmTemplate.UUID`.
	//
	// For Managed build the final artifacts are included in the managed image.
	// The additional disk will have the same storage account type as the OS
	// disk, as specified with the `managed_image_storage_account_type`
	// setting.
	AdditionalDiskSize []int32 `mapstructure:"disk_additional_size" required:"false"`
	// Specify the disk caching type. Valid values
	// are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
	DiskCachingType string `mapstructure:"disk_caching_type" required:"false"`
	// Parsed form of DiskCachingType; defaulted to ReadWrite in
	// provideDefaultValues.
	diskCachingType compute.CachingTypes
	// Specify the list of IP addresses and CIDR blocks that should be
	// allowed access to the VM. If provided, an Azure Network Security
	// Group will be created with corresponding rules and be bound to
	// the subnet of the VM.
	// Providing `allowed_inbound_ip_addresses` in combination with
	// `virtual_network_name` is not allowed.
	AllowedInboundIpAddresses []string `mapstructure:"allowed_inbound_ip_addresses"`
	// Specify storage to store Boot Diagnostics -- Enabling this option
	// will create 2 Files in the specified storage account. (serial console log & screehshot file)
	// once the build is completed, it has to be removed manually.
	// see [here](https://docs.microsoft.com/en-us/azure/virtual-machines/troubleshooting/boot-diagnostics) for more info
	BootDiagSTGAccount string `mapstructure:"boot_diag_storage_account" required:"false"`
	// specify custom azure resource names during build limited to max 10 characters
	// this will set the prefix for the resources. The actuall resource names will be
	// `custom_resource_build_prefix` + resourcetype + 5 character random alphanumeric string
	CustomResourcePrefix string `mapstructure:"custom_resource_build_prefix" required:"false"`
	// Runtime Values
	UserName string `mapstructure-to-hcl2:",skip"`
	Password string `mapstructure-to-hcl2:",skip"`
	// Per-build generated names and secrets; see setRuntimeValues
	// (tmpWinRMCertificateUrl is populated elsewhere — NOTE(review): confirm).
	tmpAdminPassword       string
	tmpCertificatePassword string
	tmpResourceGroupName   string
	tmpComputeName         string
	tmpNicName             string
	tmpPublicIPAddressName string
	tmpDeploymentName      string
	tmpKeyVaultName        string
	tmpOSDiskName          string
	tmpDataDiskName        string
	tmpSubnetName          string
	tmpVirtualNetworkName  string
	tmpNsgName             string
	tmpWinRMCertificateUrl string
	// Authentication with the VM via SSH
	sshAuthorizedKey string
	// Authentication with the VM via WinRM
	winrmCertificate string
	// Communicator (SSH/WinRM) settings, squashed into the top level.
	Comm communicator.Config `mapstructure:",squash"`
	// Interpolation context used when decoding the raw configuration.
	ctx interpolate.Context
	// If you want packer to delete the
	// temporary resource group asynchronously set this value. It's a boolean
	// value and defaults to false. Important Setting this true means that
	// your builds are faster, however any failed deletes are not reported.
	AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete" required:"false"`
}
// keyVaultCertificate is the JSON document wrapped around the PFX
// certificate bundle before it is uploaded to Azure Key Vault; see
// createCertificate, which marshals and base64-encodes it.
type keyVaultCertificate struct {
	Data     string `json:"data"`
	DataType string `json:"dataType"`
	Password string `json:"password,omitempty"`
}
// toVMID returns the fully qualified Azure resource ID of the build VM,
// using the temporary resource group when one was generated and the
// user-supplied build resource group otherwise.
func (c *Config) toVMID() string {
	rg := c.BuildResourceGroupName
	if c.tmpResourceGroupName != "" {
		// A generated temporary resource group takes precedence.
		rg = c.tmpResourceGroupName
	}
	return fmt.Sprintf(
		"/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s",
		c.ClientConfig.SubscriptionID, rg, c.tmpComputeName)
}
// isManagedImage reports whether this build targets a managed image,
// which is the case exactly when a target image name was configured.
func (c *Config) isManagedImage() bool {
	return len(c.ManagedImageName) > 0
}
// toVirtualMachineCaptureParameters builds the SDK parameters used to
// capture the build VM to a VHD in the configured container/prefix.
// Existing VHDs are never overwritten.
func (c *Config) toVirtualMachineCaptureParameters() *compute.VirtualMachineCaptureParameters {
	return &compute.VirtualMachineCaptureParameters{
		DestinationContainerName: &c.CaptureContainerName,
		VhdPrefix:                &c.CaptureNamePrefix,
		OverwriteVhds:            to.BoolPtr(false),
	}
}
// toImageParameters builds the SDK payload for creating a managed image
// from the build VM, carrying over the configured location, tags and zone
// resiliency setting.
func (c *Config) toImageParameters() *compute.Image {
	return &compute.Image{
		ImageProperties: &compute.ImageProperties{
			SourceVirtualMachine: &compute.SubResource{
				ID: to.StringPtr(c.toVMID()),
			},
			StorageProfile: &compute.ImageStorageProfile{
				ZoneResilient: to.BoolPtr(c.ManagedImageZoneResilient),
			},
		},
		Location: to.StringPtr(c.Location),
		Tags:     c.AzureTags,
	}
}
// createCertificate generates a throw-away self-signed certificate for the
// temporary compute host (<name>.cloudapp.net), valid for 24 hours, and
// returns it as a base64-encoded Key Vault certificate JSON document whose
// PFX payload is protected with the generated certificate password.
func (c *Config) createCertificate() (string, error) {
	// 2048-bit RSA key pair backing the certificate.
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		err = fmt.Errorf("Failed to Generate Private Key: %s", err)
		return "", err
	}

	host := fmt.Sprintf("%s.cloudapp.net", c.tmpComputeName)
	notBefore := time.Now()
	notAfter := notBefore.Add(24 * time.Hour)

	// Random 128-bit serial number.
	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		err = fmt.Errorf("Failed to Generate Serial Number: %v", err)
		return "", err
	}

	template := x509.Certificate{
		SerialNumber: serialNumber,
		// Issuer is informational here: for self-signed certificates
		// x509.CreateCertificate takes the issuer from the parent's Subject.
		Issuer: pkix.Name{
			CommonName: host,
		},
		Subject: pkix.Name{
			CommonName: host,
		},
		NotBefore: notBefore,
		NotAfter:  notAfter,

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	// Self-signed: the template acts as both subject and parent.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
	if err != nil {
		err = fmt.Errorf("Failed to Create Certificate: %s", err)
		return "", err
	}

	// Bundle certificate and key into a password-protected PKCS#12 (PFX) blob.
	pfxBytes, err := pkcs12.Encode(derBytes, privateKey, c.tmpCertificatePassword)
	if err != nil {
		err = fmt.Errorf("Failed to encode certificate as PFX: %s", err)
		return "", err
	}

	// Wrap the PFX in the JSON document format Key Vault expects.
	keyVaultDescription := keyVaultCertificate{
		Data:     base64.StdEncoding.EncodeToString(pfxBytes),
		DataType: "pfx",
		Password: c.tmpCertificatePassword,
	}

	bytes, err := json.Marshal(keyVaultDescription)
	if err != nil {
		err = fmt.Errorf("Failed to marshal key vault description: %s", err)
		return "", err
	}

	return base64.StdEncoding.EncodeToString(bytes), nil
}
// Prepare interpolates and decodes the raw template configuration into c,
// applies defaults, generates per-build runtime values (temporary names,
// credentials, certificates), and validates the result. The []string of
// generated variables is always nil for this builder; a non-nil error
// aggregates every validation failure.
func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
	c.ctx.Funcs = azcommon.TemplateFuncs
	err := config.Decode(c, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &c.ctx,
	}, raws...)
	if err != nil {
		return nil, err
	}

	provideDefaultValues(c)
	setRuntimeValues(c)
	err = setUserNamePassword(c)
	if err != nil {
		return nil, err
	}

	// Copy the singular `azure_tag` blocks into the AzureTags map.
	// AzureTags may be nil when the user supplied only `azure_tag` blocks,
	// so allocate it before writing (assignment to a nil map panics).
	if len(c.AzureTag) > 0 {
		if c.AzureTags == nil {
			c.AzureTags = make(map[string]*string, len(c.AzureTag))
		}
		for _, kv := range c.AzureTag {
			v := kv.Value // copy so the stored pointer does not alias the loop variable
			c.AzureTags[kv.Name] = &v
		}
	}

	err = c.ClientConfig.SetDefaultValues()
	if err != nil {
		return nil, err
	}

	err = setCustomData(c)
	if err != nil {
		return nil, err
	}

	// NOTE: if the user did not specify a communicator, then default to both
	// SSH and WinRM. This is for backwards compatibility because the code did
	// not specifically force the user to set a communicator.
	if c.Comm.Type == "" || strings.EqualFold(c.Comm.Type, "ssh") {
		err = setSshValues(c)
		if err != nil {
			return nil, err
		}
	}

	if c.Comm.Type == "" || strings.EqualFold(c.Comm.Type, "winrm") {
		err = setWinRMCertificate(c)
		if err != nil {
			return nil, err
		}
	}

	var errs *packer.MultiError
	errs = packer.MultiErrorAppend(errs, c.Comm.Prepare(&c.ctx)...)

	assertRequiredParametersSet(c, errs)
	assertTagProperties(c, errs)
	if errs != nil && len(errs.Errors) > 0 {
		return nil, errs
	}

	return nil, nil
}
// setSshValues configures SSH communicator defaults: a 20 minute timeout
// when none was set, and an authorized key derived either from the
// user-supplied private key file or from a freshly generated key pair.
func setSshValues(c *Config) error {
	if c.Comm.SSHTimeout == 0 {
		c.Comm.SSHTimeout = 20 * time.Minute
	}

	if c.Comm.SSHPrivateKeyFile != "" {
		// Derive the public half from the supplied private key.
		privateKeyBytes, err := c.Comm.ReadSSHPrivateKeyFile()
		if err != nil {
			return err
		}
		signer, err := ssh.ParsePrivateKey(privateKeyBytes)
		if err != nil {
			return err
		}
		publicKey := signer.PublicKey()
		// authorized_keys line format: "<type> <base64 key> <comment>".
		c.sshAuthorizedKey = fmt.Sprintf("%s %s packer Azure Deployment%s",
			publicKey.Type(),
			base64.StdEncoding.EncodeToString(publicKey.Marshal()),
			time.Now().Format(time.RFC3339))
		c.Comm.SSHPrivateKey = privateKeyBytes
	} else {
		// No key supplied: generate a fresh OpenSSH key pair for this build.
		sshKeyPair, err := NewOpenSshKeyPair()
		if err != nil {
			return err
		}
		c.sshAuthorizedKey = sshKeyPair.AuthorizedKey()
		c.Comm.SSHPrivateKey = sshKeyPair.PrivateKey()
	}

	return nil
}
// setWinRMCertificate installs the NTLM WinRM transport decorator and
// generates the self-signed certificate used to authenticate the WinRM
// connection. On certificate-generation failure, c.winrmCertificate is
// left untouched instead of being set to the empty string.
func setWinRMCertificate(c *Config) error {
	c.Comm.WinRMTransportDecorator =
		func() winrm.Transporter {
			return &winrm.ClientNTLM{}
		}

	cert, err := c.createCertificate()
	if err != nil {
		return err
	}
	c.winrmCertificate = cert
	return nil
}
// setRuntimeValues populates the per-build c.tmp* fields with generated
// names and secrets, honoring the user-supplied overrides for the compute
// name and the temporary resource group name.
func setRuntimeValues(c *Config) {
	var tempName = NewTempName(c.CustomResourcePrefix)

	c.tmpAdminPassword = tempName.AdminPassword
	// store so that we can access this later during provisioning
	packer.LogSecretFilter.Set(c.tmpAdminPassword)

	c.tmpCertificatePassword = tempName.CertificatePassword
	if c.TempComputeName == "" {
		c.tmpComputeName = tempName.ComputeName
	} else {
		// User-supplied VM name takes precedence over the generated one.
		c.tmpComputeName = c.TempComputeName
	}
	c.tmpDeploymentName = tempName.DeploymentName
	// Only set tmpResourceGroupName if no name has been specified
	if c.TempResourceGroupName == "" && c.BuildResourceGroupName == "" {
		c.tmpResourceGroupName = tempName.ResourceGroupName
	} else if c.TempResourceGroupName != "" && c.BuildResourceGroupName == "" {
		c.tmpResourceGroupName = c.TempResourceGroupName
	}
	c.tmpNicName = tempName.NicName
	c.tmpPublicIPAddressName = tempName.PublicIPAddressName
	c.tmpOSDiskName = tempName.OSDiskName
	c.tmpDataDiskName = tempName.DataDiskName
	c.tmpSubnetName = tempName.SubnetName
	c.tmpVirtualNetworkName = tempName.VirtualNetworkName
	c.tmpNsgName = tempName.NsgName
	c.tmpKeyVaultName = tempName.KeyVaultName
}
// setUserNamePassword reconciles the builder-level UserName/Password with
// the communicator-specific credentials: builder defaults seed empty
// communicator fields, and the communicator values then become
// authoritative. For WinRM the final password is also validated against
// Azure's complexity rules.
func setUserNamePassword(c *Config) error {
	// Set default credentials generated by the builder
	c.UserName = DefaultUserName
	c.Password = c.tmpAdminPassword

	// Set communicator specific credentials and update defaults if different.
	// Communicator specific credentials need to be updated as the standard Packer
	// SSHConfigFunc and WinRMConfigFunc use communicator specific credentials, unless overwritten.

	// SSH comm
	if c.Comm.SSHUsername == "" {
		c.Comm.SSHUsername = c.UserName
	}
	c.UserName = c.Comm.SSHUsername

	if c.Comm.SSHPassword == "" {
		c.Comm.SSHPassword = c.Password
	}
	c.Password = c.Comm.SSHPassword

	// An explicit "ssh" communicator never reaches the WinRM handling below.
	if c.Comm.Type == "ssh" {
		return nil
	}

	// WinRM comm
	if c.Comm.WinRMUser == "" {
		c.Comm.WinRMUser = c.UserName
	}
	c.UserName = c.Comm.WinRMUser

	if c.Comm.WinRMPassword == "" {
		// Configure password settings using Azure generated credentials
		c.Comm.WinRMPassword = c.Password
	}

	if !isValidPassword(c.Comm.WinRMPassword) {
		return fmt.Errorf("The supplied \"winrm_password\" must be between 8-123 characters long and must satisfy at least 3 from the following: \n1) Contains an uppercase character \n2) Contains a lowercase character\n3) Contains a numeric digit\n4) Contains a special character\n5) Control characters are not allowed")
	}
	c.Password = c.Comm.WinRMPassword

	return nil
}
// setCustomData reads the configured custom-data file, if any, and stores
// its contents base64 encoded in c.customData for injection into the ARM
// template.
func setCustomData(c *Config) error {
	if c.CustomDataFile == "" {
		// No custom data configured; leave c.customData empty.
		return nil
	}

	contents, err := ioutil.ReadFile(c.CustomDataFile)
	if err != nil {
		return err
	}

	c.customData = base64.StdEncoding.EncodeToString(contents)
	return nil
}
// provideDefaultValues fills in defaults for options the user omitted:
// VM size, managed-image storage account type, disk caching type, image
// version (platform images only) and Key Vault SKU.
func provideDefaultValues(c *Config) {
	if c.VMSize == "" {
		c.VMSize = DefaultVMSize
	}

	if c.ManagedImageStorageAccountType == "" {
		c.managedImageStorageAccountType = compute.StorageAccountTypesStandardLRS
	}

	if c.DiskCachingType == "" {
		c.diskCachingType = compute.CachingTypesReadWrite
	}

	// Only platform images get a default version; other sources don't use it.
	if c.ImagePublisher != "" && c.ImageVersion == "" {
		c.ImageVersion = DefaultImageVersion
	}

	if c.BuildKeyVaultSKU == "" {
		c.BuildKeyVaultSKU = DefaultKeyVaultSKU
	}

	// NOTE(review): the error from SetDefaultValues is discarded here;
	// Prepare calls it again and does check the error — confirm this
	// duplicate, unchecked call is intentional.
	c.ClientConfig.SetDefaultValues()
}
// assertTagProperties validates the user-supplied Azure tags — at most 15
// tags, tag names up to 512 characters, tag values up to 256 characters —
// appending any violations to errs.
func assertTagProperties(c *Config, errs *packer.MultiError) {
	if len(c.AzureTags) > 15 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("a max of 15 tags are supported, but %d were provided", len(c.AzureTags)))
	}

	for k, v := range c.AzureTags {
		if len(k) > 512 {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("the tag name %q exceeds (%d) the 512 character limit", k, len(k)))
		}
		if len(*v) > 256 {
			// Fixed: this message previously said "tag name" even though it
			// reports a tag *value* exceeding the value-length limit.
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("the tag value %q exceeds (%d) the 256 character limit", *v, len(*v)))
		}
	}
}
func assertRequiredParametersSet(c *Config, errs *packer.MultiError) {
c.ClientConfig.Validate(errs)
/////////////////////////////////////////////
// Identity
if len(c.UserAssignedManagedIdentities) != 0 {
for _, rid := range c.UserAssignedManagedIdentities {
r, err := client.ParseResourceID(rid)
if err != nil {
errs = packer.MultiErrorAppend(errs, err)
}
if r.Provider != "" && !strings.EqualFold(r.Provider, "Microsoft.ManagedIdentity") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A valid user assigned managed identity resource id must have a correct resource provider"))
}
if r.ResourceType.String() != "" && !strings.EqualFold(r.ResourceType.String(), "userAssignedIdentities") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A valid user assigned managed identity resource id must have a correct resource type"))
}
}
}
/////////////////////////////////////////////
// Capture
if c.CaptureContainerName == "" && c.ManagedImageName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name or managed_image_name must be specified"))
}
if c.CaptureNamePrefix == "" && c.ManagedImageResourceGroupName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix or managed_image_resource_group_name must be specified"))
}
if (c.CaptureNamePrefix != "" || c.CaptureContainerName != "") && (c.ManagedImageResourceGroupName != "" || c.ManagedImageName != "") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("Either a VHD or a managed image can be built, but not both. Please specify either capture_container_name and capture_name_prefix or managed_image_resource_group_name and managed_image_name."))
}
if c.CaptureContainerName != "" {
if !reCaptureContainerName.MatchString(c.CaptureContainerName) {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must satisfy the regular expression %q.", reCaptureContainerName.String()))
}
if strings.HasSuffix(c.CaptureContainerName, "-") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must not end with a hyphen, e.g. '-'."))
}
if strings.Contains(c.CaptureContainerName, "--") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must not contain consecutive hyphens, e.g. '--'."))
}
if c.CaptureNamePrefix == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must be specified"))
}
if !reCaptureNamePrefix.MatchString(c.CaptureNamePrefix) {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must satisfy the regular expression %q.", reCaptureNamePrefix.String()))
}
if strings.HasSuffix(c.CaptureNamePrefix, "-") || strings.HasSuffix(c.CaptureNamePrefix, ".") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must not end with a hyphen or period."))
}
}
if c.TempResourceGroupName != "" && c.BuildResourceGroupName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("The settings temp_resource_group_name and build_resource_group_name cannot both be defined. Please define one or neither."))
}
/////////////////////////////////////////////
// Compute
toInt := func(b bool) int {
if b {
return 1
} else {
return 0
}
}
isImageUrl := c.ImageUrl != ""
isCustomManagedImage := c.CustomManagedImageName != "" || c.CustomManagedImageResourceGroupName != ""
isSharedGallery := c.SharedGallery.GalleryName != ""
isPlatformImage := c.ImagePublisher != "" || c.ImageOffer != "" || c.ImageSku != ""
countSourceInputs := toInt(isImageUrl) + toInt(isCustomManagedImage) + toInt(isPlatformImage) + toInt(isSharedGallery)
if countSourceInputs > 1 {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a VHD (image_url), Image Reference (image_publisher, image_offer, image_sku), a Managed Disk (custom_managed_disk_image_name, custom_managed_disk_resource_group_name), or a Shared Gallery Image (shared_image_gallery)"))
}
if isImageUrl && c.ManagedImageResourceGroupName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed image must be created from a managed image, it cannot be created from a VHD."))
}
if c.SharedGallery.GalleryName != "" {
if c.SharedGallery.Subscription == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.subscription must be specified"))
}
if c.SharedGallery.ResourceGroup == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.resource_group must be specified"))
}
if c.SharedGallery.ImageName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.image_name must be specified"))
}
if c.CaptureContainerName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("VHD Target [capture_container_name] is not supported when using Shared Image Gallery as source. Use managed_image_resource_group_name instead."))
}
if c.CaptureNamePrefix != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("VHD Target [capture_name_prefix] is not supported when using Shared Image Gallery as source. Use managed_image_name instead."))
}
} else if c.ImageUrl == "" && c.CustomManagedImageName == "" {
if c.ImagePublisher == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_publisher must be specified"))
}
if c.ImageOffer == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_offer must be specified"))
}
if c.ImageSku == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_sku must be specified"))
}
} else if c.ImageUrl == "" && c.ImagePublisher == "" {
if c.CustomManagedImageResourceGroupName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A custom_managed_image_resource_group_name must be specified"))
}
if c.CustomManagedImageName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A custom_managed_image_name must be specified"))
}
if c.ManagedImageResourceGroupName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed_image_resource_group_name must be specified"))
}
if c.ManagedImageName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed_image_name must be specified"))
}
} else {
if c.ImagePublisher != "" || c.ImageOffer != "" || c.ImageSku != "" || c.ImageVersion != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_url must not be specified if image_publisher, image_offer, image_sku, or image_version is specified"))
}
}
/////////////////////////////////////////////
// Deployment
xor := func(a, b bool) bool {
return (a || b) && !(a && b)
}
if !xor((c.StorageAccount != "" || c.ResourceGroupName != ""), (c.ManagedImageName != "" || c.ManagedImageResourceGroupName != "")) {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a VHD (storage_account and resource_group_name) or Managed Image (managed_image_resource_group_name and managed_image_name) output"))
}
if !xor(c.Location != "", c.BuildResourceGroupName != "") {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a location to create the resource group in or an existing build_resource_group_name, but not both."))
}
if c.ManagedImageName == "" && c.ManagedImageResourceGroupName == "" {
if c.StorageAccount == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A storage_account must be specified"))
}
if c.ResourceGroupName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A resource_group_name must be specified"))
}
}
if c.TempResourceGroupName != "" {
if ok, err := assertResourceGroupName(c.TempResourceGroupName, "temp_resource_group_name"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.BuildResourceGroupName != "" {
if ok, err := assertResourceGroupName(c.BuildResourceGroupName, "build_resource_group_name"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.ManagedImageResourceGroupName != "" {
if ok, err := assertResourceGroupName(c.ManagedImageResourceGroupName, "managed_image_resource_group_name"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.ManagedImageName != "" {
if ok, err := assertManagedImageName(c.ManagedImageName, "managed_image_name"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.ManagedImageName != "" && c.ManagedImageResourceGroupName != "" && c.SharedGalleryDestination.SigDestinationGalleryName != "" {
if c.SharedGalleryDestination.SigDestinationResourceGroup == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A resource_group must be specified for shared_image_gallery_destination"))
}
if c.SharedGalleryDestination.SigDestinationImageName == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_name must be specified for shared_image_gallery_destination"))
}
if c.SharedGalleryDestination.SigDestinationImageVersion == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_version must be specified for shared_image_gallery_destination"))
}
if len(c.SharedGalleryDestination.SigDestinationReplicationRegions) == 0 {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("A list of replication_regions must be specified for shared_image_gallery_destination"))
}
}
if c.SharedGalleryTimeout == 0 {
// default to a one-hour timeout. In the sdk, the default is 15 m.
c.SharedGalleryTimeout = 60 * time.Minute
}
if c.ManagedImageOSDiskSnapshotName != "" {
if ok, err := assertManagedImageOSDiskSnapshotName(c.ManagedImageOSDiskSnapshotName, "managed_image_os_disk_snapshot_name"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.ManagedImageDataDiskSnapshotPrefix != "" {
if ok, err := assertManagedImageDataDiskSnapshotName(c.ManagedImageDataDiskSnapshotPrefix, "managed_image_data_disk_snapshot_prefix"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.CustomResourcePrefix != "" {
if ok, err := assertResourceNamePrefix(c.CustomResourcePrefix, "custom_resource_build_prefix"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
if c.VirtualNetworkName == "" && c.VirtualNetworkResourceGroupName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_resource_group_name is specified, so must virtual_network_name"))
}
if c.VirtualNetworkName == "" && c.VirtualNetworkSubnetName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_subnet_name is specified, so must virtual_network_name"))
}
if c.AllowedInboundIpAddresses != nil && len(c.AllowedInboundIpAddresses) >= 1 {
if c.VirtualNetworkName != "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_name is specified, allowed_inbound_ip_addresses cannot be specified"))
} else {
if ok, err := assertAllowedInboundIpAddresses(c.AllowedInboundIpAddresses, "allowed_inbound_ip_addresses"); !ok {
errs = packer.MultiErrorAppend(errs, err)
}
}
}
/////////////////////////////////////////////
// Plan Info
if c.PlanInfo.PlanName != "" || c.PlanInfo.PlanProduct != "" || c.PlanInfo.PlanPublisher != "" || c.PlanInfo.PlanPromotionCode != "" {
if c.PlanInfo.PlanName == "" || c.PlanInfo.PlanProduct == "" || c.PlanInfo.PlanPublisher == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("if either plan_name, plan_product, plan_publisher, or plan_promotion_code are defined then plan_name, plan_product, and plan_publisher must be defined"))
} else {
if c.AzureTags == nil {
c.AzureTags = make(map[string]*string)
}
c.AzureTags["PlanInfo"] = &c.PlanInfo.PlanName
c.AzureTags["PlanProduct"] = &c.PlanInfo.PlanProduct
c.AzureTags["PlanPublisher"] = &c.PlanInfo.PlanPublisher
c.AzureTags["PlanPromotionCode"] = &c.PlanInfo.PlanPromotionCode
}
}
/////////////////////////////////////////////
// Polling Duration Timeout
if c.PollingDurationTimeout == 0 {
// In the sdk, the default is 15 m.
c.PollingDurationTimeout = 15 * time.Minute
}
/////////////////////////////////////////////
// OS
if strings.EqualFold(c.OSType, constants.Target_Linux) {
c.OSType = constants.Target_Linux
} else if strings.EqualFold(c.OSType, constants.Target_Windows) {
c.OSType = constants.Target_Windows
} else if c.OSType == "" {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("An os_type must be specified"))
} else {
errs = packer.MultiErrorAppend(errs, fmt.Errorf("The os_type %q is invalid", c.OSType))
}
switch c.ManagedImageStorageAccountType {
case "", string(compute.StorageAccountTypesStandardLRS):
c.managedImageStorageAccountType = compute.StorageAccountTypesStandardLRS
case string(compute.StorageAccountTypesPremiumLRS):
c.managedImageStorageAccountType = compute.StorageAccountTypesPremiumLRS
default:
errs = packer.MultiErrorAppend(errs, fmt.Errorf("The managed_image_storage_account_type %q is invalid", c.ManagedImageStorageAccountType))
}
switch c.DiskCachingType {
case string(compute.CachingTypesNone):
c.diskCachingType = compute.CachingTypesNone
case string(compute.CachingTypesReadOnly):
c.diskCachingType = compute.CachingTypesReadOnly
case "", string(compute.CachingTypesReadWrite):
c.diskCachingType = compute.CachingTypesReadWrite
default:
errs = packer.MultiErrorAppend(errs, fmt.Errorf("The disk_caching_type %q is invalid", c.DiskCachingType))
}
}
// assertManagedImageName reports whether name is an acceptable Azure
// managed image name; when it is not, the returned error explains the
// naming rule the value for setting must satisfy.
func assertManagedImageName(name, setting string) (bool, error) {
	if isValidAzureName(reManagedDiskName, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must match the regular expression %q, and not end with a '-' or '.'.", setting, validManagedDiskName)
}
// assertManagedImageOSDiskSnapshotName reports whether name is a valid OS
// disk snapshot name; when it is not, the returned error describes the
// constraint on the value for setting.
func assertManagedImageOSDiskSnapshotName(name, setting string) (bool, error) {
	if isValidAzureName(reSnapshotName, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length is 80 characters", setting)
}
// assertManagedImageDataDiskSnapshotName reports whether name is a valid
// data disk snapshot prefix; when it is not, the returned error describes
// the constraint on the value for setting.
func assertManagedImageDataDiskSnapshotName(name, setting string) (bool, error) {
	if isValidAzureName(reSnapshotPrefix, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length (excluding the prefix) is 60 characters", setting)
}
// assertResourceNamePrefix reports whether name is an acceptable custom
// resource name prefix; when it is not, the returned error describes the
// constraint on the value for setting.
func assertResourceNamePrefix(name, setting string) (bool, error) {
	if isValidAzureName(reResourceNamePrefix, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length is 10 characters", setting)
}
// assertAllowedInboundIpAddresses reports whether every entry in
// ipAddresses parses as either a single IP address or a CIDR block; the
// returned error names the offending setting when any entry does not.
func assertAllowedInboundIpAddresses(ipAddresses []string, setting string) (bool, error) {
	for _, entry := range ipAddresses {
		if net.ParseIP(entry) != nil {
			continue // plain IP address, accepted
		}
		if _, _, err := net.ParseCIDR(entry); err != nil {
			return false, fmt.Errorf("The setting %s must only contain valid IP addresses or CIDR blocks", setting)
		}
	}
	return true, nil
}
// assertResourceGroupName reports whether rgn is an acceptable Azure
// resource group name; when it is not, the returned error explains the
// naming rule the value for setting must satisfy.
func assertResourceGroupName(rgn, setting string) (bool, error) {
	if isValidAzureName(reResourceGroupName, rgn) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must match the regular expression %q, and not end with a '-' or '.'.", setting, validResourceGroupNameRe)
}
func isValidAzureName(re *regexp.Regexp, rgn string) bool {
return re.Match([]byte(rgn)) &&
!strings.HasSuffix(rgn, ".") &&
!strings.HasSuffix(rgn, "-")
}
// isValidPassword reports whether password satisfies the Azure password
// policy: a length of 8-123 characters (bytes), plus at least 3 of the 4
// complexity classes below:
//  1. an uppercase character
//  2. a lowercase character
//  3. a numeric digit
//  4. a special character
// (A fifth rule — no control characters — is a very specific case and is
// not validated here.)
func isValidPassword(password string) bool {
	if len(password) < 8 || len(password) > 123 {
		return false
	}
	met := 0
	for _, class := range []string{
		random.PossibleNumbers,
		random.PossibleLowerCase,
		random.PossibleUpperCase,
		random.PossibleSpecialCharacter,
	} {
		if strings.ContainsAny(password, class) {
			met++
		}
	}
	return met >= 3
}
// validateLocationZoneResiliency emits a warning through say when the
// configured location is not in the (hard-coded) set of regions known to
// support Availability Zones. It never fails the build.
//
// Docs on regions that support Availability Zones:
// https://docs.microsoft.com/en-us/azure/availability-zones/az-overview#regions-that-support-availability-zones
// Query technical names for locations:
// az account list-locations --query '[].name' -o tsv
func (c *Config) validateLocationZoneResiliency(say func(s string)) {
	supported := map[string]struct{}{
		"westeurope":    {},
		"centralus":     {},
		"eastus2":       {},
		"francecentral": {},
		"northeurope":   {},
		"southeastasia": {},
		"westus2":       {},
	}
	if _, found := supported[c.Location]; !found {
		say(fmt.Sprintf("WARNING: Zone resiliency may not be supported in %s, checkout the docs at https://docs.microsoft.com/en-us/azure/availability-zones/", c.Location))
	}
}
fix a bug
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config,SharedImageGallery,SharedImageGalleryDestination,PlanInformation
package arm
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"math/big"
"net"
"regexp"
"strings"
"time"
"github.com/hashicorp/packer/common/random"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/go-autorest/autorest/to"
"github.com/masterzen/winrm"
azcommon "github.com/hashicorp/packer/builder/azure/common"
"github.com/hashicorp/packer/builder/azure/common/client"
"github.com/hashicorp/packer/builder/azure/common/constants"
"github.com/hashicorp/packer/builder/azure/pkcs12"
"github.com/hashicorp/packer/common"
"github.com/hashicorp/packer/hcl2template"
"github.com/hashicorp/packer/helper/communicator"
"github.com/hashicorp/packer/helper/config"
"github.com/hashicorp/packer/packer"
"github.com/hashicorp/packer/template/interpolate"
"golang.org/x/crypto/ssh"
)
// Default values applied when the corresponding configuration options are
// left unset. NOTE(review): the pairings below are inferred from the names;
// confirm each against the code that consumes it.
const (
	// Image version used when image_version is not specified.
	DefaultImageVersion = "latest"
	// Admin user name for the build VM — presumably; verify against usage.
	DefaultUserName = "packer"
	// By default a VM on a pre-existing virtual network gets no public IP.
	DefaultPrivateVirtualNetworkWithPublicIp = false
	// VM size used when vm_size is not specified.
	DefaultVMSize = "Standard_A1"
	// KeyVault SKU used when build_key_vault_sku is not specified.
	DefaultKeyVaultSKU = "standard"
)
// Regular-expression sources for Azure resource name validation. The
// trailing '-'/'.' restriction (the lookbehind in Azure's own pattern,
// shown below) cannot be expressed in RE2 and is enforced separately by
// isValidAzureName.
const (
	// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#naming-rules-and-restrictions
	// Regular expressions in Go are not expressive enough, such that the regular expression returned by Azure
	// can be used (no backtracking).
	//
	// -> ^[^_\W][\w-._]{0,79}(?<![-.])$
	//
	// This is not an exhaustive match, but it should be extremely close.
	validResourceGroupNameRe = "^[^_\\W][\\w-._\\(\\)]{0,89}$"
	// NOTE(review): the bare ')' inside the character classes below looks
	// unintentional (compare the escaped "\\(\\)" pair above), and the
	// {0,89} vs {0,79} length bounds differ — confirm against Azure's
	// naming rules before changing either.
	validManagedDiskName = "^[^_\\W][\\w-._)]{0,79}$"
	validResourceNamePrefix = "^[^_\\W][\\w-._)]{0,10}$"
)
// Validators compiled once at package init; MustCompile panics on an
// invalid pattern, so a bad constant fails fast at startup rather than
// during a build.
var (
	// capture_container_name: lowercase letters/digits/hyphens, 3-63 chars,
	// starting with a letter or digit.
	reCaptureContainerName = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{2,62}$`)
	// capture_name_prefix: alphanumeric start, then up to 23 more of
	// letters, digits, '_', '-', '.'.
	reCaptureNamePrefix = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9_\-\.]{0,23}$`)
	reManagedDiskName = regexp.MustCompile(validManagedDiskName)
	reResourceGroupName = regexp.MustCompile(validResourceGroupNameRe)
	// Snapshot names: word characters only (the paired error messages say
	// max 80 / 60 characters; these patterns allow 79 / 59 — TODO confirm
	// which bound is intended).
	reSnapshotName = regexp.MustCompile(`^[A-Za-z0-9_]{1,79}$`)
	reSnapshotPrefix = regexp.MustCompile(`^[A-Za-z0-9_]{1,59}$`)
	reResourceNamePrefix = regexp.MustCompile(validResourceNamePrefix)
)
// PlanInformation identifies an Azure Marketplace purchase plan, as
// supplied through the plan_info configuration block.
type PlanInformation struct {
	PlanName string `mapstructure:"plan_name"`
	PlanProduct string `mapstructure:"plan_product"`
	PlanPublisher string `mapstructure:"plan_publisher"`
	// Optional promotion code accepted by some Marketplace images.
	PlanPromotionCode string `mapstructure:"plan_promotion_code"`
}
// SharedImageGallery identifies an existing Shared Image Gallery image to
// use as the build source (the shared_image_gallery configuration block).
type SharedImageGallery struct {
	// Subscription containing the gallery.
	Subscription string `mapstructure:"subscription"`
	// Resource group containing the gallery.
	ResourceGroup string `mapstructure:"resource_group"`
	// Name of the gallery.
	GalleryName string `mapstructure:"gallery_name"`
	// Name of the image definition within the gallery.
	ImageName string `mapstructure:"image_name"`
	// Specify a specific version of an OS to boot from.
	// Defaults to latest. There may be a difference in versions available
	// across regions due to image synchronization latency. To ensure a consistent
	// version across regions set this value to one that is available in all
	// regions where you are deploying.
	ImageVersion string `mapstructure:"image_version" required:"false"`
}
// SharedImageGalleryDestination describes where to publish the resulting
// managed image as a Shared Image Gallery version (the
// shared_image_gallery_destination configuration block).
type SharedImageGalleryDestination struct {
	SigDestinationResourceGroup string `mapstructure:"resource_group"`
	SigDestinationGalleryName string `mapstructure:"gallery_name"`
	SigDestinationImageName string `mapstructure:"image_name"`
	SigDestinationImageVersion string `mapstructure:"image_version"`
	// Regions the published image version is replicated to.
	SigDestinationReplicationRegions []string `mapstructure:"replication_regions"`
}
type Config struct {
common.PackerConfig `mapstructure:",squash"`
// Authentication via OAUTH
ClientConfig client.Config `mapstructure:",squash"`
// If set with one or more resource ids of user assigned managed identities, they will be configured on the VM.
// See [documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token)
// for how to acquire tokens within the VM.
// To assign a user assigned managed identity to a VM, the provided account or service principal must have [Managed Identity Operator](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#managed-identity-operator)
// and [Virtual Machine Contributor](https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles#virtual-machine-contributor) role assignments.
UserAssignedManagedIdentities []string `mapstructure:"user_assigned_managed_identities" required:"false"`
// VHD prefix.
CaptureNamePrefix string `mapstructure:"capture_name_prefix"`
// Destination container name.
CaptureContainerName string `mapstructure:"capture_container_name"`
// Use a [Shared Gallery
// image](https://azure.microsoft.com/en-us/blog/announcing-the-public-preview-of-shared-image-gallery/)
// as the source for this build. *VHD targets are incompatible with this
// build type* - the target must be a *Managed Image*.
//
// "shared_image_gallery": {
// "subscription": "00000000-0000-0000-0000-00000000000",
// "resource_group": "ResourceGroup",
// "gallery_name": "GalleryName",
// "image_name": "ImageName",
// "image_version": "1.0.0"
// }
// "managed_image_name": "TargetImageName",
// "managed_image_resource_group_name": "TargetResourceGroup"
SharedGallery SharedImageGallery `mapstructure:"shared_image_gallery" required:"false"`
// The name of the Shared Image Gallery under which the managed image will be published as Shared Gallery Image version.
//
// Following is an example.
//
// "shared_image_gallery_destination": {
// "resource_group": "ResourceGroup",
// "gallery_name": "GalleryName",
// "image_name": "ImageName",
// "image_version": "1.0.0",
// "replication_regions": ["regionA", "regionB", "regionC"]
// }
// "managed_image_name": "TargetImageName",
// "managed_image_resource_group_name": "TargetResourceGroup"
SharedGalleryDestination SharedImageGalleryDestination `mapstructure:"shared_image_gallery_destination"`
// How long to wait for an image to be published to the shared image
// gallery before timing out. If your Packer build is failing on the
// Publishing to Shared Image Gallery step with the error `Original Error:
// context deadline exceeded`, but the image is present when you check your
// Azure dashboard, then you probably need to increase this timeout from
// its default of "60m" (valid time units include `s` for seconds, `m` for
// minutes, and `h` for hours.)
SharedGalleryTimeout time.Duration `mapstructure:"shared_image_gallery_timeout"`
// The end of life date (2006-01-02T15:04:05.99Z) of the gallery Image Version. This property
// can be used for decommissioning purposes.
SharedGalleryImageVersionEndOfLifeDate string `mapstructure:"shared_gallery_image_version_end_of_life_date" required:"false"`
// The number of replicas of the Image Version to be created per region. This
// property would take effect for a region when regionalReplicaCount is not specified.
// Replica count must be between 1 and 10.
SharedGalleryImageVersionReplicaCount int32 `mapstructure:"shared_image_gallery_replica_count" required:"false"`
// If set to true, Virtual Machines deployed from the latest version of the
// Image Definition won't use this Image Version.
SharedGalleryImageVersionExcludeFromLatest bool `mapstructure:"shared_gallery_image_version_exclude_from_latest" required:"false"`
// Name of the publisher to use for your base image (Azure Marketplace Images only). See
// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
//
// CLI example `az vm image list-publishers --location westus`
ImagePublisher string `mapstructure:"image_publisher" required:"true"`
// Name of the publisher's offer to use for your base image (Azure Marketplace Images only). See
// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
//
// CLI example
// `az vm image list-offers --location westus --publisher Canonical`
ImageOffer string `mapstructure:"image_offer" required:"true"`
// SKU of the image offer to use for your base image (Azure Marketplace Images only). See
// [documentation](https://azure.microsoft.com/en-us/documentation/articles/resource-groups-vm-searching/)
// for details.
//
// CLI example
// `az vm image list-skus --location westus --publisher Canonical --offer UbuntuServer`
ImageSku string `mapstructure:"image_sku" required:"true"`
// Specify a specific version of an OS to boot from.
// Defaults to `latest`. There may be a difference in versions available
// across regions due to image synchronization latency. To ensure a consistent
// version across regions set this value to one that is available in all
// regions where you are deploying.
//
// CLI example
// `az vm image list --location westus --publisher Canonical --offer UbuntuServer --sku 16.04.0-LTS --all`
ImageVersion string `mapstructure:"image_version" required:"false"`
// URL to a custom VHD to use for your base image. If this value is set,
// image_publisher, image_offer, image_sku, or image_version should not be set.
ImageUrl string `mapstructure:"image_url" required:"true"`
// Name of a custom managed image to use for your base image. If this value is set, do
// not set image_publisher, image_offer, image_sku, or image_version.
// If this value is set, the option
// `custom_managed_image_resource_group_name` must also be set. See
// [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
// to learn more about managed images.
CustomManagedImageName string `mapstructure:"custom_managed_image_name" required:"true"`
// Name of a custom managed image's resource group to use for your base image. If this
// value is set, image_publisher, image_offer, image_sku, or image_version should not be set.
// If this value is set, the option
// `custom_managed_image_name` must also be set. See
// [documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview#images)
// to learn more about managed images.
CustomManagedImageResourceGroupName string `mapstructure:"custom_managed_image_resource_group_name" required:"true"`
customManagedImageID string
// Azure datacenter in which your VM will build.
Location string `mapstructure:"location"`
// Size of the VM used for building. This can be changed when you deploy a
// VM from your VHD. See
// [pricing](https://azure.microsoft.com/en-us/pricing/details/virtual-machines/)
// information. Defaults to `Standard_A1`.
//
// CLI example `az vm list-sizes --location westus`
VMSize string `mapstructure:"vm_size" required:"false"`
// Specify the managed image resource group name where the result of the
// Packer build will be saved. The resource group must already exist. If
// this value is set, the value managed_image_name must also be set. See
// documentation to learn more about managed images.
ManagedImageResourceGroupName string `mapstructure:"managed_image_resource_group_name"`
// Specify the managed image name where the result of the Packer build will
// be saved. The image name must not exist ahead of time, and will not be
// overwritten. If this value is set, the value
// managed_image_resource_group_name must also be set. See documentation to
// learn more about managed images.
ManagedImageName string `mapstructure:"managed_image_name"`
// Specify the storage account
// type for a managed image. Valid values are Standard_LRS and Premium_LRS.
// The default is Standard_LRS.
ManagedImageStorageAccountType string `mapstructure:"managed_image_storage_account_type" required:"false"`
managedImageStorageAccountType compute.StorageAccountTypes
// If
// managed_image_os_disk_snapshot_name is set, a snapshot of the OS disk
// is created with the same name as this value before the VM is captured.
ManagedImageOSDiskSnapshotName string `mapstructure:"managed_image_os_disk_snapshot_name" required:"false"`
// If
// managed_image_data_disk_snapshot_prefix is set, snapshot of the data
// disk(s) is created with the same prefix as this value before the VM is
// captured.
ManagedImageDataDiskSnapshotPrefix string `mapstructure:"managed_image_data_disk_snapshot_prefix" required:"false"`
// Store the image in zone-resilient storage. You need to create it in a
// region that supports [availability
// zones](https://docs.microsoft.com/en-us/azure/availability-zones/az-overview).
ManagedImageZoneResilient bool `mapstructure:"managed_image_zone_resilient" required:"false"`
// Name/value pair tags to apply to every resource deployed i.e. Resource
// Group, VM, NIC, VNET, Public IP, KeyVault, etc. The user can define up
// to 15 tags. Tag names cannot exceed 512 characters, and tag values
// cannot exceed 256 characters.
AzureTags map[string]*string `mapstructure:"azure_tags" required:"false"`
// Same as [`azure_tags`](#azure_tags) but defined as a singular repeatable block
// containing a `name` and a `value` field. In HCL2 mode the
// [`dynamic_block`](/docs/configuration/from-1.5/expressions#dynamic-blocks)
// will allow you to create those programatically.
AzureTag hcl2template.NameValues `mapstructure:"azure_tag" required:"false"`
// Resource group under which the final artifact will be stored.
ResourceGroupName string `mapstructure:"resource_group_name"`
// Storage account under which the final artifact will be stored.
StorageAccount string `mapstructure:"storage_account"`
// temporary name assigned to the VM. If this
// value is not set, a random value will be assigned. Knowing the resource
// group and VM name allows one to execute commands to update the VM during a
// Packer build, e.g. attach a resource disk to the VM.
TempComputeName string `mapstructure:"temp_compute_name" required:"false"`
// name assigned to the temporary resource group created during the build.
// If this value is not set, a random value will be assigned. This resource
// group is deleted at the end of the build.
TempResourceGroupName string `mapstructure:"temp_resource_group_name"`
// Specify an existing resource group to run the build in.
BuildResourceGroupName string `mapstructure:"build_resource_group_name"`
// Specify an existing key vault to use for uploading certificates to the
// instance to connect.
BuildKeyVaultName string `mapstructure:"build_key_vault_name"`
// Specify the KeyVault SKU to create during the build. Valid values are
// standard or premium. The default value is standard.
BuildKeyVaultSKU string `mapstructure:"build_key_vault_sku"`
storageAccountBlobEndpoint string
// This value allows you to
// set a virtual_network_name and obtain a public IP. If this value is not
// set and virtual_network_name is defined Packer is only allowed to be
// executed from a host on the same subnet / virtual network.
PrivateVirtualNetworkWithPublicIp bool `mapstructure:"private_virtual_network_with_public_ip" required:"false"`
// Use a pre-existing virtual network for the
// VM. This option enables private communication with the VM, no public IP
// address is used or provisioned (unless you set
// private_virtual_network_with_public_ip).
VirtualNetworkName string `mapstructure:"virtual_network_name" required:"false"`
// If virtual_network_name is set,
// this value may also be set. If virtual_network_name is set, and this
// value is not set the builder attempts to determine the subnet to use with
// the virtual network. If the subnet cannot be found, or it cannot be
// disambiguated, this value should be set.
VirtualNetworkSubnetName string `mapstructure:"virtual_network_subnet_name" required:"false"`
// If virtual_network_name is
// set, this value may also be set. If virtual_network_name is set, and
// this value is not set the builder attempts to determine the resource group
// containing the virtual network. If the resource group cannot be found, or
// it cannot be disambiguated, this value should be set.
VirtualNetworkResourceGroupName string `mapstructure:"virtual_network_resource_group_name" required:"false"`
// Specify a file containing custom data to inject into the cloud-init
// process. The contents of the file are read and injected into the ARM
// template. The custom data will be passed to cloud-init for processing at
// the time of provisioning. See
// [documentation](http://cloudinit.readthedocs.io/en/latest/topics/examples.html)
// to learn more about custom data, and how it can be used to influence the
// provisioning process.
CustomDataFile string `mapstructure:"custom_data_file" required:"false"`
customData string
// Used for creating images from Marketplace images. Please refer to
// [Deploy an image with Marketplace
// terms](https://aka.ms/azuremarketplaceapideployment) for more details.
// Not all Marketplace images support programmatic deployment, and support
// is controlled by the image publisher.
//
// An example plan\_info object is defined below.
//
// ```json
// {
// "plan_info": {
// "plan_name": "rabbitmq",
// "plan_product": "rabbitmq",
// "plan_publisher": "bitnami"
// }
// }
// ```
//
// `plan_name` (string) - The plan name, required. `plan_product` (string) -
// The plan product, required. `plan_publisher` (string) - The plan publisher,
// required. `plan_promotion_code` (string) - Some images accept a promotion
// code, optional.
//
// Images created from the Marketplace with `plan_info` **must** specify
// `plan_info` whenever the image is deployed. The builder automatically adds
// tags to the image to ensure this information is not lost. The following
// tags are added.
//
// ```
// 1. PlanName
// 2. PlanProduct
// 3. PlanPublisher
// 4. PlanPromotionCode
// ```
//
PlanInfo PlanInformation `mapstructure:"plan_info" required:"false"`
// The default PollingDuration for azure is 15mins, this property will override
// that value. See [Azure DefaultPollingDuration](https://godoc.org/github.com/Azure/go-autorest/autorest#pkg-constants)
// If your Packer build is failing on the
// ARM deployment step with the error `Original Error:
// context deadline exceeded`, then you probably need to increase this timeout from
// its default of "15m" (valid time units include `s` for seconds, `m` for
// minutes, and `h` for hours.)
PollingDurationTimeout time.Duration `mapstructure:"polling_duration_timeout" required:"false"`
// If either Linux or Windows is specified Packer will
// automatically configure authentication credentials for the provisioned
// machine. For Linux this configures an SSH authorized key. For Windows
// this configures a WinRM certificate.
OSType string `mapstructure:"os_type" required:"false"`
// Specify the size of the OS disk in GB
// (gigabytes). Values of zero or less than zero are ignored.
OSDiskSizeGB int32 `mapstructure:"os_disk_size_gb" required:"false"`
// The size(s) of any additional hard disks for the VM in gigabytes. If
// this is not specified then the VM will only contain an OS disk. The
// number of additional disks and maximum size of a disk depends on the
// configuration of your VM. See
// [Windows](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/about-disks-and-vhds)
// or
// [Linux](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/about-disks-and-vhds)
// for more information.
//
// For VHD builds the final artifacts will be named
// `PREFIX-dataDisk-<n>.UUID.vhd` and stored in the specified capture
// container along side the OS disk. The additional disks are included in
// the deployment template `PREFIX-vmTemplate.UUID`.
//
// For Managed build the final artifacts are included in the managed image.
// The additional disk will have the same storage account type as the OS
// disk, as specified with the `managed_image_storage_account_type`
// setting.
AdditionalDiskSize []int32 `mapstructure:"disk_additional_size" required:"false"`
// Specify the disk caching type. Valid values
// are None, ReadOnly, and ReadWrite. The default value is ReadWrite.
DiskCachingType string `mapstructure:"disk_caching_type" required:"false"`
diskCachingType compute.CachingTypes
// Specify the list of IP addresses and CIDR blocks that should be
// allowed access to the VM. If provided, an Azure Network Security
// Group will be created with corresponding rules and be bound to
// the subnet of the VM.
// Providing `allowed_inbound_ip_addresses` in combination with
// `virtual_network_name` is not allowed.
AllowedInboundIpAddresses []string `mapstructure:"allowed_inbound_ip_addresses"`
// Specify storage to store Boot Diagnostics -- Enabling this option
// will create 2 Files in the specified storage account. (serial console log & screehshot file)
// once the build is completed, it has to be removed manually.
// see [here](https://docs.microsoft.com/en-us/azure/virtual-machines/troubleshooting/boot-diagnostics) for more info
BootDiagSTGAccount string `mapstructure:"boot_diag_storage_account" required:"false"`
// specify custom azure resource names during build limited to max 10 characters
// this will set the prefix for the resources. The actuall resource names will be
// `custom_resource_build_prefix` + resourcetype + 5 character random alphanumeric string
CustomResourcePrefix string `mapstructure:"custom_resource_build_prefix" required:"false"`
// Runtime Values
UserName string `mapstructure-to-hcl2:",skip"`
Password string `mapstructure-to-hcl2:",skip"`
tmpAdminPassword string
tmpCertificatePassword string
tmpResourceGroupName string
tmpComputeName string
tmpNicName string
tmpPublicIPAddressName string
tmpDeploymentName string
tmpKeyVaultName string
tmpOSDiskName string
tmpDataDiskName string
tmpSubnetName string
tmpVirtualNetworkName string
tmpNsgName string
tmpWinRMCertificateUrl string
// Authentication with the VM via SSH
sshAuthorizedKey string
// Authentication with the VM via WinRM
winrmCertificate string
Comm communicator.Config `mapstructure:",squash"`
ctx interpolate.Context
// If you want packer to delete the
// temporary resource group asynchronously set this value. It's a boolean
// value and defaults to false. Important Setting this true means that
// your builds are faster, however any failed deletes are not reported.
AsyncResourceGroupDelete bool `mapstructure:"async_resourcegroup_delete" required:"false"`
}
// keyVaultCertificate is the JSON document Azure Key Vault expects when a
// certificate is imported as a secret value; createCertificate marshals one
// of these and base64-encodes the result.
type keyVaultCertificate struct {
	// Data holds the base64-encoded certificate bytes.
	Data string `json:"data"`
	// DataType names the encoding of Data; this builder always uses "pfx".
	DataType string `json:"dataType"`
	// Password protects the PFX blob; omitted from the JSON when empty.
	Password string `json:"password,omitempty"`
}
// toVMID builds the fully-qualified Azure resource ID of the build VM,
// preferring the temporary resource group when one was generated over the
// user-supplied build resource group.
func (c *Config) toVMID() string {
	resourceGroup := c.BuildResourceGroupName
	if c.tmpResourceGroupName != "" {
		resourceGroup = c.tmpResourceGroupName
	}
	return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachines/%s", c.ClientConfig.SubscriptionID, resourceGroup, c.tmpComputeName)
}
// isManagedImage reports whether this build targets a managed image rather
// than a VHD, keyed off managed_image_name being set.
func (c *Config) isManagedImage() bool {
	return c.ManagedImageName != ""
}
// toVirtualMachineCaptureParameters assembles the parameters used when the
// VM is captured to VHDs; overwriting existing VHDs is always disabled.
func (c *Config) toVirtualMachineCaptureParameters() *compute.VirtualMachineCaptureParameters {
	params := compute.VirtualMachineCaptureParameters{
		DestinationContainerName: &c.CaptureContainerName,
		VhdPrefix:                &c.CaptureNamePrefix,
		OverwriteVhds:            to.BoolPtr(false),
	}
	return &params
}
// toImageParameters assembles the managed image definition captured from
// the build VM, carrying over the configured location, tags, and zone
// resiliency setting.
func (c *Config) toImageParameters() *compute.Image {
	storageProfile := compute.ImageStorageProfile{
		ZoneResilient: to.BoolPtr(c.ManagedImageZoneResilient),
	}
	properties := compute.ImageProperties{
		SourceVirtualMachine: &compute.SubResource{
			ID: to.StringPtr(c.toVMID()),
		},
		StorageProfile: &storageProfile,
	}
	return &compute.Image{
		ImageProperties: &properties,
		Location:        to.StringPtr(c.Location),
		Tags:            c.AzureTags,
	}
}
// createCertificate generates a throwaway self-signed RSA certificate for
// the temporary compute name, packages it as a password-protected PFX, and
// returns it wrapped in the base64-encoded JSON document that Azure Key
// Vault expects as a secret value.
func (c *Config) createCertificate() (string, error) {
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		err = fmt.Errorf("Failed to Generate Private Key: %s", err)
		return "", err
	}

	// The certificate is issued for the VM's public cloudapp DNS name.
	host := fmt.Sprintf("%s.cloudapp.net", c.tmpComputeName)
	notBefore := time.Now()
	// 24h validity: the certificate only needs to outlive the build.
	notAfter := notBefore.Add(24 * time.Hour)

	// Random 128-bit serial number.
	serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		err = fmt.Errorf("Failed to Generate Serial Number: %v", err)
		return "", err
	}

	template := x509.Certificate{
		SerialNumber: serialNumber,
		Issuer: pkix.Name{
			CommonName: host,
		},
		Subject: pkix.Name{
			CommonName: host,
		},
		NotBefore: notBefore,
		NotAfter:  notAfter,

		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
	}

	// Self-signed: the template serves as both subject and parent.
	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey)
	if err != nil {
		err = fmt.Errorf("Failed to Create Certificate: %s", err)
		return "", err
	}

	pfxBytes, err := pkcs12.Encode(derBytes, privateKey, c.tmpCertificatePassword)
	if err != nil {
		err = fmt.Errorf("Failed to encode certificate as PFX: %s", err)
		return "", err
	}

	keyVaultDescription := keyVaultCertificate{
		Data:     base64.StdEncoding.EncodeToString(pfxBytes),
		DataType: "pfx",
		Password: c.tmpCertificatePassword,
	}

	bytes, err := json.Marshal(keyVaultDescription)
	if err != nil {
		err = fmt.Errorf("Failed to marshal key vault description: %s", err)
		return "", err
	}

	return base64.StdEncoding.EncodeToString(bytes), nil
}
// Prepare decodes and interpolates the raw configuration into c, applies
// defaults and runtime values, configures communicator credentials, and
// validates the result. The first return value (generated-value
// placeholders) is always nil for this builder.
func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
	c.ctx.Funcs = azcommon.TemplateFuncs
	err := config.Decode(c, &config.DecodeOpts{
		Interpolate:        true,
		InterpolateContext: &c.ctx,
	}, raws...)
	if err != nil {
		return nil, err
	}

	provideDefaultValues(c)
	setRuntimeValues(c)
	err = setUserNamePassword(c)
	if err != nil {
		return nil, err
	}

	// copy singular blocks
	// (copy kv.Value into a fresh local so the stored pointer does not
	// alias the loop variable)
	for _, kv := range c.AzureTag {
		v := kv.Value
		c.AzureTags[kv.Name] = &v
	}

	err = c.ClientConfig.SetDefaultValues()
	if err != nil {
		return nil, err
	}

	err = setCustomData(c)
	if err != nil {
		return nil, err
	}

	// NOTE: if the user did not specify a communicator, then default to both
	// SSH and WinRM. This is for backwards compatibility because the code did
	// not specifically force the user to set a communicator.
	if c.Comm.Type == "" || strings.EqualFold(c.Comm.Type, "ssh") {
		err = setSshValues(c)
		if err != nil {
			return nil, err
		}
	}

	if c.Comm.Type == "" || strings.EqualFold(c.Comm.Type, "winrm") {
		err = setWinRMCertificate(c)
		if err != nil {
			return nil, err
		}
	}

	var errs *packer.MultiError
	// NOTE(review): the assert helpers append to errs in place, which only
	// works if this append always yields a non-nil MultiError — confirm
	// packer.MultiErrorAppend's behavior with an empty error list.
	errs = packer.MultiErrorAppend(errs, c.Comm.Prepare(&c.ctx)...)
	assertRequiredParametersSet(c, errs)
	assertTagProperties(c, errs)
	if errs != nil && len(errs.Errors) > 0 {
		return nil, errs
	}

	return nil, nil
}
// setSshValues fills in the SSH side of the communicator config: a default
// 20 minute timeout, and an authorized key derived either from the
// user-supplied private key file or from a freshly generated key pair.
func setSshValues(c *Config) error {
	if c.Comm.SSHTimeout == 0 {
		c.Comm.SSHTimeout = 20 * time.Minute
	}

	if c.Comm.SSHPrivateKeyFile != "" {
		privateKeyBytes, err := c.Comm.ReadSSHPrivateKeyFile()
		if err != nil {
			return err
		}
		signer, err := ssh.ParsePrivateKey(privateKeyBytes)
		if err != nil {
			return err
		}
		publicKey := signer.PublicKey()
		// authorized_keys format "<type> <base64-key> <comment>"; the
		// comment embeds a timestamp for traceability.
		c.sshAuthorizedKey = fmt.Sprintf("%s %s packer Azure Deployment%s",
			publicKey.Type(),
			base64.StdEncoding.EncodeToString(publicKey.Marshal()),
			time.Now().Format(time.RFC3339))
		c.Comm.SSHPrivateKey = privateKeyBytes
	} else {
		// No key supplied: generate a fresh pair for this build.
		sshKeyPair, err := NewOpenSshKeyPair()
		if err != nil {
			return err
		}

		c.sshAuthorizedKey = sshKeyPair.AuthorizedKey()
		c.Comm.SSHPrivateKey = sshKeyPair.PrivateKey()
	}

	return nil
}
// setWinRMCertificate configures the communicator to use the NTLM WinRM
// transport and generates the self-signed certificate used to authenticate
// the WinRM connection. On error the certificate field is left empty.
func setWinRMCertificate(c *Config) error {
	c.Comm.WinRMTransportDecorator =
		func() winrm.Transporter {
			return &winrm.ClientNTLM{}
		}

	cert, err := c.createCertificate()
	c.winrmCertificate = cert

	return err
}
// setRuntimeValues generates the per-build temporary names (admin password,
// certificate password, compute/NIC/disk/network/key-vault names, etc.),
// honoring user-supplied overrides for the compute name and resource group.
func setRuntimeValues(c *Config) {
	names := NewTempName(c.CustomResourcePrefix)

	c.tmpAdminPassword = names.AdminPassword
	// store so that we can access this later during provisioning
	packer.LogSecretFilter.Set(c.tmpAdminPassword)

	c.tmpCertificatePassword = names.CertificatePassword

	c.tmpComputeName = c.TempComputeName
	if c.tmpComputeName == "" {
		c.tmpComputeName = names.ComputeName
	}

	c.tmpDeploymentName = names.DeploymentName

	// Only set tmpResourceGroupName when no existing build resource group
	// was supplied: generated if no explicit temp name was given, otherwise
	// the user's temp name.
	if c.BuildResourceGroupName == "" {
		if c.TempResourceGroupName == "" {
			c.tmpResourceGroupName = names.ResourceGroupName
		} else {
			c.tmpResourceGroupName = c.TempResourceGroupName
		}
	}

	c.tmpNicName = names.NicName
	c.tmpPublicIPAddressName = names.PublicIPAddressName
	c.tmpOSDiskName = names.OSDiskName
	c.tmpDataDiskName = names.DataDiskName
	c.tmpSubnetName = names.SubnetName
	c.tmpVirtualNetworkName = names.VirtualNetworkName
	c.tmpNsgName = names.NsgName
	c.tmpKeyVaultName = names.KeyVaultName
}
// setUserNamePassword establishes the credentials used to reach the VM.
// Builder-generated defaults are applied first and then overridden by any
// communicator-specific values, keeping c.UserName/c.Password in sync with
// whatever the communicator will actually use. Returns an error only when a
// user-supplied WinRM password fails Azure's complexity policy.
func setUserNamePassword(c *Config) error {
	// Set default credentials generated by the builder
	c.UserName = DefaultUserName
	c.Password = c.tmpAdminPassword

	// Set communicator specific credentials and update defaults if different.
	// Communicator specific credentials need to be updated as the standard Packer
	// SSHConfigFunc and WinRMConfigFunc use communicator specific credentials, unless overwritten.

	// SSH comm
	if c.Comm.SSHUsername == "" {
		c.Comm.SSHUsername = c.UserName
	}
	c.UserName = c.Comm.SSHUsername

	if c.Comm.SSHPassword == "" {
		c.Comm.SSHPassword = c.Password
	}
	c.Password = c.Comm.SSHPassword

	// An explicit "ssh" communicator needs no WinRM settings; an empty
	// communicator type deliberately falls through to the WinRM section too.
	if c.Comm.Type == "ssh" {
		return nil
	}

	// WinRM comm
	if c.Comm.WinRMUser == "" {
		c.Comm.WinRMUser = c.UserName
	}
	c.UserName = c.Comm.WinRMUser

	if c.Comm.WinRMPassword == "" {
		// Configure password settings using Azure generated credentials
		c.Comm.WinRMPassword = c.Password
	}

	// The effective WinRM password must meet Azure's complexity policy.
	if !isValidPassword(c.Comm.WinRMPassword) {
		return fmt.Errorf("The supplied \"winrm_password\" must be between 8-123 characters long and must satisfy at least 3 from the following: \n1) Contains an uppercase character \n2) Contains a lowercase character\n3) Contains a numeric digit\n4) Contains a special character\n5) Control characters are not allowed")
	}
	c.Password = c.Comm.WinRMPassword

	return nil
}
// setCustomData loads the optional custom data file and stores it
// base64-encoded on the config; a missing setting is not an error.
func setCustomData(c *Config) error {
	path := c.CustomDataFile
	if path == "" {
		return nil
	}

	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	c.customData = base64.StdEncoding.EncodeToString(contents)
	return nil
}
// provideDefaultValues fills zero-valued settings with builder defaults:
// VM size, managed image storage account type, disk caching type, image
// version (when a platform image is in use), and key vault SKU.
func provideDefaultValues(c *Config) {
	if c.VMSize == "" {
		c.VMSize = DefaultVMSize
	}

	if c.ManagedImageStorageAccountType == "" {
		c.managedImageStorageAccountType = compute.StorageAccountTypesStandardLRS
	}

	if c.DiskCachingType == "" {
		c.diskCachingType = compute.CachingTypesReadWrite
	}

	// A platform image without an explicit version gets DefaultImageVersion.
	if c.ImagePublisher != "" && c.ImageVersion == "" {
		c.ImageVersion = DefaultImageVersion
	}

	if c.BuildKeyVaultSKU == "" {
		c.BuildKeyVaultSKU = DefaultKeyVaultSKU
	}

	// BUG FIX: a duplicate c.ClientConfig.SetDefaultValues() call was made
	// here with its error silently discarded; Prepare invokes the same
	// method immediately after this function with proper error handling,
	// so the unchecked call has been removed.
}
// assertTagProperties validates the Azure tag map: at most 15 tags, tag
// names up to 512 characters, tag values up to 256 characters. Violations
// are appended to errs, which must be non-nil so the in-place appends are
// visible to the caller.
func assertTagProperties(c *Config, errs *packer.MultiError) {
	if len(c.AzureTags) > 15 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("a max of 15 tags are supported, but %d were provided", len(c.AzureTags)))
	}

	for k, v := range c.AzureTags {
		if len(k) > 512 {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("the tag name %q exceeds (%d) the 512 character limit", k, len(k)))
		}
		// BUG FIX: this message previously said "tag name" even though it
		// is the tag *value* being checked against the 256 character limit.
		if len(*v) > 256 {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("the tag value %q exceeds (%d) the 256 character limit", *v, len(*v)))
		}
	}
}
// assertRequiredParametersSet validates all cross-field requirements of the
// configuration — identity, capture target, image source, deployment
// target, networking, plan info, polling timeout, OS type, and storage/disk
// enumerations — appending one error per violation to errs. errs must be
// non-nil on entry; the local `errs =` reassignments rely on appends being
// visible through the shared *packer.MultiError.
func assertRequiredParametersSet(c *Config, errs *packer.MultiError) {
	c.ClientConfig.Validate(errs)

	/////////////////////////////////////////////
	// Identity
	if len(c.UserAssignedManagedIdentities) != 0 {
		for _, rid := range c.UserAssignedManagedIdentities {
			r, err := client.ParseResourceID(rid)
			if err != nil {
				errs = packer.MultiErrorAppend(errs, err)
			} else {
				if !strings.EqualFold(r.Provider, "Microsoft.ManagedIdentity") {
					errs = packer.MultiErrorAppend(errs, fmt.Errorf("A valid user assigned managed identity resource id must have a correct resource provider"))
				}
				if !strings.EqualFold(r.ResourceType.String(), "userAssignedIdentities") {
					errs = packer.MultiErrorAppend(errs, fmt.Errorf("A valid user assigned managed identity resource id must have a correct resource type"))
				}
			}
		}
	}

	/////////////////////////////////////////////
	// Capture — exactly one output flavor: VHD (capture_*) or managed image.
	if c.CaptureContainerName == "" && c.ManagedImageName == "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name or managed_image_name must be specified"))
	}

	if c.CaptureNamePrefix == "" && c.ManagedImageResourceGroupName == "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix or managed_image_resource_group_name must be specified"))
	}

	if (c.CaptureNamePrefix != "" || c.CaptureContainerName != "") && (c.ManagedImageResourceGroupName != "" || c.ManagedImageName != "") {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Either a VHD or a managed image can be built, but not both. Please specify either capture_container_name and capture_name_prefix or managed_image_resource_group_name and managed_image_name."))
	}

	if c.CaptureContainerName != "" {
		if !reCaptureContainerName.MatchString(c.CaptureContainerName) {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must satisfy the regular expression %q.", reCaptureContainerName.String()))
		}

		if strings.HasSuffix(c.CaptureContainerName, "-") {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must not end with a hyphen, e.g. '-'."))
		}

		if strings.Contains(c.CaptureContainerName, "--") {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_container_name must not contain consecutive hyphens, e.g. '--'."))
		}

		if c.CaptureNamePrefix == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must be specified"))
		}

		// NOTE(review): this regex check also runs when CaptureNamePrefix
		// is empty (in addition to the error above) — confirm intended.
		if !reCaptureNamePrefix.MatchString(c.CaptureNamePrefix) {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must satisfy the regular expression %q.", reCaptureNamePrefix.String()))
		}

		if strings.HasSuffix(c.CaptureNamePrefix, "-") || strings.HasSuffix(c.CaptureNamePrefix, ".") {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A capture_name_prefix must not end with a hyphen or period."))
		}
	}

	if c.TempResourceGroupName != "" && c.BuildResourceGroupName != "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("The settings temp_resource_group_name and build_resource_group_name cannot both be defined.  Please define one or neither."))
	}

	/////////////////////////////////////////////
	// Compute — exactly one image source may be specified.
	toInt := func(b bool) int {
		if b {
			return 1
		} else {
			return 0
		}
	}

	isImageUrl := c.ImageUrl != ""
	isCustomManagedImage := c.CustomManagedImageName != "" || c.CustomManagedImageResourceGroupName != ""
	isSharedGallery := c.SharedGallery.GalleryName != ""
	isPlatformImage := c.ImagePublisher != "" || c.ImageOffer != "" || c.ImageSku != ""

	countSourceInputs := toInt(isImageUrl) + toInt(isCustomManagedImage) + toInt(isPlatformImage) + toInt(isSharedGallery)

	if countSourceInputs > 1 {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a VHD (image_url), Image Reference (image_publisher, image_offer, image_sku), a Managed Disk (custom_managed_disk_image_name, custom_managed_disk_resource_group_name), or a Shared Gallery Image (shared_image_gallery)"))
	}

	if isImageUrl && c.ManagedImageResourceGroupName != "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed image must be created from a managed image, it cannot be created from a VHD."))
	}

	if c.SharedGallery.GalleryName != "" {
		// Shared Image Gallery source: all gallery coordinates required,
		// and only a managed-image (not VHD) target is supported.
		if c.SharedGallery.Subscription == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.subscription must be specified"))
		}
		if c.SharedGallery.ResourceGroup == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.resource_group must be specified"))
		}
		if c.SharedGallery.ImageName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A shared_image_gallery.image_name must be specified"))
		}
		if c.CaptureContainerName != "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("VHD Target [capture_container_name] is not supported when using Shared Image Gallery as source. Use managed_image_resource_group_name instead."))
		}
		if c.CaptureNamePrefix != "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("VHD Target [capture_name_prefix] is not supported when using Shared Image Gallery as source. Use managed_image_name instead."))
		}
	} else if c.ImageUrl == "" && c.CustomManagedImageName == "" {
		// Platform image source: publisher/offer/sku all required.
		if c.ImagePublisher == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_publisher must be specified"))
		}
		if c.ImageOffer == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_offer must be specified"))
		}
		if c.ImageSku == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_sku must be specified"))
		}
	} else if c.ImageUrl == "" && c.ImagePublisher == "" {
		// Custom managed image source: both source coordinates and a
		// managed-image target are required.
		if c.CustomManagedImageResourceGroupName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A custom_managed_image_resource_group_name must be specified"))
		}
		if c.CustomManagedImageName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A custom_managed_image_name must be specified"))
		}
		if c.ManagedImageResourceGroupName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed_image_resource_group_name must be specified"))
		}
		if c.ManagedImageName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A managed_image_name must be specified"))
		}
	} else {
		// VHD (image_url) source: must not mix with platform image fields.
		if c.ImagePublisher != "" || c.ImageOffer != "" || c.ImageSku != "" || c.ImageVersion != "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_url must not be specified if image_publisher, image_offer, image_sku, or image_version is specified"))
		}
	}

	/////////////////////////////////////////////
	// Deployment
	xor := func(a, b bool) bool {
		return (a || b) && !(a && b)
	}

	if !xor((c.StorageAccount != "" || c.ResourceGroupName != ""), (c.ManagedImageName != "" || c.ManagedImageResourceGroupName != "")) {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a VHD (storage_account and resource_group_name) or Managed Image (managed_image_resource_group_name and managed_image_name) output"))
	}

	if !xor(c.Location != "", c.BuildResourceGroupName != "") {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("Specify either a location to create the resource group in or an existing build_resource_group_name, but not both."))
	}

	if c.ManagedImageName == "" && c.ManagedImageResourceGroupName == "" {
		if c.StorageAccount == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A storage_account must be specified"))
		}
		if c.ResourceGroupName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A resource_group_name must be specified"))
		}
	}

	if c.TempResourceGroupName != "" {
		if ok, err := assertResourceGroupName(c.TempResourceGroupName, "temp_resource_group_name"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}
	if c.BuildResourceGroupName != "" {
		if ok, err := assertResourceGroupName(c.BuildResourceGroupName, "build_resource_group_name"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}
	if c.ManagedImageResourceGroupName != "" {
		if ok, err := assertResourceGroupName(c.ManagedImageResourceGroupName, "managed_image_resource_group_name"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}
	if c.ManagedImageName != "" {
		if ok, err := assertManagedImageName(c.ManagedImageName, "managed_image_name"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}

	// Shared Image Gallery destination requires all coordinates when used.
	if c.ManagedImageName != "" && c.ManagedImageResourceGroupName != "" && c.SharedGalleryDestination.SigDestinationGalleryName != "" {
		if c.SharedGalleryDestination.SigDestinationResourceGroup == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A resource_group must be specified for shared_image_gallery_destination"))
		}
		if c.SharedGalleryDestination.SigDestinationImageName == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_name must be specified for shared_image_gallery_destination"))
		}
		if c.SharedGalleryDestination.SigDestinationImageVersion == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("An image_version must be specified for shared_image_gallery_destination"))
		}
		if len(c.SharedGalleryDestination.SigDestinationReplicationRegions) == 0 {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("A list of replication_regions must be specified for shared_image_gallery_destination"))
		}
	}
	if c.SharedGalleryTimeout == 0 {
		// default to a one-hour timeout. In the sdk, the default is 15 m.
		c.SharedGalleryTimeout = 60 * time.Minute
	}

	if c.ManagedImageOSDiskSnapshotName != "" {
		if ok, err := assertManagedImageOSDiskSnapshotName(c.ManagedImageOSDiskSnapshotName, "managed_image_os_disk_snapshot_name"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}

	if c.ManagedImageDataDiskSnapshotPrefix != "" {
		if ok, err := assertManagedImageDataDiskSnapshotName(c.ManagedImageDataDiskSnapshotPrefix, "managed_image_data_disk_snapshot_prefix"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}

	if c.CustomResourcePrefix != "" {
		if ok, err := assertResourceNamePrefix(c.CustomResourcePrefix, "custom_resource_build_prefix"); !ok {
			errs = packer.MultiErrorAppend(errs, err)
		}
	}

	// Networking: subnet and resource group settings require a vnet name;
	// an inbound allow-list is mutually exclusive with an existing vnet.
	if c.VirtualNetworkName == "" && c.VirtualNetworkResourceGroupName != "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_resource_group_name is specified, so must virtual_network_name"))
	}
	if c.VirtualNetworkName == "" && c.VirtualNetworkSubnetName != "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_subnet_name is specified, so must virtual_network_name"))
	}

	if c.AllowedInboundIpAddresses != nil && len(c.AllowedInboundIpAddresses) >= 1 {
		if c.VirtualNetworkName != "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("If virtual_network_name is specified, allowed_inbound_ip_addresses cannot be specified"))
		} else {
			if ok, err := assertAllowedInboundIpAddresses(c.AllowedInboundIpAddresses, "allowed_inbound_ip_addresses"); !ok {
				errs = packer.MultiErrorAppend(errs, err)
			}
		}
	}

	/////////////////////////////////////////////
	// Plan Info
	if c.PlanInfo.PlanName != "" || c.PlanInfo.PlanProduct != "" || c.PlanInfo.PlanPublisher != "" || c.PlanInfo.PlanPromotionCode != "" {
		if c.PlanInfo.PlanName == "" || c.PlanInfo.PlanProduct == "" || c.PlanInfo.PlanPublisher == "" {
			errs = packer.MultiErrorAppend(errs, fmt.Errorf("if either plan_name, plan_product, plan_publisher, or plan_promotion_code are defined then plan_name, plan_product, and plan_publisher must be defined"))
		} else {
			if c.AzureTags == nil {
				c.AzureTags = make(map[string]*string)
			}

			// NOTE(review): the plan_info field docs list this tag as
			// "PlanName", but the key written here is "PlanInfo" —
			// confirm which is intended before changing either, since
			// existing images may depend on the current key.
			c.AzureTags["PlanInfo"] = &c.PlanInfo.PlanName
			c.AzureTags["PlanProduct"] = &c.PlanInfo.PlanProduct
			c.AzureTags["PlanPublisher"] = &c.PlanInfo.PlanPublisher
			c.AzureTags["PlanPromotionCode"] = &c.PlanInfo.PlanPromotionCode
		}
	}

	/////////////////////////////////////////////
	// Polling Duration Timeout
	if c.PollingDurationTimeout == 0 {
		// In the sdk, the default is 15 m.
		c.PollingDurationTimeout = 15 * time.Minute
	}

	/////////////////////////////////////////////
	// OS — normalize the user's casing to the canonical constant.
	if strings.EqualFold(c.OSType, constants.Target_Linux) {
		c.OSType = constants.Target_Linux
	} else if strings.EqualFold(c.OSType, constants.Target_Windows) {
		c.OSType = constants.Target_Windows
	} else if c.OSType == "" {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("An os_type must be specified"))
	} else {
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("The os_type %q is invalid", c.OSType))
	}

	switch c.ManagedImageStorageAccountType {
	case "", string(compute.StorageAccountTypesStandardLRS):
		c.managedImageStorageAccountType = compute.StorageAccountTypesStandardLRS
	case string(compute.StorageAccountTypesPremiumLRS):
		c.managedImageStorageAccountType = compute.StorageAccountTypesPremiumLRS
	default:
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("The managed_image_storage_account_type %q is invalid", c.ManagedImageStorageAccountType))
	}

	switch c.DiskCachingType {
	case string(compute.CachingTypesNone):
		c.diskCachingType = compute.CachingTypesNone
	case string(compute.CachingTypesReadOnly):
		c.diskCachingType = compute.CachingTypesReadOnly
	case "", string(compute.CachingTypesReadWrite):
		c.diskCachingType = compute.CachingTypesReadWrite
	default:
		errs = packer.MultiErrorAppend(errs, fmt.Errorf("The disk_caching_type %q is invalid", c.DiskCachingType))
	}
}
// assertManagedImageName reports whether name is a valid managed image
// name; on failure it returns an error naming the offending setting.
func assertManagedImageName(name, setting string) (bool, error) {
	if isValidAzureName(reManagedDiskName, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must match the regular expression %q, and not end with a '-' or '.'.", setting, validManagedDiskName)
}
// assertManagedImageOSDiskSnapshotName reports whether name is a valid OS
// disk snapshot name; on failure it returns an error naming the setting.
func assertManagedImageOSDiskSnapshotName(name, setting string) (bool, error) {
	if isValidAzureName(reSnapshotName, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length is 80 characters", setting)
}
// assertManagedImageDataDiskSnapshotName reports whether name is a valid
// data disk snapshot prefix; on failure it returns an error naming the
// setting.
func assertManagedImageDataDiskSnapshotName(name, setting string) (bool, error) {
	if isValidAzureName(reSnapshotPrefix, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length (excluding the prefix) is 60 characters", setting)
}
// assertResourceNamePrefix reports whether name is a valid custom resource
// name prefix; on failure it returns an error naming the setting.
func assertResourceNamePrefix(name, setting string) (bool, error) {
	if isValidAzureName(reResourceNamePrefix, name) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must only contain characters from a-z, A-Z, 0-9 and _ and the maximum length is 10 characters", setting)
}
// assertAllowedInboundIpAddresses checks that every entry is either a
// literal IP address or a valid CIDR block, returning an error naming the
// offending setting otherwise. An empty list passes trivially.
func assertAllowedInboundIpAddresses(ipAddresses []string, setting string) (bool, error) {
	for _, entry := range ipAddresses {
		if net.ParseIP(entry) != nil {
			continue // plain IP address, fine
		}
		if _, _, err := net.ParseCIDR(entry); err != nil {
			return false, fmt.Errorf("The setting %s must only contain valid IP addresses or CIDR blocks", setting)
		}
	}
	return true, nil
}
// assertResourceGroupName reports whether rgn is a valid resource group
// name; on failure it returns an error naming the offending setting.
func assertResourceGroupName(rgn, setting string) (bool, error) {
	if isValidAzureName(reResourceGroupName, rgn) {
		return true, nil
	}
	return false, fmt.Errorf("The setting %s must match the regular expression %q, and not end with a '-' or '.'.", setting, validResourceGroupNameRe)
}
func isValidAzureName(re *regexp.Regexp, rgn string) bool {
return re.Match([]byte(rgn)) &&
!strings.HasSuffix(rgn, ".") &&
!strings.HasSuffix(rgn, "-")
}
// isValidPassword enforces Azure's password policy: 8-123 characters long
// and satisfying at least 3 of the following complexity requirements:
// 1) Contains an uppercase character
// 2) Contains a lowercase character
// 3) Contains a numeric digit
// 4) Contains a special character
// 5) Control characters are not allowed (a very specific case - not included in this validation)
func isValidPassword(password string) bool {
	if len(password) < 8 || len(password) > 123 {
		return false
	}

	met := 0
	for _, class := range []string{
		random.PossibleNumbers,
		random.PossibleLowerCase,
		random.PossibleUpperCase,
		random.PossibleSpecialCharacter,
	} {
		if strings.ContainsAny(password, class) {
			met++
		}
	}
	return met >= 3
}
// validateLocationZoneResiliency warns (via say) when the configured
// location is not in the known set of regions supporting Availability
// Zones; it never fails the build.
func (c *Config) validateLocationZoneResiliency(say func(s string)) {
	// Docs on regions that support Availibility Zones:
	// https://docs.microsoft.com/en-us/azure/availability-zones/az-overview#regions-that-support-availability-zones
	// Query technical names for locations:
	// az account list-locations --query '[].name' -o tsv
	zones := map[string]struct{}{
		"westeurope":    {},
		"centralus":     {},
		"eastus2":       {},
		"francecentral": {},
		"northeurope":   {},
		"southeastasia": {},
		"westus2":       {},
	}

	if _, supported := zones[c.Location]; !supported {
		say(fmt.Sprintf("WARNING: Zone resiliency may not be supported in %s, checkout the docs at https://docs.microsoft.com/en-us/azure/availability-zones/", c.Location))
	}
}
|
// Package adminz provides a simple set of adminz pages for administering
// a simple go server.
package adminz
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime"
"sync"
"time"
)
// Adminz bundles the state behind the admin endpoints: killfile-driven
// pause/resume, health reporting, and the servicez data hook. The embedded
// Mutex serializes pause/resume transitions.
type Adminz struct {
	// keep track of killfile state
	running bool

	// ticker that drives killfile polling
	killfileTicker *time.Ticker

	// list of killfilePaths to check
	killfilePaths []string

	// how often killfiles are polled; defaults to 1 second
	checkInterval time.Duration

	// generates data to return to /servicez endpoint. marshalled into json.
	servicez func() interface{}

	// resume is called when the server is unkilled
	onresume func()

	// pause is called when the server is killed
	onpause func()

	// healthy returns true iff the server is ready to respond to requests
	healthy func() bool

	sync.Mutex

	// the various handlers are attached to serveMux or DefaultServeMux
	mux *http.ServeMux
}
// Creates a new Adminz "builder". Not safe to use until Start() is called.
func New() *Adminz {
return &Adminz{}
}
// Resume unpauses the server if it is paused, firing the OnResume callback.
// Safe for concurrent use.
func (a *Adminz) Resume() {
	a.Lock()
	defer a.Unlock()
	a.doResume()
}
// doResume transitions to running if currently stopped, invoking the
// onresume callback first. MUST be called while holding a.Lock; use
// a.Resume if not.
func (a *Adminz) doResume() {
	if a.running {
		return // already running; nothing to do
	}
	if cb := a.onresume; cb != nil {
		cb()
	}
	a.running = true
}
// ServeMux sets the mux the admin handlers are registered on. If never
// called, Start registers on http.DefaultServeMux instead. Returns the
// receiver for chaining.
func (a *Adminz) ServeMux(mux *http.ServeMux) *Adminz {
	a.mux = mux
	return a
}
// OnResume registers the callback invoked when the server is unkilled.
// Returns the receiver for chaining.
func (a *Adminz) OnResume(resume func()) *Adminz {
	a.onresume = resume
	return a
}
// Pause the server if it is running and return true, otherwise no-op and
// return false. Safe for concurrent use.
func (a *Adminz) Pause() bool {
	a.Lock()
	defer a.Unlock()
	return a.doPause()
}
// doPause transitions to paused if currently running, invoking the onpause
// callback first. It returns whether the server was running beforehand.
// MUST be called while holding a.Lock; use a.Pause if not.
func (a *Adminz) doPause() bool {
	if !a.running {
		return false // already paused
	}
	if cb := a.onpause; cb != nil {
		cb()
	}
	a.running = false
	return true
}
// OnPause registers the callback invoked when the server is killed.
// Returns the receiver for chaining.
func (a *Adminz) OnPause(pause func()) *Adminz {
	a.onpause = pause
	return a
}
// Healthy registers a callback that returns true iff the server is ready to
// respond to requests; /healthz consults it when set. Returns the receiver
// for chaining.
func (a *Adminz) Healthy(healthy func() bool) *Adminz {
	a.healthy = healthy
	return a
}
// Servicez registers the callback that generates the data returned by the
// /servicez endpoint (marshalled into JSON). Returns the receiver for
// chaining.
func (a *Adminz) Servicez(servicez func() interface{}) *Adminz {
	a.servicez = servicez
	return a
}
// KillfilePaths sets the list of file paths whose existence pauses the
// server; see Killfiles for the conventional set. Returns the receiver for
// chaining.
func (a *Adminz) KillfilePaths(killfilePaths []string) *Adminz {
	a.killfilePaths = killfilePaths
	return a
}
// KillfileInterval sets how frequently the killfiles are checked; defaults
// to every second. Returns the receiver for chaining.
func (a *Adminz) KillfileInterval(interval time.Duration) *Adminz {
	a.checkInterval = interval
	return a
}
// Start initializes handlers and starts killfile checking. Make sure to
// remember to call this! Handlers go on the configured ServeMux when one
// was supplied, otherwise on http.DefaultServeMux. Returns the receiver
// for chaining.
func (a *Adminz) Start() *Adminz {
	// http.HandleFunc simply registers on http.DefaultServeMux, so folding
	// the nil case into one path removes the duplicated registration list.
	mux := a.mux
	if mux == nil {
		mux = http.DefaultServeMux
	}
	mux.HandleFunc("/healthz", a.healthzHandler)
	mux.HandleFunc("/servicez", a.ServicezHandler)
	mux.HandleFunc("/quitquitquit", a.quitHandler)
	mux.HandleFunc("/abortabortabort", a.abortHandler)
	mux.HandleFunc("/gc", a.gcHandler)

	log.Print("adminz registered")

	if a.checkInterval == 0 {
		a.checkInterval = 1 * time.Second
	}
	a.killfileTicker = time.NewTicker(a.checkInterval)

	// start killfile checking loop (after one synchronous check so the
	// initial state reflects any pre-existing killfile)
	if len(a.killfilePaths) > 0 {
		log.Print("Watching paths for killfile: ", a.killfilePaths)
		a.checkKillfiles()
		go a.killfileLoop()
	} else {
		log.Print("No killfiles being watched.")
	}

	return a
}
// Stop halts killfile checking. Safe to call even if Start was never run
// (the ticker is nil until Start creates it).
func (a *Adminz) Stop() {
	if a.killfileTicker != nil {
		a.killfileTicker.Stop()
	}
}
// Killfiles generates the standard set of killfile paths: one per port plus
// the catch-all "kill.all" file. Pass the result to KillfilePaths.
func Killfiles(ports ...int) []string {
	// Log with context instead of a bare debug dump of the slice.
	log.Print("Generating killfiles for ports: ", ports)
	// The number of ports + the "all" killfile.
	ret := make([]string, len(ports)+1)
	for i, port := range ports {
		ret[i] = fmt.Sprintf("/dev/shm/healthz/kill.%d", port)
	}
	ret[len(ports)] = "/dev/shm/healthz/kill.all"
	return ret
}
// checkKillfiles pauses the server if any configured killfile exists and
// resumes it otherwise. Reports whether a killfile was found.
func (a *Adminz) checkKillfiles() bool {
	for _, killfile := range a.killfilePaths {
		// Only existence matters, so Stat avoids opening (and having to
		// close) a file descriptor on every poll.
		if _, err := os.Stat(killfile); err == nil {
			if a.Pause() {
				log.Println("paused due to ", killfile)
			}
			return true
		}
	}
	// No killfile present: ensure the server is running.
	a.Resume()
	return false
}
// killfileLoop polls the killfiles on every tick until the ticker is
// stopped. Runs on its own goroutine, started by Start.
func (a *Adminz) killfileLoop() {
	// "for range" is the idiomatic form when the value is unused.
	for range a.killfileTicker.C {
		a.checkKillfiles()
	}
}
// healthzHandler serves /healthz: 200 "OK" while the server is running and
// the optional healthy predicate (if set) returns true; 503 otherwise.
func (a *Adminz) healthzHandler(w http.ResponseWriter, r *http.Request) {
	// we are healthy iff:
	// we are not killed AND
	// a.healthy is unset (so we ignore it) OR
	// a.healthy() returns true
	// NOTE(review): a.running is read here without holding a.Lock while
	// other goroutines mutate it under the lock — confirm this unguarded
	// read is intentional.
	var ret string
	if a.running && (a.healthy == nil || a.healthy()) {
		ret = "OK"
	} else {
		w.WriteHeader(http.StatusServiceUnavailable)
		ret = "Service Unavailable"
		log.Println("Healthz returning ", ret)
	}
	w.Write(([]byte)(ret))
}

// EmptyStruct is the placeholder marshalled by /servicez when no servicez
// function has been registered; it encodes as "{}".
type EmptyStruct struct {
}
// ServicezHandler serves /servicez: it marshals the value produced by the
// registered servicez function (or an empty JSON object if none) and writes
// it with a JSON content type. Marshal failures become a 500 response.
func (a *Adminz) ServicezHandler(w http.ResponseWriter, r *http.Request) {
	var data interface{}
	if a.servicez != nil {
		data = a.servicez()
	} else {
		data = &EmptyStruct{}
	}
	bytes, err := json.Marshal(data)
	if err != nil {
		// Named constant instead of magic 500; early return keeps the
		// happy path unindented.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	// TODO I probably need to serialize reads to servicez as who knows what
	// people will put in that function
	w.Write(bytes)
}
// quitHandler serves /quitquitquit: stop killfile polling, then pause.
// Polling must stop first because checkKillfiles resumes the server on the
// next tick whenever no killfile exists on disk, undoing the pause.
func (a *Adminz) quitHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("quitquitquit called! Pausing service")
	a.Stop()
	a.Pause()
}

// abortHandler serves /abortabortabort: identical behavior to quitHandler.
func (a *Adminz) abortHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("abortabortabort called! Pausing service")
	a.Stop()
	a.Pause()
}
// gcHandler serves /gc: it reports memory stats (in MiB), pauses the server
// while forcing a garbage collection, restores the previous run state, then
// reports the stats again.
func (a *Adminz) gcHandler(w http.ResponseWriter, r *http.Request) {
	var mem runtime.MemStats
	mb := uint64(1024 * 1024) // report figures in mebibytes
	runtime.ReadMemStats(&mem)
	fmt.Fprintln(w, "Before")
	fmt.Fprintln(w, "\tAlloc\t", mem.Alloc/mb)
	fmt.Fprintln(w, "\tTotalAlloc:\t", mem.TotalAlloc/mb)
	fmt.Fprintln(w, "\tHeapAlloc:\t", mem.HeapAlloc/mb)
	fmt.Fprintln(w, "\tHeapSys:\t", mem.HeapSys/mb)
	fmt.Fprintln(w, "\tSys:\t", mem.Sys/mb)
	// Pause (if running) under the lock so the GC runs while callbacks have
	// quiesced, then restore the prior state before unlocking.
	a.Lock()
	was := a.doPause()
	runtime.GC()
	if was {
		a.doResume()
	}
	a.Unlock()
	runtime.ReadMemStats(&mem)
	fmt.Fprintln(w, "After")
	fmt.Fprintln(w, "\tAlloc\t", mem.Alloc/mb)
	fmt.Fprintln(w, "\tTotalAlloc:\t", mem.TotalAlloc/mb)
	fmt.Fprintln(w, "\tHeapAlloc:\t", mem.HeapAlloc/mb)
	fmt.Fprintln(w, "\tHeapSys:\t", mem.HeapSys/mb)
	fmt.Fprintln(w, "\tSys:\t", mem.Sys/mb)
	w.Write([]byte("OK"))
}
Stop checking killfiles before pausing.
The killfile checker unpauses the server if there are no killfiles set. We don't
set a killfile on /abortabortabort or /quitquitquit, so we disable killfile
checking before pausing.
// Package adminz provides a simple set of adminz pages for administering
// a simple go server.
package adminz
import (
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"runtime"
"sync"
"time"
)
// Adminz bundles the state behind the admin endpoints: run/pause status,
// killfile polling configuration, and the optional user callbacks.
type Adminz struct {
	// keep track of killfile state
	running bool
	// ticker that checks killfiles every checkInterval
	killfileTicker *time.Ticker
	// list of killfilePaths to check
	killfilePaths []string
	// defaults to 1 second
	checkInterval time.Duration
	// generates data to return to /servicez endpoint. marshalled into json.
	servicez func() interface{}
	// resume is called when the server is unkilled
	onresume func()
	// pause is called when the server is killed
	onpause func()
	// healthy returns true iff the server is ready to respond to requests
	healthy func() bool
	// guards running and the pause/resume transitions
	sync.Mutex
	// the various handlers are attached to serveMux or DefaultServeMux
	mux *http.ServeMux
}

// New creates a new Adminz "builder". Not safe to use until Start() is called.
func New() *Adminz {
	return &Adminz{}
}
// Resume marks the server running, invoking the OnResume callback if it was
// previously paused. Safe for concurrent use.
func (a *Adminz) Resume() {
	a.Lock()
	defer a.Unlock()
	a.doResume()
}

// doResume resumes a stopped server, firing the onresume callback.
// MUST be called while holding a.Lock; use a.Resume otherwise.
func (a *Adminz) doResume() {
	if a.running {
		return
	}
	if a.onresume != nil {
		a.onresume()
	}
	a.running = true
}
// ServeMux selects the mux the handlers are registered on; when unset,
// Start falls back to http.DefaultServeMux.
func (a *Adminz) ServeMux(mux *http.ServeMux) *Adminz {
	a.mux = mux
	return a
}

// OnResume registers the callback invoked when the server is unkilled.
func (a *Adminz) OnResume(resume func()) *Adminz {
	a.onresume = resume
	return a
}

// Pause the server if it is running and return true, otherwise no-op and return false.
func (a *Adminz) Pause() bool {
	a.Lock()
	defer a.Unlock()
	return a.doPause()
}
// doPause transitions the server from running to paused, firing the onpause
// callback, and reports whether the server was running beforehand.
// MUST be called while holding a.Lock; use a.Pause otherwise.
func (a *Adminz) doPause() bool {
	if !a.running {
		return false
	}
	if a.onpause != nil {
		a.onpause()
	}
	a.running = false
	return true
}
// OnPause registers the callback invoked when the server is killed
// (transitions from running to paused). Returns the receiver for chaining.
func (a *Adminz) OnPause(pause func()) *Adminz {
	a.onpause = pause
	return a
}

// Healthy registers a predicate consulted by /healthz; the endpoint reports
// healthy only while the server is running AND this returns true.
func (a *Adminz) Healthy(healthy func() bool) *Adminz {
	a.healthy = healthy
	return a
}

// Servicez registers the function whose result is marshalled into JSON for
// the /servicez endpoint.
func (a *Adminz) Servicez(servicez func() interface{}) *Adminz {
	a.servicez = servicez
	return a
}

// KillfilePaths sets the list of killfile paths to check.
func (a *Adminz) KillfilePaths(killfilePaths []string) *Adminz {
	a.killfilePaths = killfilePaths
	return a
}

// KillfileInterval sets how frequently killfiles are checked.
// Defaults to every second.
func (a *Adminz) KillfileInterval(interval time.Duration) *Adminz {
	a.checkInterval = interval
	return a
}
// Start registers the adminz HTTP handlers and begins killfile polling.
// Make sure to remember to call this after configuring the builder!
func (a *Adminz) Start() *Adminz {
	// Register on the configured mux, falling back to http.DefaultServeMux.
	handle := http.HandleFunc
	if a.mux != nil {
		handle = a.mux.HandleFunc
	}
	handle("/healthz", a.healthzHandler)
	handle("/servicez", a.ServicezHandler)
	handle("/quitquitquit", a.quitHandler)
	handle("/abortabortabort", a.abortHandler)
	handle("/gc", a.gcHandler)
	log.Print("adminz registered")

	if a.checkInterval == 0 {
		a.checkInterval = 1 * time.Second
	}
	a.killfileTicker = time.NewTicker(a.checkInterval)

	// Start the killfile checking loop only when there is something to watch.
	if len(a.killfilePaths) == 0 {
		log.Print("No killfiles being watched.")
		return a
	}
	log.Print("Watching paths for killfile: ", a.killfilePaths)
	a.checkKillfiles()
	go a.killfileLoop()
	return a
}
// Stop halts killfile checking. Safe to call even if Start was never run
// (the ticker is nil until Start creates it).
func (a *Adminz) Stop() {
	if a.killfileTicker != nil {
		a.killfileTicker.Stop()
	}
}
// Killfiles generates the standard set of killfile paths: one per port plus
// the catch-all "kill.all" file. Pass the result to KillfilePaths.
func Killfiles(ports ...int) []string {
	// Log with context instead of a bare debug dump of the slice.
	log.Print("Generating killfiles for ports: ", ports)
	// The number of ports + the "all" killfile.
	ret := make([]string, len(ports)+1)
	for i, port := range ports {
		ret[i] = fmt.Sprintf("/dev/shm/healthz/kill.%d", port)
	}
	ret[len(ports)] = "/dev/shm/healthz/kill.all"
	return ret
}
// checkKillfiles pauses the server if any configured killfile exists and
// resumes it otherwise. Reports whether a killfile was found.
func (a *Adminz) checkKillfiles() bool {
	for _, killfile := range a.killfilePaths {
		// Only existence matters, so Stat avoids opening (and having to
		// close) a file descriptor on every poll.
		if _, err := os.Stat(killfile); err == nil {
			if a.Pause() {
				log.Println("paused due to ", killfile)
			}
			return true
		}
	}
	// No killfile present: ensure the server is running.
	a.Resume()
	return false
}
// killfileLoop polls the killfiles on every tick until the ticker is
// stopped. Runs on its own goroutine, started by Start.
func (a *Adminz) killfileLoop() {
	// "for range" is the idiomatic form when the value is unused.
	for range a.killfileTicker.C {
		a.checkKillfiles()
	}
}
// healthzHandler serves /healthz: 200 "OK" while the server is running and
// the optional healthy predicate (if set) returns true; 503 otherwise.
func (a *Adminz) healthzHandler(w http.ResponseWriter, r *http.Request) {
	// we are healthy iff:
	// we are not killed AND
	// a.healthy is unset (so we ignore it) OR
	// a.healthy() returns true
	// NOTE(review): a.running is read here without holding a.Lock while
	// other goroutines mutate it under the lock — confirm this unguarded
	// read is intentional.
	var ret string
	if a.running && (a.healthy == nil || a.healthy()) {
		ret = "OK"
	} else {
		w.WriteHeader(http.StatusServiceUnavailable)
		ret = "Service Unavailable"
		log.Println("Unhealthy, returning ", ret)
	}
	w.Write(([]byte)(ret))
}

// EmptyStruct is the placeholder marshalled by /servicez when no servicez
// function has been registered; it encodes as "{}".
type EmptyStruct struct {
}
// ServicezHandler serves /servicez: it marshals the value produced by the
// registered servicez function (or an empty JSON object if none) and writes
// it with a JSON content type. Marshal failures become a 500 response.
func (a *Adminz) ServicezHandler(w http.ResponseWriter, r *http.Request) {
	var data interface{}
	if a.servicez != nil {
		data = a.servicez()
	} else {
		data = &EmptyStruct{}
	}
	bytes, err := json.Marshal(data)
	if err != nil {
		// Named constant instead of magic 500; early return keeps the
		// happy path unindented.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	// TODO I probably need to serialize reads to servicez as who knows what
	// people will put in that function
	w.Write(bytes)
}
// quitHandler serves /quitquitquit: stops killfile polling, then pauses.
// Polling is stopped first because checkKillfiles would otherwise resume
// the server on the next tick when no killfile exists on disk.
func (a *Adminz) quitHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("quitquitquit called! Pausing service")
	a.Stop()
	a.Pause()
}

// abortHandler serves /abortabortabort: identical behavior to quitHandler.
func (a *Adminz) abortHandler(w http.ResponseWriter, r *http.Request) {
	log.Println("abortabortabort called! Pausing service")
	a.Stop()
	a.Pause()
}
// gcHandler serves /gc: it reports memory stats (in MiB), pauses the server
// while forcing a garbage collection, restores the previous run state, then
// reports the stats again.
func (a *Adminz) gcHandler(w http.ResponseWriter, r *http.Request) {
	var mem runtime.MemStats
	mb := uint64(1024 * 1024) // report figures in mebibytes
	runtime.ReadMemStats(&mem)
	fmt.Fprintln(w, "Before")
	fmt.Fprintln(w, "\tAlloc\t", mem.Alloc/mb)
	fmt.Fprintln(w, "\tTotalAlloc:\t", mem.TotalAlloc/mb)
	fmt.Fprintln(w, "\tHeapAlloc:\t", mem.HeapAlloc/mb)
	fmt.Fprintln(w, "\tHeapSys:\t", mem.HeapSys/mb)
	fmt.Fprintln(w, "\tSys:\t", mem.Sys/mb)
	// Pause (if running) under the lock so the GC runs while callbacks have
	// quiesced, then restore the prior state before unlocking.
	a.Lock()
	was := a.doPause()
	runtime.GC()
	if was {
		a.doResume()
	}
	a.Unlock()
	runtime.ReadMemStats(&mem)
	fmt.Fprintln(w, "After")
	fmt.Fprintln(w, "\tAlloc\t", mem.Alloc/mb)
	fmt.Fprintln(w, "\tTotalAlloc:\t", mem.TotalAlloc/mb)
	fmt.Fprintln(w, "\tHeapAlloc:\t", mem.HeapAlloc/mb)
	fmt.Fprintln(w, "\tHeapSys:\t", mem.HeapSys/mb)
	fmt.Fprintln(w, "\tSys:\t", mem.Sys/mb)
	w.Write([]byte("OK"))
}
|
package bagins_test
import (
"github.com/APTrust/bagins"
"github.com/APTrust/bagins/bagutil"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
// TestNewBag checks destination-directory validation and the on-disk layout
// of a freshly created bag.
func TestNewBag(t *testing.T) {
	// Use this ChecksumAlgorithm for the tests.
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	// It should raise an error if the destination dir does not exist.
	badLocation := filepath.Join(os.TempDir(), "/GOTESTNOT_EXISTs/")
	_, err := bagins.NewBag(badLocation, "_GOFAILBAG_", cs)
	if err == nil {
		t.Error("NewBag function does not recognize when a directory does not exist!")
	}
	// It should raise an error if the bag already exists.
	os.MkdirAll(filepath.Join(badLocation, "_GOFAILBAG_"), 0766)
	defer os.RemoveAll(badLocation)
	_, err = bagins.NewBag(badLocation, "_GOFAILBAG_", cs)
	if err == nil {
		t.Error("Error not thrown when bag already exists as expected.")
	}
	// It should create a bag without any errors.
	bag, err := bagins.NewBag(os.TempDir(), "_GOTESTBAG_", cs)
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should find all of the following files and directories.
	if _, err = os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_")); os.IsNotExist(err) {
		t.Error("Bag directory does not exist!")
	}
	if data, err := os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_", "data")); os.IsNotExist(err) || !data.IsDir() {
		t.Error("Data directory does not exist or is not a directory!")
	}
	if _, err = os.Stat(filepath.Join(bag.Path(), "bagit.txt")); os.IsNotExist(err) {
		bi, err := bag.BagInfo()
		if err != nil {
			t.Error(err)
		}
		t.Errorf("bagit.txt does not exist! %s", bi.Name())
	}
	if _, err = os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_", "manifest-sha1.txt")); os.IsNotExist(err) {
		t.Error("manifest-sha1.txt does not exist!")
	}
}
// TestAddFile: it should place an appropriate file in the data directory
// and add the fixity to the manifest.
func TestAddFile(t *testing.T) {
	// Setup the test file to add for the test.
	fi, _ := ioutil.TempFile("", "TEST_GO_ADDFILE_")
	fi.WriteString("Test the checksum")
	fi.Close()
	defer os.Remove(fi.Name())
	// Setup the Test Bag
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	bag, err := bagins.NewBag(os.TempDir(), "_GOTESTBAG_", cs)
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should return an error when trying to add a file that doesn't exist.
	if err = bag.AddFile("idontexist.txt", "idontexist.txt"); err == nil {
		t.Errorf("Adding a nonexistant file did not generate an error!")
	}
	// It should add a file to the data directory and generate a fixity value.
	expFile := "my/nested/dir/mytestfile.txt"
	if err = bag.AddFile(fi.Name(), expFile); err != nil {
		t.Error(err)
	}
	// It should have created the file in the payload directory.
	_, err = os.Stat(filepath.Join(bag.Path(), "data", expFile))
	if err != nil {
		t.Error("Testing if payload file created:", err)
	}
	// It should have calculated the fixity and put it in the manifest.
	mf, _ := bag.Manifest()
	expKey := filepath.Join("data", expFile)
	fx, ok := mf.Data[expKey]
	if !ok {
		t.Error("Unable to find entry in manfest: ", expKey)
	}
	// A sha1 hex digest is 40 characters long.
	if len(fx) != 40 {
		t.Errorf("Expected %d character fixity but returned: %d", 32, len(fx))
	}
}
// TestAddDir adds a directory of 50 identical files and checks that each
// one lands in the payload with the expected fixity.
func TestAddDir(t *testing.T) {
	// Setup source files to test
	srcDir, _ := ioutil.TempDir("", "_GOTEST_PAYLOAD_SRC_")
	for i := 0; i < 50; i++ {
		fi, _ := ioutil.TempFile(srcDir, "TEST_GO_ADDFILE_")
		fi.WriteString("Test the checksum")
		fi.Close()
	}
	defer os.RemoveAll(srcDir)
	// Setup the test bag
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	bag, err := bagins.NewBag(os.TempDir(), "_GOTESTBAG_", cs)
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should produce no errors
	if errs := bag.AddDir(srcDir); len(errs) != 0 {
		t.Error(errs)
	}
	// It should generate entries in the manifest
	mf, _ := bag.Manifest()
	// It should produce 50 manifest entries
	if len(mf.Data) != 50 {
		t.Error("Expected 50 manifest entries but returned", len(mf.Data))
	}
	// It should contain the proper checksums for each file.
	for key, fx := range mf.Data {
		expFx := "da909ba395016f2a64b04d706520db6afa74fc95"
		expPfx := filepath.Join("data", "TEST_GO_ADDFILE_")
		if fx != expFx {
			t.Error("Fixity error!", fx, "does not match expected", expFx)
		}
		if !strings.HasPrefix(key, expPfx) {
			t.Error(key, "does not start with", expPfx)
		}
	}
}
// TestManifest checks that a new bag's manifest carries the algorithm name
// in its filename.
func TestManifest(t *testing.T) {
	// Setup the test bag
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	bag, err := bagins.NewBag(os.TempDir(), "_GOTESTBAG_", cs)
	if err != nil {
		t.Error(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should have the expected name and return no error.
	mf, err := bag.Manifest()
	if err != nil {
		t.Error(err)
	}
	exp := "manifest-sha1.txt"
	if filepath.Base(mf.Name()) != exp {
		t.Error("Expected manifest name", exp, "but returned", filepath.Base(mf.Name()))
	}
}
Refactored bag setup to a single method in tests.
package bagins_test
import (
"github.com/APTrust/bagins"
"github.com/APTrust/bagins/bagutil"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
)
// setupTestBag creates a sha1-checksummed test bag named "_GOTESTBAG_" in
// the system temp directory. Callers own cleanup of bag.Path().
func setupTestBag() (*bagins.Bag, error) {
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	// Return NewBag's results directly instead of re-wrapping them.
	return bagins.NewBag(os.TempDir(), "_GOTESTBAG_", cs)
}
// TestNewBag checks destination-directory validation and the on-disk layout
// of a freshly created bag.
func TestNewBag(t *testing.T) {
	// Use this ChecksumAlgorithm for the tests.
	algo := "sha1"
	hsh, _ := bagutil.LookupHashFunc(algo) // error ignored: "sha1" is a fixed, known algorithm
	cs := bagutil.NewChecksumAlgorithm(algo, hsh)
	// It should raise an error if the destination dir does not exist.
	badLocation := filepath.Join(os.TempDir(), "/GOTESTNOT_EXISTs/")
	_, err := bagins.NewBag(badLocation, "_GOFAILBAG_", cs)
	if err == nil {
		t.Error("NewBag function does not recognize when a directory does not exist!")
	}
	// It should raise an error if the bag already exists.
	os.MkdirAll(filepath.Join(badLocation, "_GOFAILBAG_"), 0766)
	defer os.RemoveAll(badLocation)
	_, err = bagins.NewBag(badLocation, "_GOFAILBAG_", cs)
	if err == nil {
		t.Error("Error not thrown when bag already exists as expected.")
	}
	// It should create a bag without any errors.
	bag, err := setupTestBag()
	if err != nil {
		// Fail fast: bag is nil on error, so bag.Path() below would panic.
		t.Fatal(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should find all of the following files and directories.
	if _, err = os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_")); os.IsNotExist(err) {
		t.Error("Bag directory does not exist!")
	}
	if data, err := os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_", "data")); os.IsNotExist(err) || !data.IsDir() {
		t.Error("Data directory does not exist or is not a directory!")
	}
	if _, err = os.Stat(filepath.Join(bag.Path(), "bagit.txt")); os.IsNotExist(err) {
		bi, err := bag.BagInfo()
		if err != nil {
			t.Error(err)
		}
		t.Errorf("bagit.txt does not exist! %s", bi.Name())
	}
	if _, err = os.Stat(filepath.Join(os.TempDir(), "_GOTESTBAG_", "manifest-sha1.txt")); os.IsNotExist(err) {
		t.Error("manifest-sha1.txt does not exist!")
	}
}
// TestAddFile: it should place an appropriate file in the data directory
// and add the fixity to the manifest.
func TestAddFile(t *testing.T) {
	// Setup the test file to add for the test.
	fi, _ := ioutil.TempFile("", "TEST_GO_ADDFILE_")
	fi.WriteString("Test the checksum")
	fi.Close()
	defer os.Remove(fi.Name())
	// Setup the Test Bag
	bag, err := setupTestBag()
	if err != nil {
		// Fail fast: bag is nil on error, so bag.Path() below would panic.
		t.Fatal(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should return an error when trying to add a file that doesn't exist.
	if err := bag.AddFile("idontexist.txt", "idontexist.txt"); err == nil {
		t.Errorf("Adding a nonexistant file did not generate an error!")
	}
	// It should add a file to the data directory and generate a fixity value.
	expFile := "my/nested/dir/mytestfile.txt"
	if err := bag.AddFile(fi.Name(), expFile); err != nil {
		t.Error(err)
	}
	// It should have created the file in the payload directory.
	if _, err := os.Stat(filepath.Join(bag.Path(), "data", expFile)); err != nil {
		t.Error("Testing if payload file created:", err)
	}
	// It should have calculated the fixity and put it in the manifest.
	mf, _ := bag.Manifest()
	expKey := filepath.Join("data", expFile)
	fx, ok := mf.Data[expKey]
	if !ok {
		t.Error("Unable to find entry in manfest: ", expKey)
	}
	// A sha1 hex digest is 40 characters long.
	if len(fx) != 40 {
		t.Errorf("Expected %d character fixity but returned: %d", 40, len(fx))
	}
}
// TestAddDir adds a directory of 50 identical files and checks that each
// one lands in the payload with the expected fixity.
func TestAddDir(t *testing.T) {
	// Setup source files to test
	srcDir, _ := ioutil.TempDir("", "_GOTEST_PAYLOAD_SRC_")
	for i := 0; i < 50; i++ {
		fi, _ := ioutil.TempFile(srcDir, "TEST_GO_ADDFILE_")
		fi.WriteString("Test the checksum")
		fi.Close()
	}
	defer os.RemoveAll(srcDir)
	// Setup the test bag
	bag, err := setupTestBag()
	if err != nil {
		// Fail fast: bag is nil on error, so bag.Path() below would panic.
		t.Fatal(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should produce no errors
	if errs := bag.AddDir(srcDir); len(errs) != 0 {
		t.Error(errs)
	}
	// It should generate entries in the manifest
	mf, _ := bag.Manifest()
	// It should produce 50 manifest entries
	if len(mf.Data) != 50 {
		t.Error("Expected 50 manifest entries but returned", len(mf.Data))
	}
	// It should contain the proper checksums for each file.
	for key, fx := range mf.Data {
		expFx := "da909ba395016f2a64b04d706520db6afa74fc95"
		expPfx := filepath.Join("data", "TEST_GO_ADDFILE_")
		if fx != expFx {
			t.Error("Fixity error!", fx, "does not match expected", expFx)
		}
		if !strings.HasPrefix(key, expPfx) {
			t.Error(key, "does not start with", expPfx)
		}
	}
}
// TestManifest checks that a new bag's manifest carries the algorithm name
// in its filename.
func TestManifest(t *testing.T) {
	// Setup the test bag
	bag, err := setupTestBag()
	if err != nil {
		// Fail fast: bag is nil on error, so bag.Path() below would panic.
		t.Fatal(err)
	}
	defer os.RemoveAll(bag.Path())
	// It should have the expected name and return no error.
	mf, err := bag.Manifest()
	if err != nil {
		t.Error(err)
	}
	exp := "manifest-sha1.txt"
	if filepath.Base(mf.Name()) != exp {
		t.Error("Expected manifest name", exp, "but returned", filepath.Base(mf.Name()))
	}
}
// TestAddTagFile is an empty placeholder.
// TODO(review): implement coverage for adding tag files to a bag.
func TestAddTagFile(t *testing.T) {
	// Setup the test bag
}
|
// Copyright 2015 Mikio Hara. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipoam
import (
"net"
"runtime"
"sync/atomic"
"time"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// cookie packs a probe's identifying fields into one uint64 so the monitor
// can match replies against the current probe with a single atomic load.
// Layout: bits 63-48 hold the ICMP ID (or UDP source port), bits 47-32 the
// ICMP sequence (or UDP destination port), bits 7-0 the protocol number.
type cookie uint64

func (c cookie) icmpID() int   { return int(c>>48) & 0xffff }
func (c cookie) icmpSeq() int  { return int(c>>32) & 0xffff }
func (c cookie) udpSport() int { return int(c>>48) & 0xffff }
func (c cookie) udpDport() int { return int(c>>32) & 0xffff }
func (c cookie) protocol() int { return int(c) & 0xff }

// icmpCookie encodes an ICMP echo's protocol, identifier and sequence.
func icmpCookie(protocol, id, seq int) cookie {
	return (cookie(id)&0xffff)<<48 | (cookie(seq)&0xffff)<<32 | cookie(protocol)&0xff
}

// udpCookie encodes a UDP probe's protocol and port pair.
func udpCookie(protocol, sport, dport int) cookie {
	return (cookie(sport)&0xffff)<<48 | (cookie(dport)&0xffff)<<32 | cookie(protocol)&0xff
}
// A maint represents a maintenance endpoint.
type maint struct {
	cookie     uint64      // packed identity of the in-flight probe; accessed atomically
	emitReport int32       // non-zero while reports should be emitted; accessed atomically
	report     chan Report // buffered report channel
}

// setICMPCookie records the in-flight ICMP probe's identity for reply matching.
func (t *maint) setICMPCookie(protocol, id, seq int) {
	atomic.StoreUint64(&t.cookie, uint64(icmpCookie(protocol, id, seq)))
}

// setUDPCookie records the in-flight UDP probe's identity for reply matching.
func (t *maint) setUDPCookie(protocol, sport, dport int) {
	atomic.StoreUint64(&t.cookie, uint64(udpCookie(protocol, sport, dport)))
}
// monitor reads packets from c until a fatal read error occurs. Each packet
// becomes a Report; replies carrying the endpoint's current probe cookie
// (and ICMP errors quoting it) are forwarded via writeReport.
func (t *maint) monitor(c *conn) {
	var r Report
	b := make([]byte, 1<<16-1)
	for {
		rb, h, cm, peer, err := c.readFrom(b)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			// Keep reading on transient errors; bail out otherwise.
			if err, ok := err.(net.Error); ok && (err.Timeout() || err.Temporary()) {
				continue
			}
			return
		}
		r.Time = time.Now()
		if !c.rawSocket {
			r.Src = peer.(*net.UDPAddr).IP
		} else {
			r.Src = peer.(*net.IPAddr).IP
		}
		switch h := h.(type) {
		case *ipv4.Header:
			r.TC = h.TOS
			if runtime.GOOS == "solaris" {
				r.Hops = h.TTL
			}
		}
		switch cm := cm.(type) {
		case *ipv4.ControlMessage:
			// cm may be a typed nil on platforms where the ipv4 package
			// does not support control messages (e.g. Windows); guard
			// before dereferencing.
			if cm != nil {
				if runtime.GOOS != "solaris" {
					r.Hops = cm.TTL
				}
				r.Dst = cm.Dst
				ifi, _ := net.InterfaceByIndex(cm.IfIndex)
				r.Interface = ifi
			}
		case *ipv6.ControlMessage:
			// Same typed-nil caveat as above for the ipv6 package.
			if cm != nil {
				r.TC = cm.TrafficClass
				r.Hops = cm.HopLimit
				r.Dst = cm.Dst
				ifi, _ := net.InterfaceByIndex(cm.IfIndex)
				r.Interface = ifi
			}
		}
		m, err := icmp.ParseMessage(c.protocol, rb)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			continue
		}
		r.ICMP = m
		mcookie := cookie(atomic.LoadUint64(&t.cookie))
		if r.ICMP.Type == ipv4.ICMPTypeEchoReply || r.ICMP.Type == ipv6.ICMPTypeEchoReply {
			cookie := icmpCookie(c.protocol, m.Body.(*icmp.Echo).ID, m.Body.(*icmp.Echo).Seq)
			// Linux non-raw sockets demultiplex replies for us, so accept
			// the reply there even without a cookie match.
			if cookie == mcookie || runtime.GOOS == "linux" && !c.rawSocket {
				t.writeReport(&r)
			}
			continue
		}
		r.OrigHeader, r.OrigPayload, err = parseICMPError(m)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			continue
		}
		switch parseOrigIP(r.OrigHeader) {
		case ianaProtocolICMP, ianaProtocolIPv6ICMP:
			m, err := icmp.ParseMessage(r.ICMP.Type.Protocol(), r.OrigPayload)
			if err != nil {
				r.Error = err
				t.writeReport(&r)
				continue
			}
			var cookie cookie
			if echo, ok := m.Body.(*icmp.Echo); ok {
				cookie = icmpCookie(c.protocol, echo.ID, echo.Seq)
			}
			if cookie == mcookie || runtime.GOOS == "linux" && !c.rawSocket {
				t.writeReport(&r)
			}
		case ianaProtocolUDP:
			sport, dport := parseOrigUDP(r.OrigPayload)
			cookie := udpCookie(ianaProtocolUDP, sport, dport)
			if cookie == mcookie {
				t.writeReport(&r)
			}
		default: // e.g., ianaProtocolIPv6Frag
			t.writeReport(&r)
		}
	}
}
// writeReport forwards r to the report channel when reporting is enabled.
// NOTE(review): this blocks if the buffered channel fills up — confirm
// consumers always drain Report() while reporting is on.
func (t *maint) writeReport(r *Report) {
	emit := atomic.LoadInt32(&t.emitReport)
	if emit > 0 {
		t.report <- *r
	}
}

// Report returns the buffered test report channel.
func (t *maint) Report() <-chan Report {
	return t.report
}

// StartReport enables emitting test reports.
func (t *maint) StartReport() {
	atomic.StoreInt32(&t.emitReport, 1)
}

// StopReport disables emitting test reports.
func (t *maint) StopReport() {
	atomic.StoreInt32(&t.emitReport, 0)
}
ipoam: avoid nil pointer dereference on windows
Both ipv4 and ipv6 packages still don't support Windows.
// Copyright 2015 Mikio Hara. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ipoam
import (
"net"
"runtime"
"sync/atomic"
"time"
"golang.org/x/net/icmp"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// cookie packs a probe's identifying fields into one uint64 so the monitor
// can match replies against the current probe with a single atomic load.
// Layout: bits 63-48 hold the ICMP ID (or UDP source port), bits 47-32 the
// ICMP sequence (or UDP destination port), bits 7-0 the protocol number.
type cookie uint64

func (c cookie) icmpID() int   { return int(c>>48) & 0xffff }
func (c cookie) icmpSeq() int  { return int(c>>32) & 0xffff }
func (c cookie) udpSport() int { return int(c>>48) & 0xffff }
func (c cookie) udpDport() int { return int(c>>32) & 0xffff }
func (c cookie) protocol() int { return int(c) & 0xff }

// icmpCookie encodes an ICMP echo's protocol, identifier and sequence.
func icmpCookie(protocol, id, seq int) cookie {
	return (cookie(id)&0xffff)<<48 | (cookie(seq)&0xffff)<<32 | cookie(protocol)&0xff
}

// udpCookie encodes a UDP probe's protocol and port pair.
func udpCookie(protocol, sport, dport int) cookie {
	return (cookie(sport)&0xffff)<<48 | (cookie(dport)&0xffff)<<32 | cookie(protocol)&0xff
}
// A maint represents a maintenance endpoint.
type maint struct {
	cookie     uint64      // packed identity of the in-flight probe; accessed atomically
	emitReport int32       // non-zero while reports should be emitted; accessed atomically
	report     chan Report // buffered report channel
}

// setICMPCookie records the in-flight ICMP probe's identity for reply matching.
func (t *maint) setICMPCookie(protocol, id, seq int) {
	atomic.StoreUint64(&t.cookie, uint64(icmpCookie(protocol, id, seq)))
}

// setUDPCookie records the in-flight UDP probe's identity for reply matching.
func (t *maint) setUDPCookie(protocol, sport, dport int) {
	atomic.StoreUint64(&t.cookie, uint64(udpCookie(protocol, sport, dport)))
}
// monitor reads packets from c until a fatal read error occurs. Each packet
// becomes a Report; replies carrying the endpoint's current probe cookie
// (and ICMP errors quoting it) are forwarded via writeReport.
func (t *maint) monitor(c *conn) {
	var r Report
	b := make([]byte, 1<<16-1)
	for {
		rb, h, cm, peer, err := c.readFrom(b)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			// Keep reading on transient errors; bail out otherwise.
			if err, ok := err.(net.Error); ok && (err.Timeout() || err.Temporary()) {
				continue
			}
			return
		}
		r.Time = time.Now()
		if !c.rawSocket {
			r.Src = peer.(*net.UDPAddr).IP
		} else {
			r.Src = peer.(*net.IPAddr).IP
		}
		switch h := h.(type) {
		case *ipv4.Header:
			r.TC = h.TOS
			if runtime.GOOS == "solaris" {
				r.Hops = h.TTL
			}
		}
		switch cm := cm.(type) {
		case *ipv4.ControlMessage:
			// cm may be a typed nil on platforms where the ipv4 package
			// does not support control messages; guard before use.
			if cm != nil {
				if runtime.GOOS != "solaris" {
					r.Hops = cm.TTL
				}
				r.Dst = cm.Dst
				ifi, _ := net.InterfaceByIndex(cm.IfIndex)
				r.Interface = ifi
			}
		case *ipv6.ControlMessage:
			// Same typed-nil caveat as above for the ipv6 package.
			if cm != nil {
				r.TC = cm.TrafficClass
				r.Hops = cm.HopLimit
				r.Dst = cm.Dst
				ifi, _ := net.InterfaceByIndex(cm.IfIndex)
				r.Interface = ifi
			}
		}
		m, err := icmp.ParseMessage(c.protocol, rb)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			continue
		}
		r.ICMP = m
		mcookie := cookie(atomic.LoadUint64(&t.cookie))
		if r.ICMP.Type == ipv4.ICMPTypeEchoReply || r.ICMP.Type == ipv6.ICMPTypeEchoReply {
			cookie := icmpCookie(c.protocol, m.Body.(*icmp.Echo).ID, m.Body.(*icmp.Echo).Seq)
			// Linux non-raw sockets demultiplex replies for us, so accept
			// the reply there even without a cookie match.
			if cookie == mcookie || runtime.GOOS == "linux" && !c.rawSocket {
				t.writeReport(&r)
			}
			continue
		}
		r.OrigHeader, r.OrigPayload, err = parseICMPError(m)
		if err != nil {
			r.Error = err
			t.writeReport(&r)
			continue
		}
		switch parseOrigIP(r.OrigHeader) {
		case ianaProtocolICMP, ianaProtocolIPv6ICMP:
			m, err := icmp.ParseMessage(r.ICMP.Type.Protocol(), r.OrigPayload)
			if err != nil {
				r.Error = err
				t.writeReport(&r)
				continue
			}
			var cookie cookie
			if echo, ok := m.Body.(*icmp.Echo); ok {
				cookie = icmpCookie(c.protocol, echo.ID, echo.Seq)
			}
			if cookie == mcookie || runtime.GOOS == "linux" && !c.rawSocket {
				t.writeReport(&r)
			}
		case ianaProtocolUDP:
			sport, dport := parseOrigUDP(r.OrigPayload)
			cookie := udpCookie(ianaProtocolUDP, sport, dport)
			if cookie == mcookie {
				t.writeReport(&r)
			}
		default: // e.g., ianaProtocolIPv6Frag
			t.writeReport(&r)
		}
	}
}
// writeReport forwards r to the report channel when reporting is enabled.
// NOTE(review): this blocks if the buffered channel fills up — confirm
// consumers always drain Report() while reporting is on.
func (t *maint) writeReport(r *Report) {
	emit := atomic.LoadInt32(&t.emitReport)
	if emit > 0 {
		t.report <- *r
	}
}

// Report returns the buffered test report channel.
func (t *maint) Report() <-chan Report {
	return t.report
}

// StartReport enables emitting test reports.
func (t *maint) StartReport() {
	atomic.StoreInt32(&t.emitReport, 1)
}

// StopReport disables emitting test reports.
func (t *maint) StopReport() {
	atomic.StoreInt32(&t.emitReport, 0)
}
|
package godoto
import (
"encoding/json"
"net/url"
"strconv"
)
// MatchHistory is the envelope returned by the GetMatchHistory API method.
type MatchHistory struct {
	Status    int     `json:"status"`
	Limit     int     `json:"num_results"`
	Total     int     `json:"total_results"`
	Remaining int     `json:"results_remaining"`
	Matches   []Match `json:"matches"`
}

// Match is a single entry in a match history listing.
type Match struct {
	Id       int      `json:"match_id"`
	Sequence int      `json:"match_seq_num"`
	Start    int      `json:"start_time"`
	Type     int      `json:"lobby_type"`
	Players  []Player `json:"players"`
}

// Player is the abbreviated player record inside a history Match.
type Player struct {
	Id       int `json:"account_id"`
	Position int `json:"player_slot"`
	Hero     int `json:"hero_id"`
}

// Matches is a collection of fully detailed matches.
type Matches []MatchDetails

// MatchDetails is the full record returned by GetMatchDetails.
type MatchDetails struct {
	Players         []PlayerDetails `json:"players"`
	RadiantWin      bool            `json:"radiant_win"`
	Duration        int             `json:"duration"`
	Start           int             `json:"start_time"`
	Id              int             `json:"match_id"`
	Sequence        int             `json:"match_seq_num"`
	RadiantTower    int             `json:"tower_status_radiant"`
	DireTower       int             `json:"tower_status_dire"`
	RadiantBarracks int             `json:"barracks_status_radiant"`
	DireBarracks    int             `json:"barracks_status_dire"`
	Cluster         int             `json:"cluster"`
	FirstBlood      int             `json:"first_blood_time"`
	Type            int             `json:"lobby_type"`
	HumanPlayers    int             `json:"human_players"`
	League          int             `json:"leagueid"`
	Positive        int             `json:"positive_votes"`
	Negative        int             `json:"negative_votes"`
	GameMode        int             `json:"game_mode"`
	//Drafts Draft `json:"picks_ban"`
}

// PlayerDetails is the per-player statistics record inside MatchDetails.
type PlayerDetails struct {
	Id           int       `json:"account_id"`
	Position     int       `json:"player_slot"`
	Hero         int       `json:"hero_id"`
	Item0        int       `json:"item_0"`
	Item1        int       `json:"item_1"`
	Item2        int       `json:"item_2"`
	Item3        int       `json:"item_3"`
	Item4        int       `json:"item_4"`
	Item5        int       `json:"item_5"`
	Kills        int       `json:"kills"`
	Deaths       int       `json:"deaths"`
	Assists      int       `json:"assists"`
	LeaverStatus int       `json:"leaver_status"`
	Gold         int       `json:"gold"`
	LH           int       `json:"last_hits"`
	DH           int       `json:"denies"`
	GPM          int       `json:"gold_per_min"`
	XPM          int       `json:"xp_per_min"`
	GS           int       `json:"gold_spent"`
	HD           int       `json:"hero_damage"`
	TD           int       `json:"tower_damage"`
	HH           int       `json:"hero_healing"`
	Level        int       `json:"level"`
	Abilities    []Ability `json:"ability_upgrades"`
}

// Ability is one ability-upgrade event for a player.
type Ability struct {
	Id    int `json:"ability"`
	Time  int `json:"time"`
	Level int `json:"level"`
}
// GetMatchHistory queries the API's GetMatchHistory method with the given
// filters and unmarshals the response. A limit <= 0 defaults to requesting
// 5 matches; tournamentOnly adds the tournament_games_only flag.
// Note: zero-valued filters are still sent as "0" query parameters.
func GetMatchHistory(accountID int, gameMode int, skill int, heroID int, minPlayers int, leagueID int, startAtMatchID int, limit int, tournamentOnly bool) (history MatchHistory) {
	api := DotaAPI("GetMatchHistory", true)
	api.Params = url.Values{}
	api.Params.Set("account_id", strconv.Itoa(accountID))
	api.Params.Set("game_mode", strconv.Itoa(gameMode))
	api.Params.Set("skill", strconv.Itoa(skill))
	api.Params.Set("hero_id", strconv.Itoa(heroID))
	api.Params.Set("min_players", strconv.Itoa(minPlayers))
	api.Params.Set("league_id", strconv.Itoa(leagueID))
	api.Params.Set("start_at_match_id", strconv.Itoa(startAtMatchID))
	if limit > 0 {
		api.Params.Set("matches_requested", strconv.Itoa(limit))
	} else {
		api.Params.Set("matches_requested", "5")
	}
	if tournamentOnly {
		api.Params.Set("tournament_games_only", "1")
	}
	result := api.GetResult()
	history = MatchHistory{}
	err := json.Unmarshal(result.Data, &history)
	pError(err)
	return
}
// GetMatchDetails fetches and unmarshals the full details of a single match
// by its match ID.
func GetMatchDetails(matchID int) (match MatchDetails) {
	api := DotaAPI("GetMatchDetails", true)
	api.Params = url.Values{}
	api.Params.Set("match_id", strconv.Itoa(matchID))
	result := api.GetResult()
	match = MatchDetails{}
	err := json.Unmarshal(result.Data, &match)
	pError(err)
	return
}
// GetDetails fetches the full MatchDetails for this history entry.
func (m Match) GetDetails() MatchDetails {
	return GetMatchDetails(m.Id)
}
// GetDetails fetches full details for every match in the history
// concurrently and returns them in the same order as this.Matches.
func (this MatchHistory) GetDetails() (matches Matches) {
	history := this.Matches
	// Preallocate so each goroutine writes a distinct element: appending to
	// the shared slice from multiple goroutines was a data race.
	matches = make(Matches, len(history))
	// Buffered so a finished goroutine never blocks on the signal channel.
	done := make(chan bool, len(history))
	for i, element := range history {
		go func(i int, element Match) {
			matches[i] = element.GetDetails()
			done <- true
		}(i, element)
	}
	// Wait for every fetch. An empty history waits zero times; the old
	// single-signal scheme deadlocked when history was empty.
	for range history {
		<-done
	}
	return
}
// GetPosition decodes this.Position (the API's player_slot byte): bit 7 is
// the team flag (set = Dire) and the low three bits are the slot position.
func (this PlayerDetails) GetPosition() (isDire bool, position int) {
	isDire = false
	if (this.Position&(1<<7))>>7 == 1 {
		isDire = true
	}
	// BUGFIX: the mask was the decimal literal 111 (0b1101111); the intent
	// is the binary mask 0b111 (0x7) selecting the low position bits.
	position = this.Position & 0x7
	return
}
// GetPosition returns the side and slot position of accountID within this
// match, or (false, 0) when the account is not among the players.
func (this MatchDetails) GetPosition(accountID int) (isDire bool, position int) {
	for _, player := range this.Players {
		if accountID == player.Id {
			// Return as soon as the player is found instead of scanning
			// the remaining players.
			return player.GetPosition()
		}
	}
	return false, 0
}
Return after match is found (quicker)
package godoto
import (
"encoding/json"
"net/url"
"strconv"
)
// MatchHistory is the envelope returned by the GetMatchHistory API method.
type MatchHistory struct {
	Status    int     `json:"status"`
	Limit     int     `json:"num_results"`
	Total     int     `json:"total_results"`
	Remaining int     `json:"results_remaining"`
	Matches   []Match `json:"matches"`
}

// Match is a single entry in a match history listing.
type Match struct {
	Id       int      `json:"match_id"`
	Sequence int      `json:"match_seq_num"`
	Start    int      `json:"start_time"`
	Type     int      `json:"lobby_type"`
	Players  []Player `json:"players"`
}

// Player is the abbreviated player record inside a history Match.
type Player struct {
	Id       int `json:"account_id"`
	Position int `json:"player_slot"`
	Hero     int `json:"hero_id"`
}

// Matches is a collection of fully detailed matches.
type Matches []MatchDetails

// MatchDetails is the full record returned by GetMatchDetails.
type MatchDetails struct {
	Players         []PlayerDetails `json:"players"`
	RadiantWin      bool            `json:"radiant_win"`
	Duration        int             `json:"duration"`
	Start           int             `json:"start_time"`
	Id              int             `json:"match_id"`
	Sequence        int             `json:"match_seq_num"`
	RadiantTower    int             `json:"tower_status_radiant"`
	DireTower       int             `json:"tower_status_dire"`
	RadiantBarracks int             `json:"barracks_status_radiant"`
	DireBarracks    int             `json:"barracks_status_dire"`
	Cluster         int             `json:"cluster"`
	FirstBlood      int             `json:"first_blood_time"`
	Type            int             `json:"lobby_type"`
	HumanPlayers    int             `json:"human_players"`
	League          int             `json:"leagueid"`
	Positive        int             `json:"positive_votes"`
	Negative        int             `json:"negative_votes"`
	GameMode        int             `json:"game_mode"`
	//Drafts Draft `json:"picks_ban"`
}

// PlayerDetails is the per-player statistics record inside MatchDetails.
type PlayerDetails struct {
	Id           int       `json:"account_id"`
	Position     int       `json:"player_slot"`
	Hero         int       `json:"hero_id"`
	Item0        int       `json:"item_0"`
	Item1        int       `json:"item_1"`
	Item2        int       `json:"item_2"`
	Item3        int       `json:"item_3"`
	Item4        int       `json:"item_4"`
	Item5        int       `json:"item_5"`
	Kills        int       `json:"kills"`
	Deaths       int       `json:"deaths"`
	Assists      int       `json:"assists"`
	LeaverStatus int       `json:"leaver_status"`
	Gold         int       `json:"gold"`
	LH           int       `json:"last_hits"`
	DH           int       `json:"denies"`
	GPM          int       `json:"gold_per_min"`
	XPM          int       `json:"xp_per_min"`
	GS           int       `json:"gold_spent"`
	HD           int       `json:"hero_damage"`
	TD           int       `json:"tower_damage"`
	HH           int       `json:"hero_healing"`
	Level        int       `json:"level"`
	Abilities    []Ability `json:"ability_upgrades"`
}

// Ability is one ability-upgrade event for a player.
type Ability struct {
	Id    int `json:"ability"`
	Time  int `json:"time"`
	Level int `json:"level"`
}
// GetMatchHistory queries the GetMatchHistory Web API endpoint with the
// supplied filters and decodes the response into a MatchHistory value.
// A non-positive limit falls back to requesting five matches.
func GetMatchHistory(accountID int, gameMode int, skill int, heroID int, minPlayers int, leagueID int, startAtMatchID int, limit int, tournamentOnly bool) (history MatchHistory) {
	api := DotaAPI("GetMatchHistory", true)
	params := url.Values{
		"account_id":        {strconv.Itoa(accountID)},
		"game_mode":         {strconv.Itoa(gameMode)},
		"skill":             {strconv.Itoa(skill)},
		"hero_id":           {strconv.Itoa(heroID)},
		"min_players":       {strconv.Itoa(minPlayers)},
		"league_id":         {strconv.Itoa(leagueID)},
		"start_at_match_id": {strconv.Itoa(startAtMatchID)},
	}
	requested := "5"
	if limit > 0 {
		requested = strconv.Itoa(limit)
	}
	params.Set("matches_requested", requested)
	if tournamentOnly {
		params.Set("tournament_games_only", "1")
	}
	api.Params = params
	result := api.GetResult()
	pError(json.Unmarshal(result.Data, &history))
	return
}
// GetMatchDetails fetches and decodes the full record of a single match.
func GetMatchDetails(matchID int) (match MatchDetails) {
	api := DotaAPI("GetMatchDetails", true)
	api.Params = url.Values{"match_id": {strconv.Itoa(matchID)}}
	result := api.GetResult()
	pError(json.Unmarshal(result.Data, &match))
	return
}
// GetDetails fetches the full MatchDetails record for this match summary.
func (this Match) GetDetails() MatchDetails {
	return GetMatchDetails(this.Id)
}
// GetDetails fetches the full MatchDetails for every match in the
// history, issuing the API requests concurrently.
//
// Fix: the previous version appended to the shared result slice from
// multiple goroutines (a data race) and blocked forever on an empty
// history (nothing ever sent on done). Each worker now delivers its
// result over a channel and the collector appends from a single
// goroutine. As before, results arrive in completion order, not in the
// order of this.Matches.
func (this MatchHistory) GetDetails() (matches Matches) {
	results := make(chan MatchDetails)
	for _, element := range this.Matches {
		go func(element Match) {
			results <- element.GetDetails()
		}(element)
	}
	// Receive exactly one result per launched goroutine; this loop is a
	// no-op for an empty history, so there is no deadlock.
	for range this.Matches {
		matches = append(matches, <-results)
	}
	return
}
// GetPosition decodes the Dota 2 player_slot bitfield: bit 7 is the
// team (1 = Dire) and the low three bits are the player's slot within
// the team.
func (this PlayerDetails) GetPosition() (isDire bool, position int) {
	isDire = (this.Position&(1<<7))>>7 == 1
	// Fix: the original masked with decimal 111 (0b1101111); the intent
	// is the binary mask 0b111 (= 7) that selects the three low position
	// bits. The two agree for well-formed player_slot values, but 7 is
	// the documented mask.
	position = this.Position & 7
	return
}
func (this MatchDetails) GetPosition(accountID int) (isDire bool, position int) {
isDire, position = false, 0
for _, player := range this.Players {
if accountID == player.Id {
isDire, position = player.GetPosition()
return
}
}
return
} |
package amazonproduct
import (
"net/url"
"sort"
"fmt"
"strings"
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"time"
"net/http"
"io/ioutil"
)
// AmazonProductAPI holds the credentials and endpoint used to build,
// sign, and issue Product Advertising API requests.
type AmazonProductAPI struct {
	AccessKey    string // AWS access key ID sent as AWSAccessKeyId
	SecretKey    string // HMAC-SHA256 signing key (see SignAmazonUrl)
	AssociateTag string
	Host         string // API hostname, e.g. a regional webservices endpoint
}
// ItemSearchByKeyword runs an ItemSearch over the "All" index for the
// given keywords, requesting a standard set of response groups.
func (api AmazonProductAPI) ItemSearchByKeyword(Keywords string) (string, error) {
	return api.ItemSearch("All", map[string]string{
		"Keywords":      Keywords,
		"ResponseGroup": "Images,ItemAttributes,Small,EditorialReview",
	})
}
// ItemSearchByKeywordWithResponseGroup runs an ItemSearch over the
// "All" index for the given keywords with a caller-chosen response group.
func (api AmazonProductAPI) ItemSearchByKeywordWithResponseGroup(Keywords string, ResponseGroup string) (string, error) {
	return api.ItemSearch("All", map[string]string{
		"Keywords":      Keywords,
		"ResponseGroup": ResponseGroup,
	})
}
// ItemSearch issues a signed ItemSearch request against the given search
// index and returns the raw XML response body.
//
// Note: Parameters is modified — the SearchIndex key is set on it.
func (api AmazonProductAPI) ItemSearch(SearchIndex string, Parameters map[string]string) (string, error) {
	Parameters["SearchIndex"] = SearchIndex
	genUrl, err := GenerateAmazonUrl(api, "ItemSearch", Parameters)
	if err != nil {
		return "", err
	}
	// Fix: the timestamp is part of the signed payload, but its error was
	// previously ignored; a malformed query would have been signed anyway.
	if err := SetTimestamp(genUrl); err != nil {
		return "", err
	}
	signedurl, err := SignAmazonUrl(genUrl, api)
	if err != nil {
		return "", err
	}
	resp, err := http.Get(signedurl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// GenerateAmazonUrl builds the unsigned request URL for the given API
// operation, merging the per-call parameters with the standard ones
// (service, version, credentials, associate tag).
func GenerateAmazonUrl(api AmazonProductAPI, Operation string, Parameters map[string]string) (finalUrl *url.URL, err error) {
	parsed, err := url.Parse(api.Host)
	if err != nil {
		return nil, err
	}
	// Force the canonical endpoint shape regardless of how Host parsed.
	parsed.Host = api.Host
	parsed.Scheme = "http"
	parsed.Path = "/onca/xml"
	query := url.Values{}
	query.Add("Operation", Operation)
	query.Add("Service", "AWSECommerceService")
	query.Add("AWSAccessKeyId", api.AccessKey)
	query.Add("Version", "2009-01-01")
	query.Add("AssociateTag", api.AssociateTag)
	for name, value := range Parameters {
		query.Set(name, value)
	}
	parsed.RawQuery = query.Encode()
	return parsed, nil
}
func SetTimestamp(origUrl *url.URL) (err error) {
values, err := url.ParseQuery(origUrl.RawQuery)
if (err != nil) {
return err
}
values.Set("Timestamp", time.Now().UTC().Format(time.RFC3339))
origUrl.RawQuery = values.Encode()
return nil
}
// SignAmazonUrl appends an HMAC-SHA256 Signature parameter computed over
// the canonical (sorted, partially re-escaped) query string, following
// the Product Advertising API request-signing rules. The URL's query is
// rewritten in canonical order as a side effect.
func SignAmazonUrl(origUrl *url.URL, api AmazonProductAPI) (signedUrl string, err error) {
	// AWS requires "," and ":" percent-encoded in the string to sign.
	canonical := strings.NewReplacer(",", "%2C", ":", "%3A").Replace(origUrl.RawQuery)
	pairs := strings.Split(canonical, "&")
	sort.Strings(pairs)
	sortedParams := strings.Join(pairs, "&")
	toSign := fmt.Sprintf("GET\n%s\n%s\n%s", origUrl.Host, origUrl.Path, sortedParams)
	mac := hmac.New(sha256.New, []byte(api.SecretKey))
	if _, err = mac.Write([]byte(toSign)); err != nil {
		return "", err
	}
	signature := url.QueryEscape(base64.StdEncoding.EncodeToString(mac.Sum(nil)))
	origUrl.RawQuery = sortedParams + "&Signature=" + signature
	return origUrl.String(), nil
}
ItemPage support
package amazonproduct
import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
	"time"
)
// AmazonProductAPI holds the credentials and endpoint used to build,
// sign, and issue Product Advertising API requests.
type AmazonProductAPI struct {
	AccessKey    string // AWS access key ID sent as AWSAccessKeyId
	SecretKey    string // HMAC-SHA256 signing key (see SignAmazonUrl)
	AssociateTag string
	Host         string // API hostname, e.g. a regional webservices endpoint
}
/*
ItemSearchByKeyword takes a string containing keywords and the 1-based
result page to request, and returns the raw search results.
*/
func (api AmazonProductAPI) ItemSearchByKeyword(Keywords string, page int) (string, error) {
	params := map[string]string{
		"Keywords":      Keywords,
		"ResponseGroup": "Images,ItemAttributes,Small,EditorialReview",
		// Fix: this file used strconv without importing it (now added to
		// the import block); strconv.Itoa is the idiomatic spelling of
		// FormatInt(int64(n), 10).
		"ItemPage": strconv.Itoa(page),
	}
	return api.ItemSearch("All", params)
}
// ItemSearchByKeywordWithResponseGroup runs an ItemSearch over the
// "All" index for the given keywords with a caller-chosen response group.
func (api AmazonProductAPI) ItemSearchByKeywordWithResponseGroup(Keywords string, ResponseGroup string) (string, error) {
	return api.ItemSearch("All", map[string]string{
		"Keywords":      Keywords,
		"ResponseGroup": ResponseGroup,
	})
}
// ItemSearch issues a signed ItemSearch request against the given search
// index and returns the raw XML response body.
//
// Note: Parameters is modified — the SearchIndex key is set on it.
func (api AmazonProductAPI) ItemSearch(SearchIndex string, Parameters map[string]string) (string, error) {
	Parameters["SearchIndex"] = SearchIndex
	genUrl, err := GenerateAmazonUrl(api, "ItemSearch", Parameters)
	if err != nil {
		return "", err
	}
	// Fix: the timestamp is part of the signed payload, but its error was
	// previously ignored; a malformed query would have been signed anyway.
	if err := SetTimestamp(genUrl); err != nil {
		return "", err
	}
	signedurl, err := SignAmazonUrl(genUrl, api)
	if err != nil {
		return "", err
	}
	resp, err := http.Get(signedurl)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// GenerateAmazonUrl builds the unsigned request URL for the given API
// operation, merging the per-call parameters with the standard ones
// (service, version, credentials, associate tag).
func GenerateAmazonUrl(api AmazonProductAPI, Operation string, Parameters map[string]string) (finalUrl *url.URL, err error) {
	parsed, err := url.Parse(api.Host)
	if err != nil {
		return nil, err
	}
	// Force the canonical endpoint shape regardless of how Host parsed.
	parsed.Host = api.Host
	parsed.Scheme = "http"
	parsed.Path = "/onca/xml"
	query := url.Values{}
	query.Add("Operation", Operation)
	query.Add("Service", "AWSECommerceService")
	query.Add("AWSAccessKeyId", api.AccessKey)
	query.Add("Version", "2009-01-01")
	query.Add("AssociateTag", api.AssociateTag)
	for name, value := range Parameters {
		query.Set(name, value)
	}
	parsed.RawQuery = query.Encode()
	return parsed, nil
}
func SetTimestamp(origUrl *url.URL) (err error) {
values, err := url.ParseQuery(origUrl.RawQuery)
if err != nil {
return err
}
values.Set("Timestamp", time.Now().UTC().Format(time.RFC3339))
origUrl.RawQuery = values.Encode()
return nil
}
// SignAmazonUrl appends an HMAC-SHA256 Signature parameter computed over
// the canonical (sorted, partially re-escaped) query string, following
// the Product Advertising API request-signing rules. The URL's query is
// rewritten in canonical order as a side effect.
func SignAmazonUrl(origUrl *url.URL, api AmazonProductAPI) (signedUrl string, err error) {
	// AWS requires "," and ":" percent-encoded in the string to sign.
	canonical := strings.NewReplacer(",", "%2C", ":", "%3A").Replace(origUrl.RawQuery)
	pairs := strings.Split(canonical, "&")
	sort.Strings(pairs)
	sortedParams := strings.Join(pairs, "&")
	toSign := fmt.Sprintf("GET\n%s\n%s\n%s", origUrl.Host, origUrl.Path, sortedParams)
	mac := hmac.New(sha256.New, []byte(api.SecretKey))
	if _, err = mac.Write([]byte(toSign)); err != nil {
		return "", err
	}
	signature := url.QueryEscape(base64.StdEncoding.EncodeToString(mac.Sum(nil)))
	origUrl.RawQuery = sortedParams + "&Signature=" + signature
	return origUrl.String(), nil
}
|
package main
import (
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"github.com/skelterjohn/go.matrix"
"github.com/ziutek/mymysql/mysql"
_ "github.com/ziutek/mymysql/native"
"log"
"math"
"os"
"runtime"
"sort"
"strconv"
)
//Path of the JSON file holding database credentials; deliberately not checked in.
const _DB_CONFIG_FILENAME = "db_config.SECRET.json"
//NOTE(review): _OUTPUT_FILENAME appears unused in this file — results are written to stdout.
const _OUTPUT_FILENAME = "output.csv"
//Default cap on solve rows fetched (-n to change, -a for no limit).
const _QUERY_LIMIT = 100
//Default fraction of a solve's time that may be penalty before the solve is culled as cheating.
const _PENALTY_PERCENTAGE_CUTOFF = 0.10
//How many solves a user must have to have their relative scale included.
//A low value gives you far more very low or very high scores than you should get.
const _MINIMUM_SOLVES = 10
//Command-line flags; bound to the constants above in init.
var noLimitFlag bool
var printPuzzleDataFlag bool
var cullCheaterPercentageFlag float64
var minimumSolvesFlag int
var useMockData bool
var queryLimit int
//init binds the command-line flags and configures the runtime before main runs.
func init() {
	flag.BoolVar(&noLimitFlag, "a", false, "Specify to execute the solves query with no limit.")
	flag.BoolVar(&printPuzzleDataFlag, "p", false, "Specify that you want puzzle data printed out in the output.")
	flag.Float64Var(&cullCheaterPercentageFlag, "c", _PENALTY_PERCENTAGE_CUTOFF, "What percentage of solve time must be penalty for someone to be considered a cheater.")
	flag.IntVar(&minimumSolvesFlag, "s", _MINIMUM_SOLVES, "How many solves a user must have their scores considered.")
	flag.IntVar(&queryLimit, "n", _QUERY_LIMIT, "Number of solves to fetch from the database.")
	flag.BoolVar(&useMockData, "m", false, "Use mock data (useful if you don't have a real database to test with).")
	//We're going to be doing some heavy-duty matrix multiplication, and the matrix package can take advantage of multiple cores.
	runtime.GOMAXPROCS(6)
}
//dbConfig mirrors the JSON structure of the config file: connection
//credentials plus the table and column names to query, so the tool can
//run against differently-named schemas.
type dbConfig struct {
	Url      string
	Username string
	Password string
	DbName   string
	//Solves table and its column names.
	SolvesTable       string
	SolvesID          string
	SolvesPuzzleID    string
	SolvesTotalTime   string
	SolvesPenaltyTime string
	SolvesUser        string
	//Puzzles table and its column names.
	PuzzlesTable      string
	PuzzlesID         string
	PuzzlesDifficulty string
	PuzzlesName       string
	PuzzlesPuzzle     string
}

//config is populated from _DB_CONFIG_FILENAME at the top of main.
var config dbConfig
//solve is one solve record pulled from the database.
type solve struct {
	puzzleID    int
	totalTime   int //total solve duration; assumed seconds — TODO confirm units
	penaltyTime int //portion of totalTime attributable to hint penalties
}

//userSolvesCollection accumulates every kept solve for a single user,
//along with the user's fastest/slowest times (for normalization) and,
//after sorting, each puzzle's rank position for this user.
type userSolvesCollection struct {
	solves []solve
	max    int
	min    int
	//idPosition maps puzzleID to its index after sorting solves slowest-first.
	idPosition map[int]int
}

//puzzle is one output row: the computed relative difficulty plus the
//metadata merged in from the puzzles table.
type puzzle struct {
	id                     int
	userRelativeDifficulty float32
	difficultyRating       int //the stored rating; -1 until merged from the server
	name                   string
	puzzle                 string
}

type puzzles []puzzle

//byUserRelativeDifficulty wraps puzzles to sort ascending by userRelativeDifficulty.
type byUserRelativeDifficulty struct {
	puzzles
}
//Len reports the number of puzzles in the slice.
func (p puzzles) Len() int {
	return len(p)
}

//Swap exchanges the puzzles at indices i and j.
func (p puzzles) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

//Less orders puzzles by ascending user-relative difficulty.
func (b byUserRelativeDifficulty) Less(i, j int) bool {
	return b.puzzles[i].userRelativeDifficulty < b.puzzles[j].userRelativeDifficulty
}
//bySolveTimeDsc sorts solves from slowest to fastest, so the best
//(fastest) solve ends up in the last position — i.e. the lowest rank.
type bySolveTimeDsc []solve

func (s bySolveTimeDsc) Len() int {
	return len(s)
}

func (s bySolveTimeDsc) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s bySolveTimeDsc) Less(i, j int) bool {
	//Descending by total time: for this algorithm the "best" must sort last.
	return s[i].totalTime > s[j].totalTime
}
//addSolve records a solve for this user, rejecting obviously invalid or
//heavily hint-assisted solves, and maintains the running min/max solve
//time used later for normalization. It reports whether the solve was kept.
func (uc *userSolvesCollection) addSolve(solve solve) bool {
	//A zero-length solve is clearly bogus.
	if solve.totalTime == 0 {
		return false
	}
	//Too much of the time was penalty: treat as cheating and drop it.
	if float64(solve.penaltyTime)/float64(solve.totalTime) > cullCheaterPercentageFlag {
		return false
	}
	uc.solves = append(uc.solves, solve)
	switch {
	case len(uc.solves) == 1:
		//First kept solve seeds both extremes.
		uc.max, uc.min = solve.totalTime, solve.totalTime
	default:
		if solve.totalTime > uc.max {
			uc.max = solve.totalTime
		}
		if solve.totalTime < uc.min {
			uc.min = solve.totalTime
		}
	}
	return true
}
//valid reports whether this user's data can safely contribute to the
//ranking: they need at least minimumSolvesFlag solves and a non-zero
//spread of solve times (max == min would make every relative difficulty
//divide by zero). That degenerate spread usually means a single solve,
//but multiple solves with exactly the same time do occur in the
//production dataset.
func (uc *userSolvesCollection) valid() bool {
	return uc.max != uc.min && len(uc.solves) >= minimumSolvesFlag
}
//relativeDifficulties maps each puzzle this user solved to a score in
//[0, 1]: 0 for the user's fastest average solve, 1 for the slowest.
func (uc *userSolvesCollection) relativeDifficulties() map[int]float32 {
	//Running average of solve time per puzzle, since the same user may
	//have solved one puzzle more than once. (Incremental form kept to
	//preserve the original float32 rounding exactly.)
	avg := make(map[int]float32)
	seen := make(map[int]int)
	for _, s := range uc.solves {
		n := float32(seen[s.puzzleID])
		avg[s.puzzleID] = (avg[s.puzzleID]*n + float32(s.totalTime)) / (n + 1)
		seen[s.puzzleID]++
	}
	//Normalize each average by this user's overall min/max spread.
	result := make(map[int]float32)
	for puzzleID, avgSolveTime := range avg {
		result[puzzleID] = (avgSolveTime - float32(uc.min)) / float32(uc.max-uc.min)
	}
	return result
}
//main loads the DB config, pulls solve records, computes a relative
//difficulty for every puzzle via the MC4 Markov-chain rank-aggregation
//algorithm, merges in the stored difficulty ratings, and writes the
//result as CSV to stdout.
//
//Fix: log.Fatal already calls os.Exit(1), so the os.Exit(1) calls that
//followed each log.Fatal were unreachable and have been removed; two
//`for k, _ := range` loops were tightened to `for k := range`.
func main() {
	flag.Parse()
	file, err := os.Open(_DB_CONFIG_FILENAME)
	if err != nil {
		log.Fatal("Could not find the config file at ", _DB_CONFIG_FILENAME, ". You should copy the SAMPLE one to that filename and configure.")
	}
	defer file.Close()
	decoder := json.NewDecoder(file)
	if err := decoder.Decode(&config); err != nil {
		log.Fatal("There was an error parsing JSON from the config file: ", err)
	}
	//Fetch the puzzle metadata concurrently while we process solves.
	difficutlyRatingsChan := make(chan map[int]puzzle)
	go getPuzzleDifficultyRatings(difficutlyRatingsChan)
	var db mysql.Conn
	if useMockData {
		db = &mockConnection{}
	} else {
		db = mysql.New("tcp", "", config.Url, config.Username, config.Password, config.DbName)
	}
	if err := db.Connect(); err != nil {
		log.Fatal(err)
	}
	var solvesQuery string
	if noLimitFlag {
		log.Println("Running without a limit for number of solves to retrieve.")
		solvesQuery = "select %s, %s, %s, %s from %s"
	} else {
		log.Println("Running with a limit of ", queryLimit, " for number of solves to retrieve.")
		solvesQuery = "select %s, %s, %s, %s from %s limit " + strconv.Itoa(queryLimit)
	}
	res, err := db.Start(solvesQuery, config.SolvesUser, config.SolvesPuzzleID, config.SolvesTotalTime, config.SolvesPenaltyTime, config.SolvesTable)
	if err != nil {
		log.Fatal(err)
	}
	solvesByUser := make(map[string]*userSolvesCollection)
	var userSolves *userSolvesCollection
	var ok bool
	var i int
	var skippedSolves int
	var skippedDuplicateSolves int
	seenRows := make(map[string]bool)
	//First, process all user records in the DB to collect all solves by userName.
	for {
		row, _ := res.GetRow()
		if row == nil {
			break
		}
		i++
		//Skip rows we've already seen, identified by their printed form.
		rowHashValue := fmt.Sprintf("%v", row)
		if _, seen := seenRows[rowHashValue]; seen {
			skippedDuplicateSolves++
			continue
		} else {
			seenRows[rowHashValue] = true
		}
		userSolves, ok = solvesByUser[row.Str(0)]
		if !ok {
			userSolves = new(userSolvesCollection)
			userSolves.idPosition = make(map[int]int)
			solvesByUser[row.Str(0)] = userSolves
		}
		if !userSolves.addSolve(solve{row.Int(1), row.Int(2), row.Int(3)}) {
			skippedSolves++
		}
	}
	log.Println("Processed ", i, " solves by ", len(solvesByUser), " users.")
	log.Println("Skipped ", skippedSolves, " solves that cheated too much.")
	log.Println("Skipped ", skippedDuplicateSolves, " solves because they were duplicates of solves seen earlier.")
	//Now get the relative difficulty for each user's puzzles, and collect them.
	relativeDifficultiesByPuzzle := make(map[int][]float32)
	collectionByPuzzle := make(map[int]map[*userSolvesCollection]bool)
	var skippedUsers int
	for _, collection := range solvesByUser {
		/*
			//TODO: consider removing this logic and all of skipped users totally.
			if !collection.valid() {
				skippedUsers++
				continue
			}
		*/
		for puzzleID, relativeDifficulty := range collection.relativeDifficulties() {
			relativeDifficultiesByPuzzle[puzzleID] = append(relativeDifficultiesByPuzzle[puzzleID], relativeDifficulty)
		}
		//Rank this user's solves slowest-first and remember each puzzle's
		//rank for the pairwise comparisons below.
		sort.Sort(bySolveTimeDsc(collection.solves))
		for i, puzzle := range collection.solves {
			collection.idPosition[puzzle.puzzleID] = i
			collectionMap, ok := collectionByPuzzle[puzzle.puzzleID]
			if !ok {
				collectionMap = make(map[*userSolvesCollection]bool)
			}
			collectionMap[collection] = true
			collectionByPuzzle[puzzle.puzzleID] = collectionMap
		}
	}
	//Now, create the Markov Transition Matrix, according to algorithm MC4 of http://www.wisdom.weizmann.ac.il/~naor/PAPERS/rank_www10.html
	//We start by creating a stacked array of float64's that we'll pass to the matrix library.
	numPuzzles := len(collectionByPuzzle)
	matrixData := make([][]float64, numPuzzles)
	for i := range matrixData {
		matrixData[i] = make([]float64, numPuzzles)
	}
	//Now we will associate each observed puzzleID with an index that it will be associated with in the matrix.
	puzzleIndex := make([]int, numPuzzles)
	counter := 0
	for key := range collectionByPuzzle {
		puzzleIndex[counter] = key
		counter++
	}
	//Now we start to build up the matrix according to the MC4 algorithm.
	for i := 0; i < numPuzzles; i++ {
		for j := 0; j < numPuzzles; j++ {
			if i == j {
				//The special case; stay in the same state. We'll treat it specially.
				continue
			}
			//Convert the zero-index into the puzzle ID we're actually interested in.
			p := puzzleIndex[i]
			q := puzzleIndex[j]
			//Find the intersection of userSolveCollections that contain both p and q.
			pMap := collectionByPuzzle[p]
			qMap := collectionByPuzzle[q]
			var intersection []*userSolvesCollection
			for collection := range pMap {
				if _, ok := qMap[collection]; ok {
					intersection = append(intersection, collection)
				}
			}
			//Next, calculate how many of the collections have q ranked better (lower!) than p.
			count := 0
			for _, collection := range intersection {
				if collection.idPosition[q] < collection.idPosition[p] {
					count++
				}
			}
			//Is it a majority? if so, transition. if not, leave at 0.
			if count > (len(intersection) / 2) {
				matrixData[i][j] = 1.0
			}
		}
	}
	//Go through and normalize the probabilities in each row to sum to 1.
	for i := 0; i < numPuzzles; i++ {
		//Count the number of rows that are 1.0.
		count := 0
		for j := 0; j < numPuzzles; j++ {
			if matrixData[i][j] > 0.0 {
				count++
			}
		}
		probability := 1.0 / float64(numPuzzles)
		for j := 0; j < numPuzzles; j++ {
			if i == j {
				//The stay in the same space probability
				matrixData[i][j] = float64(numPuzzles-count) * probability
			} else if matrixData[i][j] > 0.0 {
				matrixData[i][j] = probability
			}
		}
	}
	//Create an actual matrix with the data.
	markovChain := matrix.MakeDenseMatrixStacked(matrixData)
	//Repeatedly square the chain until its rows converge (or we give up
	//after 20 squarings).
	//NOTE(review): the converged chain is computed but never read below —
	//the output still uses the averaged relative difficulties. Presumably
	//the stationary distribution was meant to feed the ranking; confirm.
	for i := 0; i < 20; i++ {
		markovChain = matrix.ParallelProduct(markovChain, markovChain)
		//Are the rows converged enough for us to bail?
		difference := 0.0
		for i := 0; i < numPuzzles; i++ {
			difference += math.Abs(markovChain.Get(0, i) - markovChain.Get(1, i))
		}
		if difference < 0.0001 {
			log.Println("The markov chain converged after", i+1, "mulitplications.")
			break
		}
	}
	log.Println("Skipped ", skippedUsers, " users because they did not have enough solve times.")
	puzzles := make([]puzzle, len(relativeDifficultiesByPuzzle))
	var index int
	for puzzleID, difficulties := range relativeDifficultiesByPuzzle {
		var sum float32
		for _, difficulty := range difficulties {
			sum += difficulty
		}
		puzzles[index] = puzzle{id: puzzleID, userRelativeDifficulty: sum / float32(len(difficulties)), difficultyRating: -1}
		index++
	}
	//Sort the puzzles by relative user difficulty
	//We actually don't need the wrapper, since it will modify the underlying slice.
	sort.Sort(byUserRelativeDifficulty{puzzles})
	//Merge in the difficulty ratings from the server.
	difficultyRatings := <-difficutlyRatingsChan
	for i, puzzle := range puzzles {
		info, ok := difficultyRatings[puzzle.id]
		if ok {
			puzzle.difficultyRating = info.difficultyRating
			puzzle.name = info.name
			puzzle.puzzle = info.puzzle
		}
		//It's not a pointer so we have to copy it back.
		puzzles[i] = puzzle
	}
	//Now print the results to stdout.
	csvOut := csv.NewWriter(os.Stdout)
	for _, puzzle := range puzzles {
		temp := []string{strconv.Itoa(puzzle.id), strconv.Itoa(puzzle.difficultyRating), fmt.Sprintf("%g", puzzle.userRelativeDifficulty), puzzle.name}
		if printPuzzleDataFlag {
			temp = append(temp, puzzle.puzzle)
		}
		csvOut.Write(temp)
	}
	csvOut.Flush()
}
//getPuzzleDifficultyRatings fetches every puzzle's id, stored difficulty
//rating, name, and body from the puzzles table and delivers them as a
//map keyed by puzzle id on the result channel. It is run as a goroutine
//from main, concurrently with the solves query.
//
//Fix: log.Fatal already exits the process, so the os.Exit(1) calls that
//followed it were unreachable and have been removed.
func getPuzzleDifficultyRatings(result chan map[int]puzzle) {
	var db mysql.Conn
	if useMockData {
		db = &mockConnection{}
	} else {
		db = mysql.New("tcp", "", config.Url, config.Username, config.Password, config.DbName)
	}
	if err := db.Connect(); err != nil {
		log.Fatal(err)
	}
	res, err := db.Start("select %s, %s, %s, %s from %s", config.PuzzlesID, config.PuzzlesDifficulty, config.PuzzlesName, config.PuzzlesPuzzle, config.PuzzlesTable)
	if err != nil {
		log.Fatal(err)
	}
	puzzles := make(map[int]puzzle)
	for {
		row, _ := res.GetRow()
		if row == nil {
			break
		}
		puzzles[row.Int(0)] = puzzle{id: row.Int(0), difficultyRating: row.Int(1), name: row.Str(2), puzzle: row.Str(3)}
	}
	result <- puzzles
}
Print out a message about how many puzzles we've discovered.
package main
import (
"encoding/csv"
"encoding/json"
"flag"
"fmt"
"github.com/skelterjohn/go.matrix"
"github.com/ziutek/mymysql/mysql"
_ "github.com/ziutek/mymysql/native"
"log"
"math"
"os"
"runtime"
"sort"
"strconv"
)
//Path of the JSON file holding database credentials; deliberately not checked in.
const _DB_CONFIG_FILENAME = "db_config.SECRET.json"
//NOTE(review): _OUTPUT_FILENAME appears unused in this file — results are written to stdout.
const _OUTPUT_FILENAME = "output.csv"
//Default cap on solve rows fetched (-n to change, -a for no limit).
const _QUERY_LIMIT = 100
//Default fraction of a solve's time that may be penalty before the solve is culled as cheating.
const _PENALTY_PERCENTAGE_CUTOFF = 0.10
//How many solves a user must have to have their relative scale included.
//A low value gives you far more very low or very high scores than you should get.
const _MINIMUM_SOLVES = 10
//Command-line flags; bound to the constants above in init.
var noLimitFlag bool
var printPuzzleDataFlag bool
var cullCheaterPercentageFlag float64
var minimumSolvesFlag int
var useMockData bool
var queryLimit int
//init binds the command-line flags and configures the runtime before main runs.
func init() {
	flag.BoolVar(&noLimitFlag, "a", false, "Specify to execute the solves query with no limit.")
	flag.BoolVar(&printPuzzleDataFlag, "p", false, "Specify that you want puzzle data printed out in the output.")
	flag.Float64Var(&cullCheaterPercentageFlag, "c", _PENALTY_PERCENTAGE_CUTOFF, "What percentage of solve time must be penalty for someone to be considered a cheater.")
	flag.IntVar(&minimumSolvesFlag, "s", _MINIMUM_SOLVES, "How many solves a user must have their scores considered.")
	flag.IntVar(&queryLimit, "n", _QUERY_LIMIT, "Number of solves to fetch from the database.")
	flag.BoolVar(&useMockData, "m", false, "Use mock data (useful if you don't have a real database to test with).")
	//We're going to be doing some heavy-duty matrix multiplication, and the matrix package can take advantage of multiple cores.
	runtime.GOMAXPROCS(6)
}
//dbConfig mirrors the JSON structure of the config file: connection
//credentials plus the table and column names to query, so the tool can
//run against differently-named schemas.
type dbConfig struct {
	Url      string
	Username string
	Password string
	DbName   string
	//Solves table and its column names.
	SolvesTable       string
	SolvesID          string
	SolvesPuzzleID    string
	SolvesTotalTime   string
	SolvesPenaltyTime string
	SolvesUser        string
	//Puzzles table and its column names.
	PuzzlesTable      string
	PuzzlesID         string
	PuzzlesDifficulty string
	PuzzlesName       string
	PuzzlesPuzzle     string
}

//config is populated from _DB_CONFIG_FILENAME at the top of main.
var config dbConfig
//solve is one solve record pulled from the database.
type solve struct {
	puzzleID    int
	totalTime   int //total solve duration; assumed seconds — TODO confirm units
	penaltyTime int //portion of totalTime attributable to hint penalties
}

//userSolvesCollection accumulates every kept solve for a single user,
//along with the user's fastest/slowest times (for normalization) and,
//after sorting, each puzzle's rank position for this user.
type userSolvesCollection struct {
	solves []solve
	max    int
	min    int
	//idPosition maps puzzleID to its index after sorting solves slowest-first.
	idPosition map[int]int
}

//puzzle is one output row: the computed relative difficulty plus the
//metadata merged in from the puzzles table.
type puzzle struct {
	id                     int
	userRelativeDifficulty float32
	difficultyRating       int //the stored rating; -1 until merged from the server
	name                   string
	puzzle                 string
}

type puzzles []puzzle

//byUserRelativeDifficulty wraps puzzles to sort ascending by userRelativeDifficulty.
type byUserRelativeDifficulty struct {
	puzzles
}
//Len reports the number of puzzles in the slice.
func (p puzzles) Len() int {
	return len(p)
}

//Swap exchanges the puzzles at indices i and j.
func (p puzzles) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

//Less orders puzzles by ascending user-relative difficulty.
func (b byUserRelativeDifficulty) Less(i, j int) bool {
	return b.puzzles[i].userRelativeDifficulty < b.puzzles[j].userRelativeDifficulty
}
//bySolveTimeDsc sorts solves from slowest to fastest, so the best
//(fastest) solve ends up in the last position — i.e. the lowest rank.
type bySolveTimeDsc []solve

func (s bySolveTimeDsc) Len() int {
	return len(s)
}

func (s bySolveTimeDsc) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s bySolveTimeDsc) Less(i, j int) bool {
	//Descending by total time: for this algorithm the "best" must sort last.
	return s[i].totalTime > s[j].totalTime
}
//addSolve records a solve for this user, rejecting obviously invalid or
//heavily hint-assisted solves, and maintains the running min/max solve
//time used later for normalization. It reports whether the solve was kept.
func (uc *userSolvesCollection) addSolve(solve solve) bool {
	//A zero-length solve is clearly bogus.
	if solve.totalTime == 0 {
		return false
	}
	//Too much of the time was penalty: treat as cheating and drop it.
	if float64(solve.penaltyTime)/float64(solve.totalTime) > cullCheaterPercentageFlag {
		return false
	}
	uc.solves = append(uc.solves, solve)
	switch {
	case len(uc.solves) == 1:
		//First kept solve seeds both extremes.
		uc.max, uc.min = solve.totalTime, solve.totalTime
	default:
		if solve.totalTime > uc.max {
			uc.max = solve.totalTime
		}
		if solve.totalTime < uc.min {
			uc.min = solve.totalTime
		}
	}
	return true
}
//valid reports whether this user's data can safely contribute to the
//ranking: they need at least minimumSolvesFlag solves and a non-zero
//spread of solve times (max == min would make every relative difficulty
//divide by zero). That degenerate spread usually means a single solve,
//but multiple solves with exactly the same time do occur in the
//production dataset.
func (uc *userSolvesCollection) valid() bool {
	return uc.max != uc.min && len(uc.solves) >= minimumSolvesFlag
}
//relativeDifficulties maps each puzzle this user solved to a score in
//[0, 1]: 0 for the user's fastest average solve, 1 for the slowest.
func (uc *userSolvesCollection) relativeDifficulties() map[int]float32 {
	//Running average of solve time per puzzle, since the same user may
	//have solved one puzzle more than once. (Incremental form kept to
	//preserve the original float32 rounding exactly.)
	avg := make(map[int]float32)
	seen := make(map[int]int)
	for _, s := range uc.solves {
		n := float32(seen[s.puzzleID])
		avg[s.puzzleID] = (avg[s.puzzleID]*n + float32(s.totalTime)) / (n + 1)
		seen[s.puzzleID]++
	}
	//Normalize each average by this user's overall min/max spread.
	result := make(map[int]float32)
	for puzzleID, avgSolveTime := range avg {
		result[puzzleID] = (avgSolveTime - float32(uc.min)) / float32(uc.max-uc.min)
	}
	return result
}
//main loads the DB config, pulls solve records, computes a relative
//difficulty for every puzzle via the MC4 Markov-chain rank-aggregation
//algorithm, merges in the stored difficulty ratings, and writes the
//result as CSV to stdout.
//
//Fix: log.Fatal already calls os.Exit(1), so the os.Exit(1) calls that
//followed each log.Fatal were unreachable and have been removed; two
//`for k, _ := range` loops were tightened to `for k := range`.
func main() {
	flag.Parse()
	file, err := os.Open(_DB_CONFIG_FILENAME)
	if err != nil {
		log.Fatal("Could not find the config file at ", _DB_CONFIG_FILENAME, ". You should copy the SAMPLE one to that filename and configure.")
	}
	defer file.Close()
	decoder := json.NewDecoder(file)
	if err := decoder.Decode(&config); err != nil {
		log.Fatal("There was an error parsing JSON from the config file: ", err)
	}
	//Fetch the puzzle metadata concurrently while we process solves.
	difficutlyRatingsChan := make(chan map[int]puzzle)
	go getPuzzleDifficultyRatings(difficutlyRatingsChan)
	var db mysql.Conn
	if useMockData {
		db = &mockConnection{}
	} else {
		db = mysql.New("tcp", "", config.Url, config.Username, config.Password, config.DbName)
	}
	if err := db.Connect(); err != nil {
		log.Fatal(err)
	}
	var solvesQuery string
	if noLimitFlag {
		log.Println("Running without a limit for number of solves to retrieve.")
		solvesQuery = "select %s, %s, %s, %s from %s"
	} else {
		log.Println("Running with a limit of ", queryLimit, " for number of solves to retrieve.")
		solvesQuery = "select %s, %s, %s, %s from %s limit " + strconv.Itoa(queryLimit)
	}
	res, err := db.Start(solvesQuery, config.SolvesUser, config.SolvesPuzzleID, config.SolvesTotalTime, config.SolvesPenaltyTime, config.SolvesTable)
	if err != nil {
		log.Fatal(err)
	}
	solvesByUser := make(map[string]*userSolvesCollection)
	var userSolves *userSolvesCollection
	var ok bool
	var i int
	var skippedSolves int
	var skippedDuplicateSolves int
	seenRows := make(map[string]bool)
	//First, process all user records in the DB to collect all solves by userName.
	for {
		row, _ := res.GetRow()
		if row == nil {
			break
		}
		i++
		//Skip rows we've already seen, identified by their printed form.
		rowHashValue := fmt.Sprintf("%v", row)
		if _, seen := seenRows[rowHashValue]; seen {
			skippedDuplicateSolves++
			continue
		} else {
			seenRows[rowHashValue] = true
		}
		userSolves, ok = solvesByUser[row.Str(0)]
		if !ok {
			userSolves = new(userSolvesCollection)
			userSolves.idPosition = make(map[int]int)
			solvesByUser[row.Str(0)] = userSolves
		}
		if !userSolves.addSolve(solve{row.Int(1), row.Int(2), row.Int(3)}) {
			skippedSolves++
		}
	}
	log.Println("Processed ", i, " solves by ", len(solvesByUser), " users.")
	log.Println("Skipped ", skippedSolves, " solves that cheated too much.")
	log.Println("Skipped ", skippedDuplicateSolves, " solves because they were duplicates of solves seen earlier.")
	//Now get the relative difficulty for each user's puzzles, and collect them.
	relativeDifficultiesByPuzzle := make(map[int][]float32)
	collectionByPuzzle := make(map[int]map[*userSolvesCollection]bool)
	var skippedUsers int
	for _, collection := range solvesByUser {
		/*
			//TODO: consider removing this logic and all of skipped users totally.
			if !collection.valid() {
				skippedUsers++
				continue
			}
		*/
		for puzzleID, relativeDifficulty := range collection.relativeDifficulties() {
			relativeDifficultiesByPuzzle[puzzleID] = append(relativeDifficultiesByPuzzle[puzzleID], relativeDifficulty)
		}
		//Rank this user's solves slowest-first and remember each puzzle's
		//rank for the pairwise comparisons below.
		sort.Sort(bySolveTimeDsc(collection.solves))
		for i, puzzle := range collection.solves {
			collection.idPosition[puzzle.puzzleID] = i
			collectionMap, ok := collectionByPuzzle[puzzle.puzzleID]
			if !ok {
				collectionMap = make(map[*userSolvesCollection]bool)
			}
			collectionMap[collection] = true
			collectionByPuzzle[puzzle.puzzleID] = collectionMap
		}
	}
	//Now, create the Markov Transition Matrix, according to algorithm MC4 of http://www.wisdom.weizmann.ac.il/~naor/PAPERS/rank_www10.html
	//We start by creating a stacked array of float64's that we'll pass to the matrix library.
	numPuzzles := len(collectionByPuzzle)
	log.Println("Discovered", numPuzzles, "puzzles.")
	matrixData := make([][]float64, numPuzzles)
	for i := range matrixData {
		matrixData[i] = make([]float64, numPuzzles)
	}
	//Now we will associate each observed puzzleID with an index that it will be associated with in the matrix.
	puzzleIndex := make([]int, numPuzzles)
	counter := 0
	for key := range collectionByPuzzle {
		puzzleIndex[counter] = key
		counter++
	}
	//Now we start to build up the matrix according to the MC4 algorithm.
	for i := 0; i < numPuzzles; i++ {
		for j := 0; j < numPuzzles; j++ {
			if i == j {
				//The special case; stay in the same state. We'll treat it specially.
				continue
			}
			//Convert the zero-index into the puzzle ID we're actually interested in.
			p := puzzleIndex[i]
			q := puzzleIndex[j]
			//Find the intersection of userSolveCollections that contain both p and q.
			pMap := collectionByPuzzle[p]
			qMap := collectionByPuzzle[q]
			var intersection []*userSolvesCollection
			for collection := range pMap {
				if _, ok := qMap[collection]; ok {
					intersection = append(intersection, collection)
				}
			}
			//Next, calculate how many of the collections have q ranked better (lower!) than p.
			count := 0
			for _, collection := range intersection {
				if collection.idPosition[q] < collection.idPosition[p] {
					count++
				}
			}
			//Is it a majority? if so, transition. if not, leave at 0.
			if count > (len(intersection) / 2) {
				matrixData[i][j] = 1.0
			}
		}
	}
	//Go through and normalize the probabilities in each row to sum to 1.
	for i := 0; i < numPuzzles; i++ {
		//Count the number of rows that are 1.0.
		count := 0
		for j := 0; j < numPuzzles; j++ {
			if matrixData[i][j] > 0.0 {
				count++
			}
		}
		probability := 1.0 / float64(numPuzzles)
		for j := 0; j < numPuzzles; j++ {
			if i == j {
				//The stay in the same space probability
				matrixData[i][j] = float64(numPuzzles-count) * probability
			} else if matrixData[i][j] > 0.0 {
				matrixData[i][j] = probability
			}
		}
	}
	//Create an actual matrix with the data.
	markovChain := matrix.MakeDenseMatrixStacked(matrixData)
	//Repeatedly square the chain until its rows converge (or we give up
	//after 20 squarings).
	//NOTE(review): the converged chain is computed but never read below —
	//the output still uses the averaged relative difficulties. Presumably
	//the stationary distribution was meant to feed the ranking; confirm.
	for i := 0; i < 20; i++ {
		markovChain = matrix.ParallelProduct(markovChain, markovChain)
		//Are the rows converged enough for us to bail?
		difference := 0.0
		for i := 0; i < numPuzzles; i++ {
			difference += math.Abs(markovChain.Get(0, i) - markovChain.Get(1, i))
		}
		if difference < 0.0001 {
			log.Println("The markov chain converged after", i+1, "mulitplications.")
			break
		}
	}
	log.Println("Skipped ", skippedUsers, " users because they did not have enough solve times.")
	puzzles := make([]puzzle, len(relativeDifficultiesByPuzzle))
	var index int
	for puzzleID, difficulties := range relativeDifficultiesByPuzzle {
		var sum float32
		for _, difficulty := range difficulties {
			sum += difficulty
		}
		puzzles[index] = puzzle{id: puzzleID, userRelativeDifficulty: sum / float32(len(difficulties)), difficultyRating: -1}
		index++
	}
	//Sort the puzzles by relative user difficulty
	//We actually don't need the wrapper, since it will modify the underlying slice.
	sort.Sort(byUserRelativeDifficulty{puzzles})
	//Merge in the difficulty ratings from the server.
	difficultyRatings := <-difficutlyRatingsChan
	for i, puzzle := range puzzles {
		info, ok := difficultyRatings[puzzle.id]
		if ok {
			puzzle.difficultyRating = info.difficultyRating
			puzzle.name = info.name
			puzzle.puzzle = info.puzzle
		}
		//It's not a pointer so we have to copy it back.
		puzzles[i] = puzzle
	}
	//Now print the results to stdout.
	csvOut := csv.NewWriter(os.Stdout)
	for _, puzzle := range puzzles {
		temp := []string{strconv.Itoa(puzzle.id), strconv.Itoa(puzzle.difficultyRating), fmt.Sprintf("%g", puzzle.userRelativeDifficulty), puzzle.name}
		if printPuzzleDataFlag {
			temp = append(temp, puzzle.puzzle)
		}
		csvOut.Write(temp)
	}
	csvOut.Flush()
}
// getPuzzleDifficultyRatings loads the id, difficulty, name, and puzzle
// columns for every row of the configured puzzles table and sends the
// result, keyed by puzzle ID, on the provided channel.
//
// When useMockData is set, a mockConnection is used instead of a real
// MySQL connection so the rest of the pipeline can run without a database.
//
// The process is terminated via log.Fatal on any connection, query, or row
// iteration error, so a value received from result always represents a
// complete scan of the table.
func getPuzzleDifficultyRatings(result chan map[int]puzzle) {
	var db mysql.Conn
	if useMockData {
		db = &mockConnection{}
	} else {
		db = mysql.New("tcp", "", config.Url, config.Username, config.Password, config.DbName)
	}
	if err := db.Connect(); err != nil {
		// log.Fatal already calls os.Exit(1); the previous explicit
		// os.Exit(1) after it was unreachable dead code.
		log.Fatal(err)
	}
	res, err := db.Start("select %s, %s, %s, %s from %s", config.PuzzlesID, config.PuzzlesDifficulty, config.PuzzlesName, config.PuzzlesPuzzle, config.PuzzlesTable)
	if err != nil {
		log.Fatal(err)
	}
	puzzles := make(map[int]puzzle)
	for {
		// Previously the error from GetRow was discarded, which made a
		// mid-scan failure indistinguishable from a normal end of rows
		// (both yield a nil row) and could silently truncate the result.
		row, err := res.GetRow()
		if err != nil {
			log.Fatal(err)
		}
		if row == nil {
			// nil row with nil error marks the end of the result set.
			break
		}
		puzzles[row.Int(0)] = puzzle{id: row.Int(0), difficultyRating: row.Int(1), name: row.Str(2), puzzle: row.Str(3)}
	}
	result <- puzzles
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients.
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces.
	NamespaceAll string = ""
	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats).
	NamespaceNodeLease string = "kube-node-lease"
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
	// name of the volume.
	// Must be a DNS_LABEL and unique within the pod.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// volumeSource represents the location and type of the mounted volume.
	// If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	// The embedded struct's fields are inlined directly into Volume's JSON representation.
	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
	// hostPath represents a pre-existing file or directory on the host
	// machine that is directly exposed to the container. This is generally
	// used for system agents or other privileged things that are allowed
	// to see the host machine. Most containers will NOT need this.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// ---
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
	// emptyDir represents a temporary directory that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
	// gcePersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
	// gitRepo represents a git repository at a particular revision.
	// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
	// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
	// into the Pod's container.
	// +optional
	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
	// secret represents a secret that should populate this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
	// nfs represents an NFS mount on the host that shares a pod's lifetime
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
	// iscsi represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://examples.k8s.io/volumes/iscsi/README.md
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
	// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
	// persistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
	// flexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
	// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
	// downwardAPI represents downward API about the pod that should populate this volume
	// +optional
	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
	// configMap represents a configMap that should populate this volume
	// +optional
	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
	// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// +optional
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// projected items for all in one resources secrets, configmaps, and downward API
	// +optional
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
	// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
	// +optional
	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
	// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
	// +optional
	CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
	// ephemeral represents a volume that is handled by a cluster storage driver.
	// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
	// and deleted when the pod is removed.
	//
	// Use this if:
	// a) the volume is only needed while the pod runs,
	// b) features of normal volumes like restoring from snapshot or capacity
	// tracking are needed,
	// c) the storage driver is specified through a storage class, and
	// d) the storage driver supports dynamic volume provisioning through
	// a PersistentVolumeClaim (see EphemeralVolumeSource for more
	// information on the connection between this volume type
	// and PersistentVolumeClaim).
	//
	// Use PersistentVolumeClaim or one of the vendor-specific
	// APIs for volumes that persist for longer than the lifecycle
	// of an individual pod.
	//
	// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
	// be used that way - see the documentation of the driver for
	// more information.
	//
	// A pod can use both types of ephemeral volumes and
	// persistent volumes at the same time.
	//
	// +optional
	Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
	// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
	// readOnly will force the ReadOnly setting in VolumeMounts.
	// Default false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
	// gcePersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
	// hostPath represents a directory on the host.
	// Provisioned by a developer or tester.
	// This is useful for single-node development and testing only!
	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
	// glusterfs represents a Glusterfs volume that is attached to a host and
	// exposed to the pod. Provisioned by an admin.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
	// nfs represents an NFS mount on the host. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/rbd/README.md
	// +optional
	RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
	// iscsi represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// +optional
	ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
	// cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
	// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
	// flexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
	// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// +optional
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
	// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
	// local represents directly-attached storage with node affinity
	// +optional
	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
	// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
	// More info: https://examples.k8s.io/volumes/storageos/README.md
	// +optional
	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
	// csi represents storage that is handled by an external CSI driver (Beta feature).
	// +optional
	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility.
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines the mount option annotation used in PVs.
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	// Standard type metadata (kind/apiVersion), inlined.
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// capacity is the description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// persistentVolumeSource is the actual volume backing the persistent volume.
	// Exactly one of the inlined source members must be set (see PersistentVolumeSource).
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// accessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
	// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
	// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
	// Recycle must be supported by the volume plugin underlying this PersistentVolume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
	// volumeMode defines if a volume is intended to be used with a formatted filesystem
	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
	// nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
	// This field influences the scheduling of pods that use this volume.
	// +optional
	NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
	// required specifies hard node constraints that must be met.
	// NOTE(review): pointer field with omitempty but no +optional marker, unlike
	// sibling fields elsewhere in this file — confirm this is intentional.
	Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
// +enum
type PersistentVolumeReclaimPolicy string

const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
// +enum
type PersistentVolumeMode string

const (
	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
	PersistentVolumeBlock PersistentVolumeMode = "Block"
	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// message is a human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// items is a list of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// items is a list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
// NOTE: protobuf field numbers are not sequential with declaration order
// (e.g. selector is 4); they must stay stable for wire compatibility.
type PersistentVolumeClaimSpec struct {
	// accessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// selector is a label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// resources represents the minimum resources the volume should have.
	// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
	// that are lower than previous value but must still be higher than capacity recorded in the
	// status field of the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// volumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// storageClassName is the name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
	// volumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
	// dataSource field can be used to specify either:
	// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
	// * An existing PVC (PersistentVolumeClaim)
	// If the provisioner or an external controller can support the specified data source,
	// it will create a new volume based on the contents of the specified data source.
	// If the AnyVolumeDataSource feature gate is enabled, this field will always have
	// the same contents as the DataSourceRef field.
	// +optional
	DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
	// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
	// volume is desired. This may be any local object from a non-empty API group (non
	// core object) or a PersistentVolumeClaim object.
	// When this field is specified, volume binding will only succeed if the type of
	// the specified object matches some installed volume populator or dynamic
	// provisioner.
	// This field will replace the functionality of the DataSource field and as such
	// if both fields are non-empty, they must have the same value. For backwards
	// compatibility, both fields (DataSource and DataSourceRef) will be set to the same
	// value automatically if one of them is empty and the other is non-empty.
	// There are two important differences between DataSource and DataSourceRef:
	// * While DataSource only allows two specific types of objects, DataSourceRef
	// allows any non-core object, as well as PersistentVolumeClaim objects.
	// * While DataSource ignores disallowed values (dropping them), DataSourceRef
	// preserves all values, and generates an error if a disallowed value is
	// specified.
	// (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
	// +optional
	DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
type PersistentVolumeClaimConditionType string
const (
// PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
// PersistentVolumeClaimResizeStatus reports the status of an in-progress PVC
// volume expansion operation (see PersistentVolumeClaimStatus.ResizeStatus).
// +enum
type PersistentVolumeClaimResizeStatus string
const (
// PersistentVolumeClaimNoExpansionInProgress - when expansion is complete, the empty string is set by resize controller or kubelet.
PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = ""
// PersistentVolumeClaimControllerExpansionInProgress - state set when resize controller starts expanding the volume in control-plane.
PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress"
// PersistentVolumeClaimControllerExpansionFailed - state set when expansion has failed in resize controller with a terminal error.
// Transient errors such as timeout should not set this status and should leave ResizeStatus
// unmodified, so as resize controller can resume the volume expansion.
PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed"
// PersistentVolumeClaimNodeExpansionPending - state set when resize controller has finished expanding the volume but further expansion is needed on the node.
PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending"
// PersistentVolumeClaimNodeExpansionInProgress - state set when kubelet starts expanding the volume.
PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress"
// PersistentVolumeClaimNodeExpansionFailed - state set when expansion has failed in kubelet with a terminal error. Transient errors don't set NodeExpansionFailed.
PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed"
)
// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
// type is the type of the condition (e.g. Resizing, FileSystemResizePending).
Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
// status is the status of the condition.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// lastProbeTime is the time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// lastTransitionTime is the time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// reason is a unique, this should be a short, machine understandable string that gives the reason
// for condition's last transition. If it reports "ResizeStarted" that means the underlying
// persistent volume is being resized.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// message is the human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
// phase represents the current phase of PersistentVolumeClaim.
// +optional
Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
// accessModes contains the actual access modes the volume backing the PVC has.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
// +optional
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
// capacity represents the actual resources of the underlying volume.
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
// resized then the Condition will be set to 'ResizeStarted'.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// allocatedResources tracks the storage capacity allocated to a PVC. It may
// be larger than the actual capacity when a volume expansion operation is requested.
// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
// If a volume expansion capacity request is lowered, allocatedResources is only
// lowered if there are no expansion operations in progress and if the actual volume capacity
// is equal or lower than the requested capacity.
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +optional
AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
// resizeStatus stores status of resize operation.
// ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
// string by resize controller or kubelet.
// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
// +featureGate=RecoverVolumeExpansionFailure
// +optional
ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"`
}
// PersistentVolumeAccessMode defines the ways a PersistentVolume can be mounted.
// +enum
type PersistentVolumeAccessMode string
const (
// ReadWriteOnce - can be mounted in read/write mode to exactly 1 host.
ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
// ReadOnlyMany - can be mounted in read-only mode to many hosts.
ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
// ReadWriteMany - can be mounted in read/write mode to many hosts.
ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
// ReadWriteOncePod - can be mounted in read/write mode to exactly 1 pod;
// cannot be used in combination with other access modes.
ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
)
// PersistentVolumePhase defines the lifecycle phase of a PersistentVolume.
// +enum
type PersistentVolumePhase string
const (
// VolumePending - used for PersistentVolumes that are not available.
VolumePending PersistentVolumePhase = "Pending"
// VolumeAvailable - used for PersistentVolumes that are not yet bound.
// Available volumes are held by the binder and matched to PersistentVolumeClaims.
VolumeAvailable PersistentVolumePhase = "Available"
// VolumeBound - used for PersistentVolumes that are bound.
VolumeBound PersistentVolumePhase = "Bound"
// VolumeReleased - used for PersistentVolumes where the bound PersistentVolumeClaim was deleted.
// Released volumes must be recycled before becoming available again;
// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource.
VolumeReleased PersistentVolumePhase = "Released"
// VolumeFailed - used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim.
VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase defines the lifecycle phase of a PersistentVolumeClaim.
// +enum
type PersistentVolumeClaimPhase string
const (
// ClaimPending - used for PersistentVolumeClaims that are not yet bound.
ClaimPending PersistentVolumeClaimPhase = "Pending"
// ClaimBound - used for PersistentVolumeClaims that are bound.
ClaimBound PersistentVolumeClaimPhase = "Bound"
// ClaimLost - used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType defines what kind of filesystem object a HostPath volume must
// (or may) refer to on the host.
// +enum
type HostPathType string
const (
// HostPathUnset - for backwards compatibility, leave it empty if unset.
HostPathUnset HostPathType = ""
// HostPathDirectoryOrCreate - if nothing exists at the given path, an empty directory will be created there
// as needed with file mode 0755, having the same group and ownership with Kubelet.
HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
// HostPathDirectory - a directory must exist at the given path.
HostPathDirectory HostPathType = "Directory"
// HostPathFileOrCreate - if nothing exists at the given path, an empty file will be created there
// as needed with file mode 0644, having the same group and ownership with Kubelet.
HostPathFileOrCreate HostPathType = "FileOrCreate"
// HostPathFile - a file must exist at the given path.
HostPathFile HostPathType = "File"
// HostPathSocket - a UNIX socket must exist at the given path.
HostPathSocket HostPathType = "Socket"
// HostPathCharDev - a character device must exist at the given path.
HostPathCharDev HostPathType = "CharDevice"
// HostPathBlockDev - a block device must exist at the given path.
HostPathBlockDev HostPathType = "BlockDevice"
)
// HostPathVolumeSource represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
// path of the directory on the host.
// If the path is a symlink, it will follow the link to the real path.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// type for HostPath Volume
// Defaults to "" (HostPathUnset).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// EmptyDirVolumeSource represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
// medium represents what type of storage medium should back this directory.
// The default is "" which means to use the node's default medium.
// Must be an empty string (default) or Memory.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
// sizeLimit is the total amount of local storage required for this EmptyDir volume.
// The size limit is also applicable for memory medium.
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// GlusterfsVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
// endpoints is the endpoint name that details Glusterfs topology.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// path is the Glusterfs volume path.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
// Defaults to false.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// GlusterfsPersistentVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsPersistentVolumeSource struct {
// endpoints is the endpoint name that details Glusterfs topology.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// path is the Glusterfs volume path.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
// Defaults to false.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// endpointsNamespace is the namespace that contains Glusterfs endpoint.
// If this field is empty, the EndpointsNamespace defaults to the same namespace as the bound PVC.
// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
// +optional
EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
}
// RBDVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDVolumeSource struct {
// monitors is a collection of Ceph monitors.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// image is the rados image name.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// fsType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// pool is the rados pool name.
// Default is rbd.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// user is the rados user name.
// Default is admin.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// keyring is the path to the key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// secretRef is the name of the authentication secret for RBDUser. If provided,
// it overrides keyring.
// Default is nil.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// readOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// RBDPersistentVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Unlike RBDVolumeSource, secretRef is a namespaced SecretReference.
type RBDPersistentVolumeSource struct {
// monitors is a collection of Ceph monitors.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// image is the rados image name.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// fsType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// pool is the rados pool name.
// Default is rbd.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// user is the rados user name.
// Default is admin.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// keyring is the path to the key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// secretRef is the name of the authentication secret for RBDUser. If provided,
// it overrides keyring.
// Default is nil.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// readOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// CinderVolumeSource represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
// volumeID used to identify the volume in cinder.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// secretRef is optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// CinderPersistentVolumeSource represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
// Unlike CinderVolumeSource, secretRef is a namespaced SecretReference.
type CinderPersistentVolumeSource struct {
// volumeID used to identify the volume in cinder.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// secretRef is Optional: points to a secret object containing parameters used to connect
// to OpenStack.
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// CephFSVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSource struct {
// monitors is Required: Monitors is a collection of Ceph monitors
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// user is Optional: User is the rados user name, default is admin
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve a secret
// in any namespace.
// +structType=atomic
type SecretReference struct {
// name is unique within a namespace to reference a secret resource.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// namespace defines the space within which the secret name must be unique.
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// CephFSPersistentVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Unlike CephFSVolumeSource, secretRef is a namespaced SecretReference.
type CephFSPersistentVolumeSource struct {
// monitors is Required: Monitors is a collection of Ceph monitors
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// user is Optional: User is the rados user name, default is admin
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// FlockerVolumeSource represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
// datasetName is the name of the dataset, stored as metadata -> name on the dataset for Flocker.
// This field should be considered as deprecated; prefer datasetUUID.
// +optional
DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
// datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
// +optional
DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this
StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux)
StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages-<size>
)
// Protocol defines network protocols supported for things like container ports.
// +enum
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
// ProtocolSCTP is the SCTP protocol.
ProtocolSCTP Protocol = "SCTP"
)
// GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
// pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
// fsType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// partition is the partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// readOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
// registry represents a single or multiple Quobyte Registry services
// specified as a string as host:port pair (multiple entries are separated with commas)
// which acts as the central registry for volumes
Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
// volume is a string that references an already created Quobyte volume by name.
Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
// readOnly here will force the Quobyte volume to be mounted with read-only permissions.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// user to map volume access to
// Defaults to serviceaccount user
// +optional
User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
// group to map volume access to
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
// tenant owning the given Quobyte volume in the Backend
// Used with dynamically provisioned Quobyte volumes, value is set by the plugin
// +optional
Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
// Unlike FlexVolumeSource, secretRef is a namespaced SecretReference.
type FlexPersistentVolumeSource struct {
// driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// fsType is the Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// secretRef is Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// options is Optional: this field holds extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
type FlexVolumeSource struct {
// driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// secretRef is Optional: secretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// options is Optional: this field holds extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// volumeID is the unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// fsType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// partition is the partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// readOnly value true will force the readOnly setting in VolumeMounts.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// GitRepoVolumeSource represents a volume that is populated with the contents
// of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
//
// Deprecated: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
type GitRepoVolumeSource struct {
	// repository is the URL
	Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
	// revision is the commit hash for the specified revision.
	// +optional
	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
	// directory is the target directory name.
	// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
	// git repository. Otherwise, if specified, the volume will contain the git repository in
	// the subdirectory with the given name.
	// +optional
	Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// SecretVolumeSource adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
	// secretName is the name of the secret in the pod's namespace to use.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
	// items If unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// defaultMode is Optional: mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values
	// for mode bits. Defaults to 0644 (SecretVolumeSourceDefaultMode).
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
	// optional field specify whether the Secret or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// SecretVolumeSourceDefaultMode is the default file mode (rw-r--r--) applied
	// to files projected from a Secret when defaultMode is not specified.
	SecretVolumeSourceDefaultMode int32 = 0644
)
// SecretProjection adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
	// The embedded LocalObjectReference carries the name of the referenced Secret.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// items if unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// optional field specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
	// server is the hostname or IP address of the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
	// path that is exported by the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// readOnly here will force the NFS export to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ISCSIVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIVolumeSource struct {
	// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
	// iqn is the target iSCSI Qualified Name.
	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
	// lun represents iSCSI Target Lun number.
	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
	// iscsiInterface is the interface Name that uses an iSCSI transport.
	// Defaults to 'default' (tcp).
	// +optional
	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
	// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	// +optional
	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
	// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
	// +optional
	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
	// chapAuthSession defines whether support iSCSI Session CHAP authentication
	// +optional
	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
	// initiatorName is the custom iSCSI Initiator Name.
	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
	// <target portal>:<volume name> will be created for the connection.
	// +optional
	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
//
// Unlike ISCSIVolumeSource, secretRef here is a *SecretReference, which can
// name a Secret in any namespace rather than only the pod's namespace.
type ISCSIPersistentVolumeSource struct {
	// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
	// iqn is Target iSCSI Qualified Name.
	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
	// lun is iSCSI Target Lun number.
	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
	// iscsiInterface is the interface Name that uses an iSCSI transport.
	// Defaults to 'default' (tcp).
	// +optional
	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
	// portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	// +optional
	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
	// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
	// +optional
	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
	// chapAuthSession defines whether support iSCSI Session CHAP authentication
	// +optional
	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
	// initiatorName is the custom iSCSI Initiator Name.
	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
	// <target portal>:<volume name> will be created for the connection.
	// +optional
	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// FCVolumeSource represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
	// targetWWNs is Optional: FC target worldwide names (WWNs)
	// +optional
	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
	// lun is Optional: FC target lun number
	// +optional
	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// wwids Optional: FC volume world wide identifiers (wwids)
	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
	// +optional
	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
	// secretName is the name of secret that contains Azure Storage Account Name and Key
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// shareName is the azure share Name
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFilePersistentVolumeSource struct {
	// secretName is the name of secret that contains Azure Storage Account Name and Key
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// shareName is the azure Share Name
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key
	// default is the same as the Pod
	// +optional
	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
	// volumePath is the path that identifies vSphere volume vmdk
	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
	// fsType is filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// storagePolicyName is the storage Policy Based Management (SPBM) profile name.
	// +optional
	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
	// storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
	// +optional
	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
	// pdID is the ID that identifies Photon Controller persistent disk
	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode for an Azure data disk
// (None, ReadOnly, or ReadWrite).
// +enum
type AzureDataDiskCachingMode string

// AzureDataDiskKind is the kind of Azure data disk
// (Shared, Dedicated, or Managed).
// +enum
type AzureDataDiskKind string

const (
	// AzureDataDiskCachingNone disables host caching for the disk.
	AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
	// AzureDataDiskCachingReadOnly enables read-only host caching for the disk.
	AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
	// AzureDataDiskCachingReadWrite enables read/write host caching for the disk.
	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"

	// AzureSharedBlobDisk allows multiple blob disks per storage account.
	AzureSharedBlobDisk AzureDataDiskKind = "Shared"
	// AzureDedicatedBlobDisk uses a single blob disk per storage account.
	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
	// AzureManagedDisk is an Azure managed data disk.
	AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
	// diskName is the Name of the data disk in the blob storage.
	// Required: no omitempty on the json tag.
	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
	// diskURI is the URI of data disk in the blob storage.
	// Required: no omitempty on the json tag.
	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
	// cachingMode is the Host Caching mode: None, Read Only, Read Write.
	// +optional
	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
	// fsType is Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// readOnly Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
	// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
	// volumeID uniquely identifies a Portworx volume
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// fsType represents the filesystem type to mount
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume.
type ScaleIOVolumeSource struct {
	// gateway is the host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// system is the name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// secretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	// Required: no omitempty on the json tag.
	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// sslEnabled Flag enable/disable SSL communication with Gateway, default false
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// storagePool is the ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// Default is ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// volumeName is the name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs".
	// Default is "xfs".
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// readOnly Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume.
//
// Unlike ScaleIOVolumeSource, secretRef here is a *SecretReference, which can
// name a Secret in any namespace rather than only the pod's namespace.
type ScaleIOPersistentVolumeSource struct {
	// gateway is the host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// system is the name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// secretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	// Required: no omitempty on the json tag.
	SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// storagePool is the ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// Default is ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// volumeName is the name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs".
	// Default is "xfs"
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// StorageOSVolumeSource represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
	// volumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// volumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// secretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource.
//
// Unlike StorageOSVolumeSource, secretRef here is a *ObjectReference, which can
// name a Secret in any namespace rather than only the pod's namespace.
type StorageOSPersistentVolumeSource struct {
	// volumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// volumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// secretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
	// The embedded LocalObjectReference carries the name of the referenced ConfigMap.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// items if unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// defaultMode is optional: mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// Defaults to 0644 (ConfigMapVolumeSourceDefaultMode).
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
	// optional specify whether the ConfigMap or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// ConfigMapVolumeSourceDefaultMode is the default file mode (rw-r--r--) applied
	// to files projected from a ConfigMap when defaultMode is not specified.
	ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
	// The embedded LocalObjectReference carries the name of the referenced ConfigMap.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// items if unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// optional specify whether the ConfigMap or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ServiceAccountTokenProjection represents a projected service account token
// volume. This projection can be used to insert a service account token into
// the pods runtime filesystem for use against APIs (Kubernetes API Server or
// otherwise).
type ServiceAccountTokenProjection struct {
	// audience is the intended audience of the token. A recipient of a token
	// must identify itself with an identifier specified in the audience of the
	// token, and otherwise should reject the token. The audience defaults to the
	// identifier of the apiserver.
	// +optional
	Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
	// expirationSeconds is the requested duration of validity of the service
	// account token. As the token approaches expiration, the kubelet volume
	// plugin will proactively rotate the service account token. The kubelet will
	// start trying to rotate the token if the token is older than 80 percent of
	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
	// and must be at least 10 minutes.
	// +optional
	ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
	// path is the path relative to the mount point of the file to project the
	// token into.
	Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
}
// ProjectedVolumeSource represents a projected volume source.
type ProjectedVolumeSource struct {
	// sources is the list of volume projections
	// +optional
	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
	// defaultMode are the mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// Defaults to 0644 (ProjectedVolumeSourceDefaultMode).
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection is a projection that may be projected along with other
// supported volume types. Exactly one of the fields below is expected to be
// set per projection entry — TODO confirm against the API validation rules.
type VolumeProjection struct {
	// all types below are the supported types for projection into the same volume

	// secret information about the secret data to project
	// +optional
	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
	// downwardAPI information about the downwardAPI data to project
	// +optional
	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
	// configMap information about the configMap data to project
	// +optional
	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
	// serviceAccountToken is information about the serviceAccountToken data to project
	// +optional
	ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
}
const (
	// ProjectedVolumeSourceDefaultMode is the default file mode (rw-r--r--) applied
	// to files in a projected volume when defaultMode is not specified.
	ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
	// key is the key to project.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// path is the relative path of the file to map the key to.
	// May not be an absolute path.
	// May not contain the path element '..'.
	// May not start with the string '..'.
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// mode is Optional: mode bits used to set permissions on this file.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity (Beta feature).
type LocalVolumeSource struct {
	// path of the full path to the volume on the node.
	// It can be either a directory or block device (disk, partition, ...).
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// fsType is the filesystem type to mount.
	// It applies only when the Path is a block device.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// CSIPersistentVolumeSource represents storage that is managed by an external
// CSI volume driver (Beta feature).
type CSIPersistentVolumeSource struct {
	// driver is the name of the driver to use for this volume.
	// Required.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// volumeHandle is the unique volume name returned by the CSI volume
	// plugin’s CreateVolume to refer to the volume on all subsequent calls.
	// Required.
	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
	// readOnly value to pass to ControllerPublishVolumeRequest.
	// Defaults to false (read/write).
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// fsType to mount. Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs".
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// volumeAttributes of the volume to publish.
	// +optional
	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
	// controllerPublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// ControllerPublishVolume and ControllerUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
	// nodeStageSecretRef is a reference to the secret object containing sensitive
	// information to pass to the CSI driver to complete the CSI NodeStageVolume
	// and NodeUnstageVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
	// nodePublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// NodePublishVolume and NodeUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
	// controllerExpandSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// ControllerExpandVolume call.
	// This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"`
}
// CSIVolumeSource represents a source location of a volume to mount, managed
// by an external CSI driver. Unlike CSIPersistentVolumeSource, secret
// references here are LocalObjectReferences (same-namespace).
type CSIVolumeSource struct {
	// driver is the name of the CSI driver that handles this volume.
	// Consult with your admin for the correct name as registered in the cluster.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// readOnly specifies a read-only configuration for the volume.
	// Defaults to false (read/write).
	// +optional
	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// fsType to mount. Ex. "ext4", "xfs", "ntfs".
	// If not provided, the empty value is passed to the associated CSI driver
	// which will determine the default filesystem to apply.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// volumeAttributes stores driver-specific properties that are passed to the CSI
	// driver. Consult your driver's documentation for supported values.
	// +optional
	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"`
	// nodePublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// NodePublishVolume and NodeUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secret references are passed.
	// +optional
	NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
}
// EphemeralVolumeSource represents an ephemeral volume that is handled by a
// normal storage driver.
type EphemeralVolumeSource struct {
	// Will be used to create a stand-alone PVC to provision the volume.
	// The pod in which this EphemeralVolumeSource is embedded will be the
	// owner of the PVC, i.e. the PVC will be deleted together with the
	// pod. The name of the PVC will be `<pod name>-<volume name>` where
	// `<volume name>` is the name from the `PodSpec.Volumes` array
	// entry. Pod validation will reject the pod if the concatenated name
	// is not valid for a PVC (for example, too long).
	//
	// An existing PVC with that name that is not owned by the pod
	// will *not* be used for the pod to avoid using an unrelated
	// volume by mistake. Starting the pod is then blocked until
	// the unrelated PVC is removed. If such a pre-created PVC is
	// meant to be used by the pod, the PVC has to be updated with an
	// owner reference to the pod once the pod exists. Normally
	// this should not be necessary, but it may be useful when
	// manually reconstructing a broken cluster.
	//
	// This field is read-only and no changes will be made by Kubernetes
	// to the PVC after it has been created.
	//
	// Required, must not be nil.
	VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"`
	// ReadOnly is tombstoned to show why 2 is a reserved protobuf tag.
	// ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
	// May contain labels and annotations that will be copied into the PVC
	// when creating it. No other fields are allowed and will be rejected during
	// validation.
	//
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec is the specification for the PersistentVolumeClaim. The entire
	// content is copied unchanged into the PVC that gets created from this
	// template. The same fields as in a PersistentVolumeClaim
	// are also valid here.
	Spec PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Number of port to expose on the host.
	// If specified, this must be a valid port number, 0 < x < 65536.
	// If HostNetwork is specified, this must match ContainerPort.
	// Most containers do not need this.
	// +optional
	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
	// Protocol for port. Must be UDP, TCP, or SCTP.
	// Defaults to "TCP".
	// +optional
	// +default="TCP"
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
	// HostIP is the host IP to bind the external port to.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// Name must match the Name of a Volume in the pod.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to container and the other way around.
	// When not set, MountPropagationNone is used.
	// This field is beta in 1.10.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
	// Expanded path within the volume from which the container's volume should be mounted.
	// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
	// Defaults to "" (volume's root).
	// SubPathExpr and SubPath are mutually exclusive.
	// +optional
	SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
}
// MountPropagationMode describes mount propagation between the host and a
// container. The default when unset is MountPropagationNone (see VolumeMount).
// +enum
type MountPropagationMode string

const (
	// MountPropagationNone means that the volume in a container will
	// not receive new mounts from the host or other containers, and filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode corresponds to "private" in Linux terminology.
	MountPropagationNone MountPropagationMode = "None"
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
	// name must match the name of a persistentVolumeClaim in the pod
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// devicePath is the path inside of the container that the device will be mapped to.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional: no more than one of the following may be specified.

	// Variable references $(VAR_NAME) are expanded
	// using the previously defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
	// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
	// Escaped references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// Only one of its fields may be set.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace.
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
// +structType=atomic
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	// See EnvVarSource.FieldRef for the supported paths.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and
// their output format.
// +structType=atomic
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars.
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select (e.g. "limits.cpu", "requests.memory").
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1".
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
// +structType=atomic
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
// +structType=atomic
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of environment variables,
// drawn from either a ConfigMap or a Secret.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from.
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from.
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
	// The header field name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value.
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
// +enum
type URIScheme string

const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// GRPCAction describes an action involving a gRPC service health check.
type GRPCAction struct {
	// Port number of the gRPC service. Number must be in the range 1 to 65535.
	Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Service is the name of the service to place in the gRPC HealthCheckRequest
	// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
	//
	// If this is not specified, the default behavior is defined by gRPC.
	// +optional
	// +default=""
	Service *string `json:"service" protobuf:"bytes,2,opt,name=service"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container.
	ProbeHandler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Defaults to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
	// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
	// value overrides the value provided by the pod spec.
	// Value must be non-negative integer. The value zero indicates stop immediately via
	// the kill signal (no opportunity to shut down).
	// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
	// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
	// NOTE(review): the "value zero" sentence above conflicts with "Minimum value
	// is 1" — confirm intended validation before relying on either claim.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,7,opt,name=terminationGracePeriodSeconds"`
}
// PullPolicy describes a policy for if/when to pull a container image.
// +enum
type PullPolicy string

const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)
// PreemptionPolicy describes a policy for if/when to preempt a pod.
// +enum
type PreemptionPolicy string

const (
	// PreemptLowerPriority means that pod can preempt other pods with lower priority.
	PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
	// PreemptNever means that pod never preempts other pods with lower priority.
	PreemptNever PreemptionPolicy = "Never"
)
// TerminationMessagePolicy describes how termination messages are retrieved
// from a container.
// +enum
type TerminationMessagePolicy string

const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capability (e.g. "NET_ADMIN").
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities.
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities.
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
	// TerminationMessagePathDefault means the default path to capture the
	// application termination message running in a container.
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// A single application container that you want to run within a pod.
type Container struct {
// Name of the container specified as a DNS_LABEL.
// Each container in a pod must have a unique name (DNS_LABEL).
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Container image name.
// More info: https://kubernetes.io/docs/concepts/containers/images
// This field is optional to allow higher level config management to default or override
// container images in workload controllers like Deployments and StatefulSets.
// +optional
Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
// Entrypoint array. Not executed within a shell.
// The container image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
// of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The container image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
// of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
// Container's working directory.
// If not specified, the container runtime's default will be used, which
// might be configured in the container image.
// Cannot be updated.
// +optional
WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
// List of ports to expose from the container. Exposing a port here gives
// the system additional information about the network connections a
// container uses, but is primarily informational. Not specifying a port here
// DOES NOT prevent that port from being exposed. Any port which is
// listening on the default "0.0.0.0" address inside a container will be
// accessible from the network.
// Cannot be updated.
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
// +listType=map
// +listMapKey=containerPort
// +listMapKey=protocol
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
// +optional
EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
// List of environment variables to set in the container.
// Cannot be updated.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
// Compute Resources required by this container.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
// +patchMergeKey=mountPath
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the container.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
// Periodic probe of container liveness.
// Container will be restarted if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
// Periodic probe of container service readiness.
// Container will be removed from service endpoints if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
// StartupProbe indicates that the Pod has successfully initialized.
// If specified, no other probes are executed until this completes successfully.
// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
// when it might take a long time to load data or warm a cache, than during steady-state operation.
// This cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
// Actions that the management system should take in response to container lifecycle events.
// Cannot be updated.
// +optional
Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
// Optional: Path at which the file to which the container's termination message
// will be written is mounted into the container's filesystem.
// Message written is intended to be brief final status, such as an assertion failure message.
// Will be truncated by the node if greater than 4096 bytes. The total message length across
// all containers will be limited to 12kb.
// Defaults to /dev/termination-log.
// Cannot be updated.
// +optional
TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
// Indicate how the termination message should be populated. File will use the contents of
// terminationMessagePath to populate the container status message on both success and failure.
// FallbackToLogsOnError will use the last chunk of container log output if the termination
// message file is empty and the container exited with an error.
// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
// Defaults to File.
// Cannot be updated.
// +optional
TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
// Image pull policy.
// One of Always, Never, IfNotPresent.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
// SecurityContext defines the security options the container should be run with.
// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
// and shouldn't be used for general purpose containers.
// Whether this container should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the container will always result in EOF.
// Default is false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
// Whether the container runtime should close the stdin channel after it has been opened by
// a single attach. When stdin is true the stdin stream will remain open across multiple attach
// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
// at which time stdin is closed and remains closed until the container is restarted. If this
// flag is false, a container processes that reads from stdin will never receive an EOF.
// Default is false
// +optional
StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
// Default is false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields (Exec, HTTPGet, TCPSocket, or GRPC) must be specified.
type ProbeHandler struct {
// Exec specifies the action to take.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// TCPSocket specifies an action involving a TCP port.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
// GRPC specifies an action involving a GRPC port.
// This is a beta field and requires enabling GRPCContainerProbe feature gate.
// +featureGate=GRPCContainerProbe
// +optional
GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"`
}
// LifecycleHandler defines a specific action that should be taken in a lifecycle
// hook. One and only one of the fields, except TCPSocket must be specified.
type LifecycleHandler struct {
// Exec specifies the action to take.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
// for the backward compatibility. There are no validation of this field and
// lifecycle hooks will fail in runtime when tcp handler is specified.
// The field is retained (rather than removed) so that old serialized objects still decode.
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
// PostStart is called immediately after a container is created. If the handler fails,
// the container is terminated and restarted according to its restart policy.
// Other management of the container blocks until the hook completes.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PostStart *LifecycleHandler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
// PreStop is called immediately before a container is terminated due to an
// API request or management event such as liveness/startup probe failure,
// preemption, resource contention, etc. The handler is not called if the
// container crashes or exits. The Pod's termination grace period countdown begins before the
// PreStop hook is executed. Regardless of the outcome of the handler, the
// container will eventually terminate within the Pod's termination grace
// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
// or until the termination grace period is reached.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition: True, False, or Unknown.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
// (brief) reason the container is not yet running.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
// Message regarding why the container is not yet running.
// Human-readable; intended for display, not for programmatic matching.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
// Time at which the container was last (re-)started.
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
// Exit status from the last termination of the container.
// Required (no omitempty): zero is a meaningful exit code.
ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
// Signal from the last termination of the container
// +optional
Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
// (brief) reason from the last termination of the container
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Message regarding the last termination of the container
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// Time at which previous execution of the container started
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
// Time at which the container last terminated
// +optional
FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
// Container's ID in the format '<type>://<container_id>'
// +optional
ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
// Details about a waiting container
// +optional
Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
// Details about a running container
// +optional
Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
// Details about a terminated container
// +optional
Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
// This must be a DNS_LABEL. Each container in a pod must have a unique name.
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Details about the container's current condition.
// +optional
State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
// Details about the container's last termination condition.
// +optional
LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
// Specifies whether the container has passed its readiness probe.
Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
// The number of times the container has been restarted.
RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
// The image the container is running.
// More info: https://kubernetes.io/docs/concepts/containers/images.
Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
// ImageID of the container's image.
ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
// Container's ID in the format '<type>://<container_id>'.
// +optional
ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
// Specifies whether the container has passed its startup probe.
// Initialized as false, becomes true after startupProbe is considered successful.
// Resets to false when the container is restarted, or if kubelet loses state temporarily.
// Is always true when no startupProbe is defined.
// Pointer (not bool) so that "unset" is distinguishable from "false".
// +optional
Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
}
// PodPhase is a label for the condition of a pod at the current time.
// +enum
type PodPhase string
// These are the valid statuses of pods.
const (
// PodPending means the pod has been accepted by the system, but one or more of the containers
// has not been started. This includes time before being bound to a node, as well as time spent
// pulling images onto the host.
PodPending PodPhase = "Pending"
// PodRunning means the pod has been bound to a node and all of the containers have been started.
// At least one container is still running or is in the process of being restarted.
PodRunning PodPhase = "Running"
// PodSucceeded means that all containers in the pod have voluntarily terminated
// with a container exit code of 0, and the system is not going to restart any of these containers.
PodSucceeded PodPhase = "Succeeded"
// PodFailed means that all containers in the pod have terminated, and at least one container has
// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
PodFailed PodPhase = "Failed"
// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
// to an error in communicating with the host of the pod.
// Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095)
PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type
type PodConditionType string
// These are built-in conditions of pod. An application may use a custom condition not listed here.
const (
// ContainersReady indicates whether all containers in the pod are ready.
ContainersReady PodConditionType = "ContainersReady"
// PodInitialized means that all init containers in the pod have started successfully.
PodInitialized PodConditionType = "Initialized"
// PodReady means the pod is able to service requests and should be added to the
// load balancing pools of all matching services.
PodReady PodConditionType = "Ready"
// PodScheduled represents status of the scheduling process for this pod.
PodScheduled PodConditionType = "PodScheduled"
)
// These are reasons for a pod's transition to a condition.
const (
// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
// can't schedule the pod right now, for example due to insufficient resources in the cluster.
PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
// Type is the type of the condition.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// Unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
// +enum
type RestartPolicy string
const (
// RestartPolicyAlways restarts the container regardless of its exit status.
RestartPolicyAlways RestartPolicy = "Always"
// RestartPolicyOnFailure restarts the container only when it exits with failure.
RestartPolicyOnFailure RestartPolicy = "OnFailure"
// RestartPolicyNever never restarts the container after it terminates.
RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
// +enum
type DNSPolicy string
const (
// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
// first, if it is available, then fall back on the default
// (as determined by kubelet) DNS settings.
DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
// DNSClusterFirst indicates that the pod should use cluster DNS
// first unless hostNetwork is true, if it is available, then
// fall back on the default (as determined by kubelet) DNS settings.
DNSClusterFirst DNSPolicy = "ClusterFirst"
// DNSDefault indicates that the pod should use the default (as
// determined by kubelet) DNS settings.
DNSDefault DNSPolicy = "Default"
// DNSNone indicates that the pod should use empty DNS settings. DNS
// parameters such as nameservers and search paths should be defined via
// DNSConfig.
DNSNone DNSPolicy = "None"
)
const (
// DefaultTerminationGracePeriodSeconds indicates the default duration in
// seconds a pod needs to terminate gracefully.
DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
// +structType=atomic
type NodeSelector struct {
// Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// A null or empty node selector term matches no objects. The requirements of
// them are ANDed.
// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
// +structType=atomic
type NodeSelectorTerm struct {
// A list of node selector requirements by node's labels.
// +optional
MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
// A list of node selector requirements by node's fields.
// +optional
MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
// The label key that the selector applies to.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// Represents a key's relationship to a set of values.
// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
// An array of string values. If the operator is In or NotIn,
// the values array must be non-empty. If the operator is Exists or DoesNotExist,
// the values array must be empty. If the operator is Gt or Lt, the values
// array must have a single element, which will be interpreted as an integer.
// This array is replaced during a strategic merge patch.
// +optional
Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A node selector operator is the set of operators that can be used in
// a node selector requirement.
// +enum
type NodeSelectorOperator string
// Valid node selector operators; see NodeSelectorRequirement.Operator for
// the value-count constraints each operator imposes.
const (
NodeSelectorOpIn NodeSelectorOperator = "In"
NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
NodeSelectorOpExists NodeSelectorOperator = "Exists"
NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
NodeSelectorOpGt NodeSelectorOperator = "Gt"
NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
// A topology selector term represents the result of label queries.
// A null or empty topology selector term matches no objects.
// The requirements of them are ANDed.
// It provides a subset of functionality as NodeSelectorTerm.
// This is an alpha feature and may change in the future.
// +structType=atomic
type TopologySelectorTerm struct {
// Usage: Fields of type []TopologySelectorTerm must be listType=atomic.
// A list of topology selector requirements by labels.
// +optional
MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
}
// A topology selector requirement is a selector that matches given label.
// This is an alpha feature and may change in the future.
type TopologySelectorLabelRequirement struct {
// The label key that the selector applies to.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// An array of string values. One value must match the label to be selected.
// Each entry in Values is ORed.
Values []string `json:"values" protobuf:"bytes,2,rep,name=values"`
}
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
// Describes node affinity scheduling rules for the pod.
// +optional
NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
// +optional
PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
// +optional
PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// Pod affinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system will try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system may or may not try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
// Mirrors PodAffinity field-for-field, with the matching semantics inverted (avoid co-location).
type PodAntiAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the anti-affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the anti-affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system will try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the anti-affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the anti-affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system may or may not try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the anti-affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
type WeightedPodAffinityTerm struct {
// weight associated with matching the corresponding podAffinityTerm,
// in the range 1-100.
Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
// Required. A pod affinity term, associated with the corresponding weight.
PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running
type PodAffinityTerm struct {
// A label query over a set of resources, in this case pods.
// +optional
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
// namespaces specifies a static list of namespace names that the term applies to.
// The term is applied to the union of the namespaces listed in this field
// and the ones selected by namespaceSelector.
// null or empty namespaces list and null namespaceSelector means "this pod's namespace".
// +optional
Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
// whose value of the label with key topologyKey matches that of any node on which any of the
// selected pods is running.
// Empty topologyKey is not allowed.
TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
// A label query over the set of namespaces that the term applies to.
// The term is applied to the union of the namespaces selected by this field
// and the ones listed in the namespaces field.
// null selector and null or empty namespaces list means "this pod's namespace".
// An empty selector ({}) matches all namespaces.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"`
}
// Node affinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to an update), the system
// will try to eventually evict the pod from its node.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to an update), the system
// may or may not try to eventually evict the pod from its node.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node matches the corresponding matchExpressions; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
// A node selector term, associated with the corresponding weight.
Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// Taint is attached to a node; the node's taint has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
// Required. The taint key to be applied to a node.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// The taint value corresponding to the taint key.
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
// Required. The effect of the taint on pods
// that do not tolerate the taint.
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
// TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints.
// +optional
TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
// TaintEffect describes what happens to pods that do not tolerate a taint
// with this effect. Valid values are declared in the const block below.
// +enum
type TaintEffect string
// These are the valid taint effects.
const (
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
// Enforced by the scheduler.
TaintEffectNoSchedule TaintEffect = "NoSchedule"
// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
// new pods onto the node, rather than prohibiting new pods from scheduling
// onto the node entirely. Enforced by the scheduler.
TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
// Kubelet without going through the scheduler to start.
// Enforced by Kubelet and the scheduler.
// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
// Evict any already-running pods that do not tolerate the taint.
// Currently enforced by NodeController.
TaintEffectNoExecute TaintEffect = "NoExecute"
)
// Toleration allows the pod it is attached to tolerate any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
// +optional
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
// Valid operators are Exists and Equal. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
// +optional
Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
// Value is the taint value the toleration matches to.
// If the operator is Exists, the value should be empty, otherwise just a regular string.
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
// Effect indicates the taint effect to match. Empty means match all taint effects.
// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
// +optional
Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
// TolerationSeconds represents the period of time the toleration (which must be
// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
// it is not set, which means tolerate the taint forever (do not evict). Zero and
// negative values will be treated as 0 (evict immediately) by the system.
// +optional
TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// TolerationOperator is the set of operators that can be used in a toleration.
// +enum
type TolerationOperator string
// These are the valid toleration operators.
const (
// TolerationOpExists matches any taint value for the given key.
TolerationOpExists TolerationOperator = "Exists"
// TolerationOpEqual matches only taints whose value equals the toleration's value.
TolerationOpEqual TolerationOperator = "Equal"
)
// PodReadinessGate contains the reference to a pod condition that must be "True"
// for the pod to be considered ready.
type PodReadinessGate struct {
// ConditionType refers to a condition in the pod's condition list with matching type.
ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
}
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes
// +optional
// +patchMergeKey=name
// +patchStrategy=merge,retainKeys
Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
// List of initialization containers belonging to the pod.
// Init containers are executed in order prior to containers being started. If any
// init container fails, the pod is considered to have failed and is handled according
// to its restartPolicy. The name for an init container or normal container must be
// unique among all containers.
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
// that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
// +patchMergeKey=name
// +patchStrategy=merge
InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
// List of containers belonging to the pod.
// Containers cannot currently be added or removed.
// There must be at least one container in a Pod.
// Cannot be updated.
// +patchMergeKey=name
// +patchStrategy=merge
Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
// pod to perform user-initiated actions such as debugging. This list cannot be specified when
// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"`
// Restart policy for all containers within the pod.
// One of Always, OnFailure, Never.
// Default to Always.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
// +optional
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
// Value must be non-negative integer. The value zero indicates stop immediately via
// the kill signal (no opportunity to shut down).
// If this value is nil, the default grace period will be used instead.
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
// Defaults to 30 seconds.
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
// Optional duration in seconds the pod may be active on the node relative to
// StartTime before the system will actively try to mark it failed and kill associated containers.
// Value must be a positive integer.
// +optional
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
// Set DNS policy for the pod.
// Defaults to "ClusterFirst".
// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// +optional
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
// Deprecated: Use serviceAccountName instead.
// +k8s:conversion-gen=false
// +optional
DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
// requirements.
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
// Host networking requested for this pod. Use the host's network namespace.
// If this option is set, the ports that will be used must be specified.
// Default to false.
// +k8s:conversion-gen=false
// +optional
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
// Use the host's pid namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
// Use the host's ipc namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
// in the case of docker, only DockerConfig type secrets are honored.
// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
// Specifies the hostname of the Pod
// If not specified, the pod's hostname will be set to a system-defined value.
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
// If not specified, the pod will not have a domainname at all.
// +optional
Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
// If specified, the pod's scheduling constraints
// +optional
Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
// If specified, the pod will be dispatched by specified scheduler.
// If not specified, the pod will be dispatched by default scheduler.
// +optional
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
// If specified, the pod's tolerations.
// +optional
Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
// file if specified. This is only valid for non-hostNetwork pods.
// +optional
// +patchMergeKey=ip
// +patchStrategy=merge
HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
// If specified, indicates the pod's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the pod priority will be default or zero if there is no
// default.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
// The priority value. Various system components use this field to find the
// priority of the pod. When Priority Admission Controller is enabled, it
// prevents users from setting this field. The admission controller populates
// this field from PriorityClassName.
// The higher the value, the higher the priority.
// +optional
Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
// If specified, all readiness gates will be evaluated for pod readiness.
// A pod is ready when all its containers are ready AND
// all conditions specified in the readiness gates have status equal to "True"
// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
// +optional
ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
// empty definition that uses the default runtime handler.
// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
// EnableServiceLinks indicates whether information about services should be injected into pod's
// environment variables, matching the syntax of Docker links.
// Optional: Defaults to true.
// +optional
EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
// PreemptionPolicy is the Policy for preempting pods with lower priority.
// One of Never, PreemptLowerPriority.
// Defaults to PreemptLowerPriority if unset.
// +optional
PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
// +optional
Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// All topologySpreadConstraints are ANDed.
// +optional
// +patchMergeKey=topologyKey
// +patchStrategy=merge
// +listType=map
// +listMapKey=topologyKey
// +listMapKey=whenUnsatisfiable
TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"`
// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
// If a pod does not have FQDN, this has no effect.
// Default to false.
// +optional
SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
// Specifies the OS of the containers in the pod.
// Some pod and container fields are restricted if this is set.
//
// If the OS field is set to linux, the following fields must be unset:
// - securityContext.windowsOptions
//
// If the OS field is set to windows, following fields must be unset:
// - spec.hostPID
// - spec.hostIPC
// - spec.securityContext.seLinuxOptions
// - spec.securityContext.seccompProfile
// - spec.securityContext.fsGroup
// - spec.securityContext.fsGroupChangePolicy
// - spec.securityContext.sysctls
// - spec.shareProcessNamespace
// - spec.securityContext.runAsUser
// - spec.securityContext.runAsGroup
// - spec.securityContext.supplementalGroups
// - spec.containers[*].securityContext.seLinuxOptions
// - spec.containers[*].securityContext.seccompProfile
// - spec.containers[*].securityContext.capabilities
// - spec.containers[*].securityContext.readOnlyRootFilesystem
// - spec.containers[*].securityContext.privileged
// - spec.containers[*].securityContext.allowPrivilegeEscalation
// - spec.containers[*].securityContext.procMount
// - spec.containers[*].securityContext.runAsUser
// - spec.containers[*].securityContext.runAsGroup
// +optional
// This is a beta field and requires the IdentifyPodOS feature
OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
}
// OSName is the set of OS'es that can be used in OS.
type OSName string
// These are valid values for OSName
const (
// Linux identifies the Linux operating system.
Linux OSName = "linux"
// Windows identifies the Windows operating system.
Windows OSName = "windows"
)
// PodOS defines the OS parameters of a pod.
type PodOS struct {
// Name is the name of the operating system. The currently supported values are linux and windows.
// Additional value may be defined in future and can be one of:
// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
// Clients should expect to handle additional values and treat unrecognized values in this field as os: null
Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"`
}
// UnsatisfiableConstraintAction defines how the scheduler reacts when a topology
// spread constraint cannot be satisfied.
// +enum
type UnsatisfiableConstraintAction string
// These are the valid actions for an unsatisfiable constraint.
const (
// DoNotSchedule instructs the scheduler not to schedule the pod
// when constraints are not satisfied.
DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule"
// ScheduleAnyway instructs the scheduler to schedule the pod
// even if constraints are not satisfied.
ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
)
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraint struct {
// MaxSkew describes the degree to which pods may be unevenly distributed.
// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
// between the number of matching pods in the target topology and the global minimum.
// The global minimum is the minimum number of matching pods in an eligible domain
// or zero if the number of eligible domains is less than MinDomains.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 2/2/1:
// In this case, the global minimum is 1.
// +-------+-------+-------+
// | zone1 | zone2 | zone3 |
// +-------+-------+-------+
// |  P P  |  P P  |   P   |
// +-------+-------+-------+
// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
// scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
// violate MaxSkew(1).
// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
// to topologies that satisfy it.
// It's a required field. Default value is 1 and 0 is not allowed.
MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"`
// TopologyKey is the key of node labels. Nodes that have a label with this key
// and identical values are considered to be in the same topology.
// We consider each <key, value> as a "bucket", and try to put balanced number
// of pods into each bucket.
// We define a domain as a particular instance of a topology.
// Also, we define an eligible domain as a domain whose nodes match the node selector.
// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
// It's a required field.
TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"`
// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
// the spread constraint.
// - DoNotSchedule (default) tells the scheduler not to schedule it.
// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
// but giving higher precedence to topologies that would help reduce the
// skew.
// A constraint is considered "Unsatisfiable" for an incoming pod
// if and only if every possible node assignment for that pod would violate
// "MaxSkew" on some topology.
// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
// labelSelector spread as 3/1/1:
// +-------+-------+-------+
// | zone1 | zone2 | zone3 |
// +-------+-------+-------+
// | P P P |   P   |   P   |
// +-------+-------+-------+
// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
// won't make it *more* imbalanced.
// It's a required field.
WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"`
// LabelSelector is used to find matching pods.
// Pods that match this label selector are counted to determine the number of pods
// in their corresponding topology domain.
// +optional
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"`
// MinDomains indicates a minimum number of eligible domains.
// When the number of eligible domains with matching topology keys is less than minDomains,
// Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
// And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
// this value has no effect on scheduling.
// As a result, when the number of eligible domains is less than minDomains,
// scheduler won't schedule more than maxSkew Pods to those domains.
// If value is nil, the constraint behaves as if MinDomains is equal to 1.
// Valid values are integers greater than 0.
// When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
//
// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
// labelSelector spread as 2/2/2:
// +-------+-------+-------+
// | zone1 | zone2 | zone3 |
// +-------+-------+-------+
// |  P P  |  P P  |  P P  |
// +-------+-------+-------+
// The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
// In this situation, new pod with the same labelSelector cannot be scheduled,
// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
// it will violate MaxSkew.
//
// This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
// +optional
MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"`
}
const (
// DefaultEnableServiceLinks is the default value for the enableServiceLinks attribute.
DefaultEnableServiceLinks = true
)
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
// IP address of the host file entry.
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Hostnames for the above IP address.
Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
// when volume is mounted.
// +enum
type PodFSGroupChangePolicy string
// These are the valid fsGroup change policies.
const (
// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
// only when permission and ownership of root directory does not match with expected
// permissions on the volume. This can help shorten the time it takes to change
// ownership and permissions of a volume.
FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
// FSGroupChangeAlways indicates that volume's ownership and permissions
// should always be changed whenever volume is mounted inside a Pod. This is the default
// behavior.
FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
)
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
// The SELinux context to be applied to all containers.
// If unspecified, the container runtime will allocate a random SELinux context for each
// container. May also be set in SecurityContext. If set in
// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
// takes precedence for that container.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
// The Windows specific settings applied to all containers.
// If unspecified, the options within a container's SecurityContext will be used.
// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
// Note that this field cannot be set when spec.os.name is linux.
// +optional
WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"`
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence
// for that container.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
// The GID to run the entrypoint of the container process.
// Uses runtime default if unset.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence
// for that container.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
// Indicates that the container must run as a non-root user.
// If true, the Kubelet will validate the image at runtime to ensure that it
// does not run as UID 0 (root) and fail to start the container if it does.
// If unset or false, no such validation will be performed.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
// A list of groups applied to the first process run in each container, in addition
// to the container's primary GID. If unspecified, no groups will be added to
// any container.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
// A special supplemental group that applies to all containers in a pod.
// Some volume types allow the Kubelet to change the ownership of that volume
// to be owned by the pod:
//
// 1. The owning GID will be the FSGroup
// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
// 3. The permission bits are OR'd with rw-rw----
//
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
// sysctls (by the container runtime) might fail to launch.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
// before being exposed inside Pod. This field will only apply to
// volume types which support fsGroup based ownership (and permissions).
// It will have no effect on ephemeral volume types such as: secret, configmaps
// and emptydir.
// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
// The seccomp options to use by the containers in this pod.
// Note that this field cannot be set when spec.os.name is windows.
// +optional
SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"`
}
// SeccompProfile defines a pod/container's seccomp profile settings.
// Only one profile source may be set.
// +union
type SeccompProfile struct {
	// type indicates which kind of seccomp profile will be applied.
	// Valid options are:
	//
	// Localhost - a profile defined in a file on the node should be used.
	// RuntimeDefault - the container runtime default profile should be used.
	// Unconfined - no profile should be applied.
	// +unionDiscriminator
	Type SeccompProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SeccompProfileType"`
	// localhostProfile indicates a profile defined in a file on the node should be used.
	// The profile must be preconfigured on the node to work.
	// Must be a descending path, relative to the kubelet's configured seccomp profile location.
	// Must only be set if type is "Localhost".
	// +optional
	LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"`
}
// SeccompProfileType defines the supported seccomp profile types.
// +enum
type SeccompProfileType string

const (
	// SeccompProfileTypeUnconfined indicates no seccomp profile is applied (A.K.A. unconfined).
	SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
	// SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
	SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
	// SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used.
	// The file's location relative to <kubelet-root-dir>/seccomp.
	SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
)
// PodQOSClass defines the supported qos classes of Pods.
// +enum
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed qos class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable qos class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort qos class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
type PodDNSConfig struct {
	// A list of DNS name server IP addresses.
	// This will be appended to the base nameservers generated from DNSPolicy.
	// Duplicated nameservers will be removed.
	// +optional
	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
	// A list of DNS search domains for host-name lookup.
	// This will be appended to the base search paths generated from DNSPolicy.
	// Duplicated search paths will be removed.
	// +optional
	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
	// A list of DNS resolver options.
	// This will be merged with the base options generated from DNSPolicy.
	// Duplicated entries will be removed. Resolution options given in Options
	// will override those that appear in the base DNSPolicy.
	// +optional
	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
	// Required.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// +optional
	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// IP address information for entries in the (plural) PodIPs field.
// Each entry includes:
// IP: An IP address allocated to the pod. Routable at least within the cluster.
type PodIP struct {
	// ip is an IP address (IPv4 or IPv6) assigned to the pod
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
}
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
// to Container and allows separate documentation for the fields of EphemeralContainer.
// When a new field is added to Container it must be added here as well.
type EphemeralContainerCommon struct {
	// Name of the ephemeral container specified as a DNS_LABEL.
	// This name must be unique among all containers, init containers and ephemeral containers.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Container image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// Ports are not allowed for ephemeral containers.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
	// already allocated to the pod.
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Lifecycle is not allowed for ephemeral containers.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Optional: SecurityContext defines the security options the ephemeral container should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// EphemeralContainerCommon converts to Container. All fields must be kept in sync between
// these two types.
// (Compile-time check: the conversion fails to build if the field sets ever diverge.)
var _ = Container(EphemeralContainerCommon{})
// An EphemeralContainer is a temporary container that you may add to an existing Pod for
// user-initiated activities such as debugging. Ephemeral containers have no resource or
// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
// Pod to exceed its resource allocation.
//
// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
// Pod. Ephemeral containers may not be removed or restarted.
//
// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.
type EphemeralContainer struct {
	// Ephemeral containers have all of the fields of Container, plus additional fields
	// specific to ephemeral containers. Fields in common with Container are in the
	// following inlined struct so that an EphemeralContainer may easily be converted
	// to a Container.
	EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"`
	// If set, the name of the container from PodSpec that this ephemeral container targets.
	// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
	// If not set then the ephemeral container uses the namespaces configured in the Pod spec.
	//
	// The container runtime must implement support for this feature. If the runtime does not
	// support namespace targeting then the result of setting this field is undefined.
	// +optional
	TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
type PodStatus struct {
	// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
	// The conditions array, the reason and message fields, and the individual container status
	// arrays contain more detail about the pod's status.
	// There are five possible phase values:
	//
	// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
	// container images has not been created. This includes time before being scheduled as
	// well as time spent downloading images over the network, which could take a while.
	// Running: The pod has been bound to a node, and all of the containers have been created.
	// At least one container is still running, or is in the process of starting or restarting.
	// Succeeded: All containers in the pod have terminated in success, and will not be restarted.
	// Failed: All containers in the pod have terminated, and at least one container has
	// terminated in failure. The container either exited with non-zero status or was terminated
	// by the system.
	// Unknown: For some reason the state of the pod could not be obtained, typically due to an
	// error in communicating with the host of the pod.
	//
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
	// scheduled right away as preemption victims receive their graceful termination periods.
	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
	// give the resources on this node to a higher priority pod that is created after preemption.
	// As a result, this field may be different than PodSpec.nodeName when the pod is
	// scheduled.
	// +optional
	NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
	// match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
	// is empty if no IPs have been allocated yet.
	// +optional
	// +patchStrategy=merge
	// +patchMergeKey=ip
	PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
	// See PodQOSClass type for available QOS classes
	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
	// Status for any ephemeral containers that have run in this pod.
	// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
	// +optional
	EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready)
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	// +mapType=atomic
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected.
	// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a replication controller condition.
type ReplicationControllerConditionType string

// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition.
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// Session Affinity Type string
// +enum
type ServiceAffinity string

const (
	// ServiceAffinityClientIP is the Client IP based.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)

// DefaultClientIPServiceAffinitySeconds is the default ClientIP session
// sticky time: 10800 seconds (3 hours), matching the documented default of
// ClientIPConfig.TimeoutSeconds.
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800(for 3 hours).
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service.
// +enum
type ServiceType string
const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"
	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"
	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceInternalTrafficPolicyType describes the type of traffic routing for
// internal (cluster-originated) traffic.
// +enum
type ServiceInternalTrafficPolicyType string
const (
	// ServiceInternalTrafficPolicyCluster routes traffic to all endpoints.
	ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster"
	// ServiceInternalTrafficPolicyLocal only routes to node-local
	// endpoints, otherwise drops the traffic.
	ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local"
)
// ServiceExternalTrafficPolicyType describes how traffic from external
// sources is routed to a Service's endpoints.
// +enum
type ServiceExternalTrafficPolicyType string
const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// These are the valid conditions of a service.
const (
	// LoadBalancerPortsError represents the condition of the requested ports
	// on the cloud load balancer instance.
	LoadBalancerPortsError = "LoadBalancerPortsError"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
	// Current service state, expressed as standard Kubernetes conditions
	// keyed (merged) by condition type.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=type
	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers)
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers)
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
	// Ports is a list of records of service ports
	// If used, every port defined in the service should have an entry in it
	// NOTE(review): protobuf tag 3 is skipped here — presumably reserved; confirm
	// before reusing it.
	// +listType=atomic
	// +optional
	Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
}
// IPFamily represents the IP Family (IPv4 or IPv6). This type is used
// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
// +enum
type IPFamily string
const (
	// IPv4Protocol indicates that this IP is IPv4 protocol.
	IPv4Protocol IPFamily = "IPv4"
	// IPv6Protocol indicates that this IP is IPv6 protocol.
	IPv6Protocol IPFamily = "IPv6"
)
// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service.
// +enum
type IPFamilyPolicyType string
const (
	// IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily.
	// The IPFamily assigned is based on the default IPFamily used by the cluster
	// or as identified by service.spec.ipFamilies field.
	IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack"
	// IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when
	// the cluster is configured for dual-stack. If the cluster is not configured
	// for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not
	// set in service.spec.ipFamilies then the service will be assigned the default IPFamily
	// configured on the cluster.
	IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack"
	// IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using
	// IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The
	// IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If
	// service.spec.ipFamilies was not provided then it will be assigned according to how they are
	// configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative
	// IPFamily will be added by apiserver.
	IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack"
)
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=port
	// +listMapKey=protocol
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	// +mapType=atomic
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly. If an address is specified manually, is in-range (as per
	// system configuration), and is not in use, it will be allocated to the
	// service; otherwise creation of the service will fail. This field may not
	// be changed through updates unless the type field is also being changed
	// to ExternalName (which requires this field to be blank) or the type
	// field is being changed from ExternalName (in which case this field may
	// optionally be specified, as described above). Valid values are "None",
	// empty string (""), or a valid IP address. Setting this to "None" makes a
	// "headless service" (no virtual IP), which is useful when direct endpoint
	// connections are preferred and proxying is not required. Only applies to
	// types ClusterIP, NodePort, and LoadBalancer. If this field is specified
	// when creating a Service of type ExternalName, creation will fail. This
	// field will be wiped when updating a Service to type ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// ClusterIPs is a list of IP addresses assigned to this service, and are
	// usually assigned randomly. If an address is specified manually, is
	// in-range (as per system configuration), and is not in use, it will be
	// allocated to the service; otherwise creation of the service will fail.
	// This field may not be changed through updates unless the type field is
	// also being changed to ExternalName (which requires this field to be
	// empty) or the type field is being changed from ExternalName (in which
	// case this field may optionally be specified, as described above). Valid
	// values are "None", empty string (""), or a valid IP address. Setting
	// this to "None" makes a "headless service" (no virtual IP), which is
	// useful when direct endpoint connections are preferred and proxying is
	// not required. Only applies to types ClusterIP, NodePort, and
	// LoadBalancer. If this field is specified when creating a Service of type
	// ExternalName, creation will fail. This field will be wiped when updating
	// a Service to type ExternalName. If this field is not specified, it will
	// be initialized from the clusterIP field. If this field is specified,
	// clients must ensure that clusterIPs[0] and clusterIP have the same
	// value.
	//
	// This field may hold a maximum of two entries (dual-stack IPs, in either order).
	// These IPs must correspond to the values of the ipFamilies field. Both
	// clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +listType=atomic
	// +optional
	ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing
	// to endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object or
	// EndpointSlice objects. If clusterIP is "None", no virtual IP is
	// allocated and the endpoints are published as a set of endpoints rather
	// than a virtual IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the same endpoints as the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an external load-balancer
	// (if supported in the current cloud) which routes to the same endpoints
	// as the clusterIP.
	// "ExternalName" aliases this service to the specified externalName.
	// Several other fields do not apply to ExternalName services.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// Deprecated: This field was under-specified and its meaning varies across implementations,
	// and it cannot support dual-stack.
	// As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
	// This field may be removed in a future API version.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, this will restrict traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
	// cloud-provider does not support the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that discovery mechanisms will
	// return as an alias for this service (e.g. a DNS CNAME record). No
	// proxying will be involved. Must be a lowercase RFC-1123 hostname
	// (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and Nodeport
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// This only applies when type is set to LoadBalancer and
	// externalTrafficPolicy is set to Local. If a value is specified, is
	// in-range, and is not in use, it will be used. If not specified, a value
	// will be automatically allocated. External systems (e.g. load-balancers)
	// can use this port to determine if a given node holds endpoints for this
	// service or not. If this field is specified when creating a Service
	// which does not need it, creation will fail. This field will be wiped
	// when updating a Service to no longer need it (e.g. changing type).
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses indicates that any agent which deals with endpoints for this
	// Service should disregard any indications of ready/not-ready.
	// The primary use case for setting this field is for a StatefulSet's Headless Service to
	// propagate SRV DNS records for its Pods for the purpose of peer discovery.
	// The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
	// Services interpret this to mean that all endpoints are considered "ready" even if the
	// Pods themselves are not. Agents which consume only Kubernetes generated endpoints
	// through the Endpoints or EndpointSlice resources can safely assume this behavior.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
	// TopologyKeys is tombstoned to show why 16 is a reserved protobuf tag.
	//TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
	// IPFamily is tombstoned to show why 15 is a reserved protobuf tag.
	// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
	// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
	// service. This field is usually assigned automatically based on cluster
	// configuration and the ipFamilyPolicy field. If this field is specified
	// manually, the requested family is available in the cluster,
	// and ipFamilyPolicy allows it, it will be used; otherwise creation of
	// the service will fail. This field is conditionally mutable: it allows
	// for adding or removing a secondary IP family, but it does not allow
	// changing the primary IP family of the Service. Valid values are "IPv4"
	// and "IPv6". This field only applies to Services of types ClusterIP,
	// NodePort, and LoadBalancer, and does apply to "headless" services.
	// This field will be wiped when updating a Service to type ExternalName.
	//
	// This field may hold a maximum of two entries (dual-stack families, in
	// either order). These families must correspond to the values of the
	// clusterIPs field, if specified. Both clusterIPs and ipFamilies are
	// governed by the ipFamilyPolicy field.
	// +listType=atomic
	// +optional
	IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"`
	// IPFamilyPolicy represents the dual-stack-ness requested or required by
	// this Service. If there is no value provided, then this field will be set
	// to SingleStack. Services can be "SingleStack" (a single IP family),
	// "PreferDualStack" (two IP families on dual-stack configured clusters or
	// a single IP family on single-stack clusters), or "RequireDualStack"
	// (two IP families on dual-stack configured clusters, otherwise fail). The
	// ipFamilies and clusterIPs fields depend on the value of this field. This
	// field will be wiped when updating a service to type ExternalName.
	// +optional
	IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"`
	// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
	// allocated for services with type LoadBalancer. Default is "true". It
	// may be set to "false" if the cluster load-balancer does not rely on
	// NodePorts. If the caller requests specific NodePorts (by specifying a
	// value), those requests will be respected, regardless of this field.
	// This field may only be set for services with type LoadBalancer and will
	// be cleared if the type is changed to any other type.
	// +optional
	AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"`
	// loadBalancerClass is the class of the load balancer implementation this Service belongs to.
	// If specified, the value of this field must be a label-style identifier, with an optional prefix,
	// e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
	// This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
	// balancer implementation is used, today this is typically done through the cloud provider integration,
	// but should apply for any default implementation. If set, it is assumed that a load balancer
	// implementation is watching for Services with a matching class. Any default load balancer
	// implementation (e.g. cloud providers) should ignore Services that set this field.
	// This field can only be set when creating or updating a Service to type 'LoadBalancer'.
	// Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
	// +featureGate=LoadBalancerClass
	// +optional
	LoadBalancerClass *string `json:"loadBalancerClass,omitempty" protobuf:"bytes,21,opt,name=loadBalancerClass"`
	// InternalTrafficPolicy specifies if the cluster internal traffic
	// should be routed to all endpoints or node-local endpoints only.
	// "Cluster" routes internal traffic to a Service to all endpoints.
	// "Local" routes traffic to node-local endpoints only, traffic is
	// dropped if no node-local endpoints are ready.
	// The default value is "Cluster".
	// +featureGate=ServiceInternalTrafficPolicy
	// +optional
	InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. When considering
	// the endpoints for a Service, this must match the 'name' field in the
	// EndpointPort.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
	// Default is TCP.
	// +default="TCP"
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The application protocol for this port.
	// This field follows standard Kubernetes label syntax.
	// Un-prefixed names are reserved for IANA standard service names (as per
	// RFC-6335 and https://www.iana.org/assignments/service-names).
	// Non-standard protocols should use prefixed names such as
	// mycompany.com/my-custom-protocol.
	// +optional
	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type is
	// NodePort or LoadBalancer. Usually assigned by the system. If a value is
	// specified, in-range, and not in use it will be used, otherwise the
	// operation will fail. If not specified, a port will be allocated if this
	// Service requires one. If this field is specified when creating a
	// Service which does not need it, creation will fail. This field will be
	// wiped when updating a Service to no longer need it (e.g. changing type
	// from NodePort to ClusterIP).
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone - do not assign a cluster IP:
	// no proxying required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of services.
	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
	// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
	// This field should not be used to find auto-generated service account token secrets for use outside of pods.
	// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
	// +optional
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
	// Can be overridden at the pod level.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects.
type ServiceAccountList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ServiceAccounts.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
//
//	Name: "mysvc",
//	Subsets: [
//	  {
//	    Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//	    Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//	  },
//	  {
//	    Addresses: [{"ip": "10.10.3.3"}],
//	    Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
//	  },
//	]
type Endpoints struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// The set of all endpoints is the union of all subsets. Addresses are placed into
	// subsets according to the IPs they share. A single address with multiple ports,
	// some of which are ready and some of which are not (because they come from
	// different containers) will result in the address being displayed in different
	// subsets for the different ports. No address will appear in both Addresses and
	// NotReadyAddresses in the same subset.
	// Sets of addresses and ports that comprise a service.
	// +optional
	Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
//
//	{
//	  Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//	  Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//	}
//
// The resulting set of endpoints can be viewed as:
//
//	a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
//	b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
	// IP addresses which offer the related ports that are marked as ready. These endpoints
	// should be considered safe for load balancers and clients to utilize.
	// +optional
	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
	// IP addresses which offer the related ports but are not currently marked as ready
	// because they have not yet finished starting, have recently failed a readiness check,
	// or have recently failed a liveness check.
	// +optional
	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
	// Port numbers available on the related IP addresses.
	// +optional
	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
// +structType=atomic
type EndpointAddress struct {
	// The IP of this endpoint.
	// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
	// or link-local multicast (224.0.0.0/24).
	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
	// kubernetes components, like kube-proxy, are not IPv6 ready.
	// TODO: This should allow hostname or IP, See #4447.
	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
	// The Hostname of this endpoint.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
	// +optional
	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
	// Reference to object providing the endpoint.
	// +optional
	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
// +structType=atomic
type EndpointPort struct {
	// The name of this port. This must match the 'name' field in the
	// corresponding ServicePort.
	// Must be a DNS_LABEL.
	// Optional only if one port is defined.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The port number of the endpoint.
	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
	// The IP protocol for this port.
	// Must be UDP, TCP, or SCTP.
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
	// The application protocol for this port.
	// This field follows standard Kubernetes label syntax.
	// Un-prefixed names are reserved for IANA standard service names (as per
	// RFC-6335 and https://www.iana.org/assignments/service-names).
	// Non-standard protocols should use prefixed names such as
	// mycompany.com/my-custom-protocol.
	// +optional
	AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EndpointsList is a list of endpoints.
type EndpointsList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of endpoints.
	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
	// PodCIDR represents the pod IP range assigned to the node.
	// +optional
	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
	// podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
	// field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
	// each of IPv4 and IPv6.
	// +optional
	// +patchStrategy=merge
	PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`
	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
	// +optional
	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
	// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
	// +optional
	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
	// If specified, the node's taints.
	// +optional
	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
	// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
	// +optional
	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
	// see: https://issues.k8s.io/61966
	// Note: proto tag 2 stays reserved for this field even after removal.
	// +optional
	DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
// This API is deprecated since 1.22
type NodeConfigSource struct {
	// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
	// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
	// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
	//    but used a generic ObjectReference type that didn't really have the fields we needed
	// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
	// so there was no persisted data for these fields that needed to be migrated/handled.

	// +k8s:deprecated=kind
	// +k8s:deprecated=apiVersion
	// +k8s:deprecated=configMapRef,protobuf=1

	// ConfigMap is a reference to a Node's ConfigMap
	ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
type ConfigMapNodeConfigSource struct {
	// Namespace is the metadata.namespace of the referenced ConfigMap.
	// This field is required in all cases.
	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
	// Name is the metadata.name of the referenced ConfigMap.
	// This field is required in all cases.
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
	// UID is the metadata.UID of the referenced ConfigMap.
	// This field is forbidden in Node.Spec, and required in Node.Status.
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
	// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
	// This field is forbidden in Node.Spec, and required in Node.Status.
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
	// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
	// This field is required in all cases.
	KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
	/*
		The port tag was not properly in quotes in earlier releases, so it must be
		uppercased for backwards compat (since it was falling back to var name of
		'Port').
	*/

	// Port number of the given endpoint.
	// Note the deliberately uppercase json name "Port" — see the block comment above.
	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
	// Endpoint on which Kubelet is listening.
	// +optional
	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
	// MachineID reported by the node. For unique machine identification
	// in the cluster this field is preferred. Learn more from man(5)
	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
	// SystemUUID reported by the node. For unique machine identification
	// MachineID is preferred. This field is specific to Red Hat hosts
	// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
	// Boot ID reported by the node.
	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
	// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
	// Kubelet Version reported by the node.
	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
	// KubeProxy Version reported by the node.
	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
	// The Operating System reported by the node
	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
	// The Architecture reported by the node
	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
	// Assigned reports the checkpointed config the node will try to use.
	// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
	// config payload to local disk, along with a record indicating intended
	// config. The node refers to this record to choose its config checkpoint, and
	// reports this record in Assigned. Assigned only updates in the status after
	// the record has been checkpointed to disk. When the Kubelet is restarted,
	// it tries to make the Assigned config the Active config by loading and
	// validating the checkpointed payload identified by Assigned.
	// +optional
	Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
	// Active reports the checkpointed config the node is actively using.
	// Active will represent either the current version of the Assigned config,
	// or the current LastKnownGood config, depending on whether attempting to use the
	// Assigned config results in an error.
	// +optional
	Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
	// LastKnownGood reports the checkpointed config the node will fall back to
	// when it encounters an error attempting to use the Assigned config.
	// The Assigned config becomes the LastKnownGood config when the node determines
	// that the Assigned config is stable and correct.
	// This is currently implemented as a 10-minute soak period starting when the local
	// record of Assigned config is updated. If the Assigned config is Active at the end
	// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
	// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
	// because the local default config is always assumed good.
	// You should not make assumptions about the node's method of determining config stability
	// and correctness, as this may change or become configurable in the future.
	// +optional
	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
	// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
	// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
	// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
	// to load or validate the Assigned config, etc.
	// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
	// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
	// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
	// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
	// by fixing the config assigned in Spec.ConfigSource.
	// You can find additional information for debugging by searching the error message in the Kubelet log.
	// Error is a human-readable description of the error state; machines can check whether or not Error
	// is empty, but should not rely on the stability of the Error text across Kubelet versions.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
	// Capacity represents the total resources of a node.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Allocatable represents the resources of a node that are available for scheduling.
	// Defaults to Capacity.
	// +optional
	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
	// NodePhase is the recently observed lifecycle phase of the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
	// The field is never populated, and now is deprecated.
	// +optional
	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
	// Conditions is an array of current observed node conditions.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// List of addresses reachable to the node.
	// Queried from cloud provider, if available.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
	// Note: This field is declared as mergeable, but the merge key is not sufficiently
	// unique, which can cause data corruption when it is merged. Callers should instead
	// use a full-replacement patch. See http://pr.k8s.io/79391 for an example.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
	// Endpoints of daemons running on the Node.
	// +optional
	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
	// Set of ids/uuids to uniquely identify the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
	// +optional
	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
	// List of container images on this node
	// +optional
	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
	// List of attachable volumes in use (mounted) by the node.
	// +optional
	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
	// List of volumes that are attached to the node.
	// +optional
	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
	// Status of the config assigned to the node via the dynamic Kubelet config feature.
	// +optional
	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
// UniqueVolumeName uniquely identifies an attachable volume on a node
// (used in NodeStatus.VolumesInUse and AttachedVolume.Name).
type UniqueVolumeName string

// AttachedVolume describes a volume attached to a node
type AttachedVolume struct {
	// Name of the attached volume
	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
	// DevicePath represents the device path where the volume should be available
	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
	// Bounded-sized list of signatures of pods that should avoid this node, sorted
	// in timestamp order from oldest to newest. Size of the slice is unspecified.
	// +optional
	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
	// The class of pods.
	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
	// Time at which this entry was added to the list.
	// +optional
	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
	// (brief) reason why this entry was added to the list.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Human readable message indicating why this entry was added to the list.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
	// Reference to controller whose pods should avoid this node.
	// +optional
	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image present on a node.
type ContainerImage struct {
	// Names by which this image is known.
	// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
	// +optional
	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
	// The size of the image in bytes.
	// +optional
	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node.
// +enum
type NodePhase string

// These are the valid phases of node.
const (
	// NodePending means the node has been created/added by the system, but not configured.
	NodePending NodePhase = "Pending"
	// NodeRunning means the node has been configured and has Kubernetes components running.
	NodeRunning NodePhase = "Running"
	// NodeTerminated means the node has been removed from the cluster.
	NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType identifies a condition reported in NodeStatus.Conditions.
type NodeConditionType string

// These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here.
// The built-in set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
	// NodeReady means kubelet is healthy and ready to accept pods.
	NodeReady NodeConditionType = "Ready"
	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
	NodeMemoryPressure NodeConditionType = "MemoryPressure"
	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
	NodePIDPressure NodeConditionType = "PIDPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
	// Type of node condition.
	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we got an update on a given condition.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
	// Last time the condition transit from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// (brief) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType identifies the kind of address reported in NodeAddress.
type NodeAddressType string

// These are built-in addresses type of node. A cloud provider may set a type not listed here.
const (
	// NodeHostName identifies a name of the node. Although every node can be assumed
	// to have a NodeAddress of this type, its exact syntax and semantics are not
	// defined, and are not consistent between different clusters.
	NodeHostName NodeAddressType = "Hostname"

	// NodeInternalIP identifies an IP address which is assigned to one of the node's
	// network interfaces. Every node should have at least one address of this type.
	//
	// An internal IP is normally expected to be reachable from every other node, but
	// may not be visible to hosts outside the cluster. By default it is assumed that
	// kube-apiserver can reach node internal IPs, though it is possible to configure
	// clusters where this is not the case.
	//
	// NodeInternalIP is the default type of node IP, and does not necessarily imply
	// that the IP is ONLY reachable internally. If a node has multiple internal IPs,
	// no specific semantics are assigned to the additional IPs.
	NodeInternalIP NodeAddressType = "InternalIP"

	// NodeExternalIP identifies an IP address which is, in some way, intended to be
	// more usable from outside the cluster than an internal IP, though no specific
	// semantics are defined. It may be a globally routable IP, though it is not
	// required to be.
	//
	// External IPs may be assigned directly to an interface on the node, like a
	// NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed
	// to an internal node IP rather than being delivered directly (making the IP less
	// efficient for node-to-node traffic than a NodeInternalIP).
	NodeExternalIP NodeAddressType = "ExternalIP"

	// NodeInternalDNS identifies a DNS name which resolves to an IP address which has
	// the characteristics of a NodeInternalIP. The IP it resolves to may or may not
	// be a listed NodeInternalIP address.
	NodeInternalDNS NodeAddressType = "InternalDNS"

	// NodeExternalDNS identifies a DNS name which resolves to an IP address which has
	// the characteristics of a NodeExternalIP. The IP it resolves to may or may not
	// be a listed NodeExternalIP address.
	NodeExternalDNS NodeAddressType = "ExternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
	// Node address type, one of Hostname, ExternalIP or InternalIP.
	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
	// The node address.
	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string

// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
	// CPU, in cores. (500m = .5 cores)
	ResourceCPU ResourceName = "cpu"
	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceMemory ResourceName = "memory"
	// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
	ResourceStorage ResourceName = "storage"
	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)

const (
	// Default namespace prefix.
	ResourceDefaultNamespacePrefix = "kubernetes.io/"
	// Name prefix for huge page resources (alpha).
	ResourceHugePagesPrefix = "hugepages-"
	// Name prefix for storage resource limits
	ResourceAttachableVolumesPrefix = "attachable-volumes-"
)
// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a node.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the node.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of nodes
	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string

// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
	FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
	// Phase is the current lifecycle phase of the namespace.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
	// Represents the latest available observations of a namespace's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// NamespacePhase is the lifecycle phase of a namespace.
// +enum
type NamespacePhase string

// These are the valid phases of a namespace.
const (
	// NamespaceActive means the namespace is available for use in the system
	NamespaceActive NamespacePhase = "Active"
	// NamespaceTerminating means the namespace is undergoing graceful termination
	NamespaceTerminating NamespacePhase = "Terminating"
)

const (
	// NamespaceTerminatingCause is returned as a defaults.cause item when a change is
	// forbidden due to the namespace being terminated.
	NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
)
// NamespaceConditionType identifies a condition reported in NamespaceStatus.Conditions.
type NamespaceConditionType string

// These are built-in conditions of a namespace.
const (
	// NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery.
	NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure"
	// NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources.
	NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure"
	// NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types.
	NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure"
	// NamespaceContentRemaining contains information about resources remaining in a namespace.
	NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining"
	// NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace.
	NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining"
)
// NamespaceCondition contains details about state of namespace.
type NamespaceCondition struct {
	// Type of namespace controller condition.
	Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time the condition transitioned from one status to another.
	// NOTE(review): proto tag 3 is skipped here (tags jump 2 -> 4); presumably
	// reserved — do not reuse it for a new field without confirming upstream.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Brief (machine-readable) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about the last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of the Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current status of a Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NamespaceList is a list of Namespaces.
type NamespaceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of Namespace objects in the list.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
//
// Deprecated: in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// The target object that you want to bind to the standard object.
	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
	// Specifies the target UID.
	// +optional
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
	metav1.TypeMeta `json:",inline"`
	// The container for which to stream logs. Defaults to only container if there is one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow the log stream of the pod. Defaults to false.
	// +optional
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Return previous terminated container logs. Defaults to false.
	// +optional
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output. Defaults to false.
	// +optional
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime
	// +optional
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
	// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
	// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
	// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
	// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
	// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
	// the actual log data coming from the real kubelet).
	// +optional
	InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
	metav1.TypeMeta `json:",inline"`

	// Stdin if true, redirects the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Stdout if true indicates that stdout is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Stderr if true indicates that stderr is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the attach call.
	// This is passed through the container runtime so the tty
	// is allocated on the worker node by the container runtime.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// The container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
	metav1.TypeMeta `json:",inline"`

	// Redirect the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Redirect the standard output stream of the pod for this call.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Redirect the standard error stream of the pod for this call.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the exec call.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// Container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
	// Command is the remote command to execute. argv array. Not executed within a shell.
	// Note: required (no omitempty), unlike the fields above.
	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
	metav1.TypeMeta `json:",inline"`

	// List of ports to forward
	// Required when using WebSockets
	// +optional
	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
	metav1.TypeMeta `json:",inline"`

	// Path is the URL path to use for the current proxy request to pod.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
	metav1.TypeMeta `json:",inline"`

	// Path is the URL path to use for the current proxy request to node.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
	metav1.TypeMeta `json:",inline"`

	// Path is the part of URLs that include service endpoints, suffixes,
	// and parameters to use for the current proxy request to service.
	// For example, the whole request URL is
	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
	// Path is _search?q=user:kimchy.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// ---
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
//  1. Ignored fields.  It includes many fields which are not generally honored.  For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
//  2. Invalid usage help.  It is impossible to add specific help for individual usage.  In most embedded usages, there are particular
//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
//     Those cannot be well described when embedded.
//  3. Inconsistent validation.  Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
//  4. The fields are both imprecise and overly precise.  Kind is not a precise mapping to a URL. This can produce ambiguity
//     during interpretation and require a REST mapping.  In most cases, the dependency is on the group,resource tuple
//     and the version of the actual struct is irrelevant.
//  5. We cannot easily change it.  Because this type is embedded in many locations, updates to this type
//     will affect numerous schemas.  Don't make new APIs embed an underspecified API type they do not control.
//
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +structType=atomic
type ObjectReference struct {
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// Namespace of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// API version of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
	// Specific resourceVersion to which this reference is made, if any.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// If referring to a piece of an object instead of an entire object, this string
	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
	// For example, if the object reference is to a container within a pod, this would take on a value like:
	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
	// the event) or if no container name is specified "spec.containers[2]" (container with
	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
// +structType=atomic
type LocalObjectReference struct {
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// TODO: Add other useful fields. apiVersion, kind, uid?
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
// +structType=atomic
type TypedLocalObjectReference struct {
	// APIGroup is the group for the resource being referenced.
	// If APIGroup is not specified, the specified Kind must be in the core API group.
	// For any other third-party types, APIGroup is required.
	// NOTE(review): json tag has no omitempty despite +optional; the nil pointer
	// still serializes as null — confirm before "fixing", clients may rely on it.
	// +optional
	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
	// Kind is the type of resource being referenced
	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
	// Name is the name of resource being referenced
	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SerializedReference is a reference to serialized object.
type SerializedReference struct {
	metav1.TypeMeta `json:",inline"`

	// The reference to an object in the system.
	// +optional
	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information for an event.
type EventSource struct {
	// Component from which the event is generated.
	// +optional
	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
	// Node name on which the event is generated.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future)
const (
	// EventTypeNormal: information only and will not cause any problems
	EventTypeNormal string = "Normal"
	// EventTypeWarning: these events are to warn that something might go wrong
	EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Event is a report of an event somewhere in the cluster.  Events
// have a limited retention time and triggers and messages may evolve
// with time.  Event consumers should not rely on the timing of an event
// with a given Reason reflecting a consistent underlying trigger, or the
// continued existence of events with that Reason.  Events should be
// treated as informative, best-effort, supplemental data.
type Event struct {
	metav1.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// Note: metadata is required here (no omitempty), unlike on most objects.
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`

	// The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`

	// This should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status.
	// TODO: provide exact specification for format.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`

	// A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`

	// The component reporting this event. Should be a short machine understandable string.
	// +optional
	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`

	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
	// +optional
	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`

	// The time at which the most recent occurrence of this event was recorded.
	// +optional
	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`

	// The number of times this event has occurred.
	// +optional
	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`

	// Type of this event (Normal, Warning), new types could be added in the future
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`

	// Time when this Event was first observed.
	// +optional
	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`

	// Data about the Event series this event represents or nil if it's a singleton Event.
	// +optional
	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`

	// What action was taken/failed regarding to the Regarding object.
	// +optional
	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`

	// Optional secondary object for more complex actions.
	// +optional
	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`

	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
	// NOTE(review): the json/protobuf name "reportingComponent" intentionally
	// differs from the Go field name — presumably kept for wire compatibility;
	// do not rename either side independently.
	// +optional
	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`

	// ID of the controller instance, e.g. `kubelet-xyzf`.
	// +optional
	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contains information on series of events, i.e. thing that was/is happening
// continuously for some time.
type EventSeries struct {
	// Number of occurrences in this series up to the last heartbeat time
	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
	// Time of the last occurrence observed
	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`

	// Protobuf tag 3 is reserved by the deprecated "state" field below; do not reuse it.
	// +k8s:deprecated=state,protobuf=3
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EventList is a list of events.
type EventList struct {
	metav1.TypeMeta `json:",inline"`

	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of events
	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// List holds a list of objects, which may not be known by the server.
// Defined as its own named type (not an alias) so the deepcopy marker above
// generates runtime.Object methods for it.
type List metav1.List
// LimitType is a type of object that is limited. It can be Pod, Container, PersistentVolumeClaim or
// a fully qualified resource name.
type LimitType string

const (
	// LimitTypePod is a limit that applies to all pods in a namespace
	LimitTypePod LimitType = "Pod"
	// LimitTypeContainer is a limit that applies to all containers in a namespace
	LimitTypeContainer LimitType = "Container"
	// LimitTypePersistentVolumeClaim is a limit that applies to all persistent volume claims in a namespace
	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
	// Type of resource that this limit applies to.
	Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
	// Max usage constraints on this kind by resource name.
	// +optional
	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
	// Min usage constraints on this kind by resource name.
	// +optional
	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
	// Default resource requirement limit value by resource name if resource limit is omitted.
	// +optional
	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
	// +optional
	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
	// +optional
	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
	// Limits is the list of LimitRangeItem objects that are enforced.
	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
	metav1.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec defines the limits enforced.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
	metav1.TypeMeta `json:",inline"`

	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of LimitRange objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types.
// These are the resource names usable as keys in a ResourceQuota's hard limits.
const (
	// Pods, number
	ResourcePods ResourceName = "pods"
	// Services, number
	ResourceServices ResourceName = "services"
	// ReplicationControllers, number
	ResourceReplicationControllers ResourceName = "replicationcontrollers"
	// ResourceQuotas, number
	ResourceQuotas ResourceName = "resourcequotas"
	// ResourceSecrets, number
	ResourceSecrets ResourceName = "secrets"
	// ResourceConfigMaps, number
	ResourceConfigMaps ResourceName = "configmaps"
	// ResourcePersistentVolumeClaims, number
	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
	// ResourceServicesNodePorts, number
	ResourceServicesNodePorts ResourceName = "services.nodeports"
	// ResourceServicesLoadBalancers, number
	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
	// CPU request, in cores. (500m = .5 cores)
	ResourceRequestsCPU ResourceName = "requests.cpu"
	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsMemory ResourceName = "requests.memory"
	// Storage request, in bytes
	ResourceRequestsStorage ResourceName = "requests.storage"
	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
	// CPU limit, in cores. (500m = .5 cores)
	ResourceLimitsCPU ResourceName = "limits.cpu"
	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefix for Kubernetes object types
const (
	// ResourceRequestsHugePagesPrefix: HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
	// DefaultResourceRequestsPrefix is the default resource requests prefix
	DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
// +enum
type ResourceQuotaScope string

const (
	// ResourceQuotaScopeTerminating matches all pod objects where spec.activeDeadlineSeconds >=0
	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
	// ResourceQuotaScopeNotTerminating matches all pod objects where spec.activeDeadlineSeconds is nil
	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
	// ResourceQuotaScopeBestEffort matches all pod objects that have best effort quality of service
	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
	// ResourceQuotaScopeNotBestEffort matches all pod objects that do not have best effort quality of service
	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
	// ResourceQuotaScopePriorityClass matches all pod objects that have priority class mentioned
	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
	// ResourceQuotaScopeCrossNamespacePodAffinity matches all pod objects that have cross-namespace pod (anti)affinity mentioned.
	ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
	// hard is the set of desired hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// A collection of filters that must match each object tracked by a quota.
	// If not specified, the quota matches all objects.
	// +optional
	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
	// scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
	// but expressed using ScopeSelectorOperator in combination with possible values.
	// For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
	// +optional
	ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"`
}
// A scope selector represents the AND of the selectors represented
// by the scoped-resource selector requirements.
// +structType=atomic
type ScopeSelector struct {
	// A list of scope selector requirements by scope of the resources.
	// +optional
	MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
}
// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
// that relates the scope name and values.
type ScopedResourceSelectorRequirement struct {
	// The name of the scope that the selector applies to.
	ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"`
	// Represents a scope's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist.
	Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A scope selector operator is the set of operators that can be used in
// a scope selector requirement.
// +enum
type ScopeSelectorOperator string

// Valid scope selector operators; see ScopedResourceSelectorRequirement.Operator.
const (
	ScopeSelectorOpIn           ScopeSelectorOperator = "In"
	ScopeSelectorOpNotIn        ScopeSelectorOperator = "NotIn"
	ScopeSelectorOpExists       ScopeSelectorOperator = "Exists"
	ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist"
)
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
	// Hard is the set of enforced hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// Used is the current observed total usage of the resource in the namespace.
	// +optional
	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Spec defines the desired quota.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

	// Status defines the actual enforced quota and its current usage.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`

	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of ResourceQuota objects.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
//
// NOTE(review): protobuf tags are not in declaration order (data=2, type=3,
// stringData=4, immutable=5) — presumably later-added fields took the next free
// tag; tags must never be renumbered or reused.
type Secret struct {
	metav1.TypeMeta `json:",inline"`

	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Immutable, if set to true, ensures that data stored in the Secret cannot
	// be updated (only object metadata can be modified).
	// If not set to true, the field can be modified at any time.
	// Defaulted to nil.
	// +optional
	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"`

	// Data contains the secret data. Each key must consist of alphanumeric
	// characters, '-', '_' or '.'. The serialized form of the secret data is a
	// base64 encoded string, representing the arbitrary (possibly non-string)
	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
	// +optional
	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`

	// stringData allows specifying non-binary secret data in string form.
	// It is provided as a write-only input field for convenience.
	// All keys and values are merged into the data field on write, overwriting any existing values.
	// The stringData field is never output when reading from the API.
	// (Excluded from generated conversions via the marker below.)
	// +k8s:conversion-gen=false
	// +optional
	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`

	// Used to facilitate programmatic handling of secret data.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
	// +optional
	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the maximum total size, in bytes, of the values in a
// Secret's Data field (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024

// SecretType names how a Secret's Data is to be interpreted; see the
// SecretTypeXxx constants for the defined values.
type SecretType string
const (
	// SecretTypeOpaque is the default. Arbitrary user-defined data
	SecretTypeOpaque SecretType = "Opaque"
	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
	//
	// Required fields:
	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
	// - Secret.Data["token"] - a token that identifies the service account to the API
	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountNameKey = "kubernetes.io/service-account.name"
	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
	ServiceAccountTokenKey = "token"
	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
	ServiceAccountRootCAKey = "ca.crt"
	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
	ServiceAccountNamespaceKey = "namespace"
	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
	//
	// Required fields:
	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
	DockerConfigKey = ".dockercfg"
	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
	//
	// Required fields:
	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
	DockerConfigJsonKey = ".dockerconfigjson"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
	// - Secret.Data["username"] - username used for authentication
	// - Secret.Data["password"] - password or token needed for authentication
	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
	BasicAuthUsernameKey = "username"
	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
	BasicAuthPasswordKey = "password"
	// SecretTypeSSHAuth contains data needed for SSH authentication.
	//
	// Required field:
	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
	SSHAuthPrivateKey = "ssh-privatekey"
	// SecretTypeTLS contains information about a TLS client or server secret. It
	// is primarily used with TLS termination of the Ingress resource, but may be
	// used in other types.
	//
	// Required fields:
	// - Secret.Data["tls.key"] - TLS private key.
	//   Secret.Data["tls.crt"] - TLS certificate.
	// TODO: Consider supporting different formats, specifying CA/destinationCA.
	SecretTypeTLS SecretType = "kubernetes.io/tls"
	// TLSCertKey is the key for tls certificates in a TLS secret.
	TLSCertKey = "tls.crt"
	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
	TLSPrivateKeyKey = "tls.key"
	// SecretTypeBootstrapToken is used during the automated bootstrap process (first
	// implemented by kubeadm). It stores tokens that are used to sign well known
	// ConfigMaps. They are used for authn.
	SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SecretList is a list of Secret.
type SecretList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is a list of secret objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Immutable, if set to true, ensures that data stored in the ConfigMap cannot
	// be updated (only object metadata can be modified).
	// If not set to true, the field can be modified at any time.
	// Defaulted to nil.
	// +optional
	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"`

	// Data contains the configuration data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// Values with non-UTF-8 byte sequences must use the BinaryData field.
	// The keys stored in Data must not overlap with the keys in
	// the BinaryData field, this is enforced during validation process.
	// +optional
	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`

	// BinaryData contains the binary data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// BinaryData can contain byte sequences that are not in the UTF-8 range.
	// The keys stored in BinaryData must not overlap with the ones in
	// the Data field, this is enforced during validation process.
	// Using this field will require 1.10+ apiserver and
	// kubelet.
	// +optional
	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
	metav1.TypeMeta `json:",inline"`

	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Items is the list of ConfigMaps.
	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType defines the type and constants for component health validation.
type ComponentConditionType string
// These are the valid conditions for the component.
const (
	// ComponentHealthy indicates the component is passing its health check.
	ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition carries information about the condition of a component.
type ComponentCondition struct {
	// Type of condition for a component.
	// Valid value: "Healthy"
	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
	// Status of the condition for a component.
	// Valid values for "Healthy": "True", "False", or "Unknown".
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Message about the condition for a component.
	// For example, information about a health check.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// Condition error code for a component.
	// For example, a health check error code.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
// Deprecated: This API is deprecated in v1.19+
type ComponentStatus struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of component conditions observed
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatusList reports the status of all the conditions for the component
// as a list of ComponentStatus objects.
// Deprecated: This API is deprecated in v1.19+
type ComponentStatusList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of ComponentStatus objects.
	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
const (
	// DownwardAPIVolumeSourceDefaultMode is the default file mode (0644) applied
	// when DownwardAPIVolumeSource.DefaultMode is unset.
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits used to set permissions on this file, must be an octal value
	// between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext.  When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container.  May also be set in PodSecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The Windows specific settings applied to all containers.
	// If unspecified, the options from the PodSecurityContext will be used.
	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is linux.
	// +optional
	WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in PodSecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
	// procMount denotes the type of proc mount to use for the containers.
	// The default is DefaultProcMount which uses the container runtime defaults for
	// readonly paths and masked paths.
	// This requires the ProcMountType feature flag to be enabled.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"`
	// The seccomp options to use by this container. If seccomp options are
	// provided at both the pod & container level, the container options
	// override the pod options.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"`
}
// ProcMountType denotes the type of proc mount to use for a container's /proc.
// +enum
type ProcMountType string
const (
	// DefaultProcMount uses the container runtime defaults for readonly and masked
	// paths for /proc.  Most container runtimes mask certain paths in /proc to avoid
	// accidental security exposure of special devices or information.
	DefaultProcMount ProcMountType = "Default"

	// UnmaskedProcMount bypasses the default masking behavior of the container
	// runtime and ensures the newly created /proc for the container stays intact
	// with no modifications.
	UnmaskedProcMount ProcMountType = "Unmasked"
)
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
	// User is a SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is a SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is a SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptions struct {
	// GMSACredentialSpecName is the name of the GMSA credential spec to use.
	// +optional
	GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`

	// GMSACredentialSpec is where the GMSA admission webhook
	// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
	// GMSA credential spec named by the GMSACredentialSpecName field.
	// +optional
	GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`

	// The UserName in Windows to run the entrypoint of the container process.
	// Defaults to the user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`

	// HostProcess determines if a container should be run as a 'Host Process' container.
	// This field is alpha-level and will only be honored by components that enable the
	// WindowsHostProcessContainers feature flag. Setting this field without the feature
	// flag will result in errors when validating the Pod. All of a Pod's containers must
	// have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
	// containers and non-HostProcess containers).  In addition, if HostProcess is true
	// then HostNetwork must also be set to true.
	// +optional
	HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Range is string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
	// DefaultSchedulerName defines the name of default scheduler.
	DefaultSchedulerName = "default-scheduler"

	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set
type Sysctl struct {
	// Name of a property to set
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Value of a property to set
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Constants used as URL query parameters and stream headers by the remote
// command-execution and port-forwarding subprotocols.
const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"

	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"

	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
// PortStatus represents the error condition of a service port
type PortStatus struct {
	// Port is the port number of the service port of which status is recorded here
	Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
	// Protocol is the protocol of the service port of which status is recorded here
	// The supported values are: "TCP", "UDP", "SCTP"
	Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// Error is to record the problem with the service port
	// The format of the error shall comply with the following rules:
	// - built-in error values shall be specified in this file and those shall use
	//   CamelCase names
	// - cloud provider specific error values must have names that comply with the
	//   format foo.example.com/CamelCase.
	// ---
	// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
	// +optional
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
	// +kubebuilder:validation:MaxLength=316
	Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
}
Update types to be neutral about container runtime
Replace references to Docker / Docker Engine with more neutral
terminology.
Kubernetes-commit: b803c8ebe2d05891a82516fc28a8f7ea7e1bcd09
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
	NamespaceAll string = ""
	// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
	NamespaceNodeLease string = "kube-node-lease"
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
	// name of the volume.
	// Must be a DNS_LABEL and unique within the pod.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// volumeSource represents the location and type of the mounted volume.
	// If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// VolumeSource represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
	// hostPath represents a pre-existing file or directory on the host
	// machine that is directly exposed to the container. This is generally
	// used for system agents or other privileged things that are allowed
	// to see the host machine. Most containers will NOT need this.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// ---
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
	// emptyDir represents a temporary directory that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
	// gcePersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
	// gitRepo represents a git repository at a particular revision.
	// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
	// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
	// into the Pod's container.
	// +optional
	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
	// secret represents a secret that should populate this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
	// nfs represents an NFS mount on the host that shares a pod's lifetime
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
	// iscsi represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://examples.k8s.io/volumes/iscsi/README.md
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
	// glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
	// persistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
	// flexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
	// flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
	// downwardAPI represents downward API about the pod that should populate this volume
	// +optional
	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
	// configMap represents a configMap that should populate this volume
	// +optional
	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
	// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// +optional
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// projected items for all in one resources secrets, configmaps, and downward API
	// +optional
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
	// storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
	// +optional
	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
	// csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
	// +optional
	CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
	// ephemeral represents a volume that is handled by a cluster storage driver.
	// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
	// and deleted when the pod is removed.
	//
	// Use this if:
	// a) the volume is only needed while the pod runs,
	// b) features of normal volumes like restoring from snapshot or capacity
	//    tracking are needed,
	// c) the storage driver is specified through a storage class, and
	// d) the storage driver supports dynamic volume provisioning through
	//    a PersistentVolumeClaim (see EphemeralVolumeSource for more
	//    information on the connection between this volume type
	//    and PersistentVolumeClaim).
	//
	// Use PersistentVolumeClaim or one of the vendor-specific
	// APIs for volumes that persist for longer than the lifecycle
	// of an individual pod.
	//
	// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
	// be used that way - see the documentation of the driver for
	// more information.
	//
	// A pod can use both types of ephemeral volumes and
	// persistent volumes at the same time.
	//
	// +optional
	Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
	// claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
	// readOnly will force the ReadOnly setting in VolumeMounts.
	// Default false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
	// gcePersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
	// awsElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
	// hostPath represents a directory on the host.
	// Provisioned by a developer or tester.
	// This is useful for single-node development and testing only!
	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
	// glusterfs represents a Glusterfs volume that is attached to a host and
	// exposed to the pod. Provisioned by an admin.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
	// nfs represents an NFS mount on the host. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
	// rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://examples.k8s.io/volumes/rbd/README.md
	// +optional
	RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
	// iscsi represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// +optional
	ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
	// cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
	// cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
	// fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
	// flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
	// flexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// azureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
	// vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
	// quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
	// azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
	// photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// NOTE(review): unlike the sibling pointer fields above, this one carries no
	// +optional marker despite using omitempty — confirm against codegen expectations.
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
	// portworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
	// scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
	// local represents directly-attached storage with node affinity
	// +optional
	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
	// storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
	// More info: https://examples.k8s.io/volumes/storageos/README.md
	// +optional
	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
	// csi represents storage that is handled by an external CSI driver (Beta feature).
	// +optional
	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility.
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines the mount option annotation used in PVs.
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// capacity is the description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// persistentVolumeSource is the actual volume backing the persistent volume.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// accessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
	// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
	// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
	// Recycle must be supported by the volume plugin underlying this PersistentVolume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// mountOptions is the list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
	// volumeMode defines if a volume is intended to be used with a formatted filesystem
	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
	// nodeAffinity defines constraints that limit what nodes this volume can be accessed from.
	// This field influences the scheduling of pods that use this volume.
	// +optional
	NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
	// required specifies hard node constraints that must be met.
	Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
// +enum
type PersistentVolumeReclaimPolicy string

const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	// Note: Recycle is deprecated (see the PersistentVolumeSpec.PersistentVolumeReclaimPolicy docs).
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
// +enum
type PersistentVolumeMode string

const (
	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
	PersistentVolumeBlock PersistentVolumeMode = "Block"
	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// message is a human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// items is a list of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// items is a list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
type PersistentVolumeClaimSpec struct {
	// accessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// selector is a label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// resources represents the minimum resources the volume should have.
	// If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
	// that are lower than previous value but must still be higher than capacity recorded in the
	// status field of the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// volumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// storageClassName is the name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
	// volumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
	// dataSource field can be used to specify either:
	// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
	// * An existing PVC (PersistentVolumeClaim)
	// If the provisioner or an external controller can support the specified data source,
	// it will create a new volume based on the contents of the specified data source.
	// If the AnyVolumeDataSource feature gate is enabled, this field will always have
	// the same contents as the DataSourceRef field.
	// +optional
	DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
	// dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
	// volume is desired. This may be any local object from a non-empty API group (non
	// core object) or a PersistentVolumeClaim object.
	// When this field is specified, volume binding will only succeed if the type of
	// the specified object matches some installed volume populator or dynamic
	// provisioner.
	// This field will replace the functionality of the DataSource field and as such
	// if both fields are non-empty, they must have the same value. For backwards
	// compatibility, both fields (DataSource and DataSourceRef) will be set to the same
	// value automatically if one of them is empty and the other is non-empty.
	// There are two important differences between DataSource and DataSourceRef:
	// * While DataSource only allows two specific types of objects, DataSourceRef
	//   allows any non-core object, as well as PersistentVolumeClaim objects.
	// * While DataSource ignores disallowed values (dropping them), DataSourceRef
	//   preserves all values, and generates an error if a disallowed value is
	//   specified.
	// (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
	// +optional
	DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type.
type PersistentVolumeClaimConditionType string

const (
	// PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started.
	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
	// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on the node.
	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
// PersistentVolumeClaimResizeStatus describes the state of a PVC volume-expansion
// operation as it moves between the resize controller (control-plane) and the kubelet (node).
// +enum
type PersistentVolumeClaimResizeStatus string

const (
	// PersistentVolumeClaimNoExpansionInProgress is the empty string set by the resize
	// controller or kubelet when expansion is complete.
	PersistentVolumeClaimNoExpansionInProgress PersistentVolumeClaimResizeStatus = ""
	// PersistentVolumeClaimControllerExpansionInProgress is set when the resize controller
	// starts expanding the volume in the control-plane.
	PersistentVolumeClaimControllerExpansionInProgress PersistentVolumeClaimResizeStatus = "ControllerExpansionInProgress"
	// PersistentVolumeClaimControllerExpansionFailed is set when expansion has failed in the
	// resize controller with a terminal error.
	// Transient errors such as timeout should not set this status and should leave ResizeStatus
	// unmodified, so that the resize controller can resume the volume expansion.
	PersistentVolumeClaimControllerExpansionFailed PersistentVolumeClaimResizeStatus = "ControllerExpansionFailed"
	// PersistentVolumeClaimNodeExpansionPending is set when the resize controller has finished
	// expanding the volume but further expansion is needed on the node.
	PersistentVolumeClaimNodeExpansionPending PersistentVolumeClaimResizeStatus = "NodeExpansionPending"
	// PersistentVolumeClaimNodeExpansionInProgress is set when the kubelet starts expanding the volume.
	PersistentVolumeClaimNodeExpansionInProgress PersistentVolumeClaimResizeStatus = "NodeExpansionInProgress"
	// PersistentVolumeClaimNodeExpansionFailed is set when expansion has failed in the kubelet
	// with a terminal error. Transient errors don't set NodeExpansionFailed.
	PersistentVolumeClaimNodeExpansionFailed PersistentVolumeClaimResizeStatus = "NodeExpansionFailed"
)
// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
	// type is the type of the condition; see PersistentVolumeClaimConditionType for valid values.
	Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
	// status is the status of the condition.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// lastProbeTime is the time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// lastTransitionTime is the time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// reason is a unique, this should be a short, machine understandable string that gives the reason
	// for condition's last transition. If it reports "ResizeStarted" that means the underlying
	// persistent volume is being resized.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// message is the human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// phase represents the current phase of PersistentVolumeClaim.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// accessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// capacity represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
	// resized then the Condition will be set to 'ResizeStarted'.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// allocatedResources is the storage resource within AllocatedResources tracks the capacity allocated to a PVC. It may
	// be larger than the actual capacity when a volume expansion operation is requested.
	// For storage quota, the larger value from allocatedResources and PVC.spec.resources is used.
	// If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation.
	// If a volume expansion capacity request is lowered, allocatedResources is only
	// lowered if there are no expansion operations in progress and if the actual volume capacity
	// is equal or lower than the requested capacity.
	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
	// +featureGate=RecoverVolumeExpansionFailure
	// +optional
	AllocatedResources ResourceList `json:"allocatedResources,omitempty" protobuf:"bytes,5,rep,name=allocatedResources,casttype=ResourceList,castkey=ResourceName"`
	// resizeStatus stores status of resize operation.
	// ResizeStatus is not set by default but when expansion is complete resizeStatus is set to empty
	// string by resize controller or kubelet.
	// This is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.
	// +featureGate=RecoverVolumeExpansionFailure
	// +optional
	ResizeStatus *PersistentVolumeClaimResizeStatus `json:"resizeStatus,omitempty" protobuf:"bytes,6,opt,name=resizeStatus,casttype=PersistentVolumeClaimResizeStatus"`
}
// PersistentVolumeAccessMode defines the ways a PersistentVolume can be mounted.
// +enum
type PersistentVolumeAccessMode string

const (
	// ReadWriteOnce can be mounted in read/write mode to exactly 1 host.
	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
	// ReadOnlyMany can be mounted in read-only mode to many hosts.
	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
	// ReadWriteMany can be mounted in read/write mode to many hosts.
	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
	// ReadWriteOncePod can be mounted in read/write mode to exactly 1 pod;
	// cannot be used in combination with other access modes.
	ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
// +enum
type PersistentVolumePhase string

const (
	// VolumePending is used for PersistentVolumes that are not available.
	VolumePending PersistentVolumePhase = "Pending"
	// VolumeAvailable is used for PersistentVolumes that are not yet bound.
	// Available volumes are held by the binder and matched to PersistentVolumeClaims.
	VolumeAvailable PersistentVolumePhase = "Available"
	// VolumeBound is used for PersistentVolumes that are bound.
	VolumeBound PersistentVolumePhase = "Bound"
	// VolumeReleased is used for PersistentVolumes where the bound PersistentVolumeClaim was deleted.
	// Released volumes must be recycled before becoming available again;
	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource.
	VolumeReleased PersistentVolumePhase = "Released"
	// VolumeFailed is used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim.
	VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
// +enum
type PersistentVolumeClaimPhase string

const (
	// ClaimPending is used for PersistentVolumeClaims that are not yet bound.
	ClaimPending PersistentVolumeClaimPhase = "Pending"
	// ClaimBound is used for PersistentVolumeClaims that are bound.
	ClaimBound PersistentVolumeClaimPhase = "Bound"
	// ClaimLost is used for PersistentVolumeClaims that lost their underlying
	// PersistentVolume. The claim was bound to a PersistentVolume and this
	// volume does not exist any longer and all data on it was lost.
	ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType defines the check performed on the path of a HostPath volume.
// +enum
type HostPathType string

const (
	// HostPathUnset is used for backwards compatibility; leave it empty if unset.
	HostPathUnset HostPathType = ""
	// HostPathDirectoryOrCreate means that if nothing exists at the given path, an empty directory will be created there
	// as needed with file mode 0755, having the same group and ownership with Kubelet.
	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
	// HostPathDirectory means a directory must exist at the given path.
	HostPathDirectory HostPathType = "Directory"
	// HostPathFileOrCreate means that if nothing exists at the given path, an empty file will be created there
	// as needed with file mode 0644, having the same group and ownership with Kubelet.
	HostPathFileOrCreate HostPathType = "FileOrCreate"
	// HostPathFile means a file must exist at the given path.
	HostPathFile HostPathType = "File"
	// HostPathSocket means a UNIX socket must exist at the given path.
	HostPathSocket HostPathType = "Socket"
	// HostPathCharDev means a character device must exist at the given path.
	HostPathCharDev HostPathType = "CharDevice"
	// HostPathBlockDev means a block device must exist at the given path.
	HostPathBlockDev HostPathType = "BlockDevice"
)
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
	// path of the directory on the host.
	// If the path is a symlink, it will follow the link to the real path.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// type for HostPath Volume
	// Defaults to ""
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// Represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
	// medium represents what type of storage medium should back this directory.
	// The default is "" which means to use the node's default medium.
	// Must be an empty string (default) or Memory.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
	// sizeLimit is the total amount of local storage required for this EmptyDir volume.
	// The size limit is also applicable for memory medium.
	// The maximum usage on memory medium EmptyDir would be the minimum value between
	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
	// The default is nil which means that the limit is undefined.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
	// endpoints is the endpoint name that details Glusterfs topology.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// path is the Glusterfs volume path.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
// Identical to GlusterfsVolumeSource except for the additional optional
// EndpointsNamespace field, which is only meaningful for PersistentVolumes.
type GlusterfsPersistentVolumeSource struct {
	// endpoints is the endpoint name that details Glusterfs topology.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// path is the Glusterfs volume path.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// endpointsNamespace is the namespace that contains Glusterfs endpoint.
	// If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
	// More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
	// +optional
	EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Pod-level variant: SecretRef is a LocalObjectReference, so the secret must
// live in the pod's own namespace (contrast RBDPersistentVolumeSource).
type RBDVolumeSource struct {
	// monitors is a collection of Ceph monitors.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// image is the rados image name.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// pool is the rados pool name.
	// Default is rbd.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// user is the rados user name.
	// Default is admin.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// keyring is the path to key ring for RBDUser.
	// Default is /etc/ceph/keyring.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// secretRef is name of the authentication secret for RBDUser. If provided
	// overrides keyring.
	// Default is nil.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// PersistentVolume variant of RBDVolumeSource: SecretRef is a namespaced
// SecretReference, so the auth secret may live in any namespace.
type RBDPersistentVolumeSource struct {
	// monitors is a collection of Ceph monitors.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// image is the rados image name.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// pool is the rados pool name.
	// Default is rbd.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// user is the rados user name.
	// Default is admin.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// keyring is the path to key ring for RBDUser.
	// Default is /etc/ceph/keyring.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// secretRef is name of the authentication secret for RBDUser. If provided
	// overrides keyring.
	// Default is nil.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
// Pod-level variant: SecretRef is a LocalObjectReference (pod's namespace);
// see CinderPersistentVolumeSource for the namespaced variant.
type CinderVolumeSource struct {
	// volumeID used to identify the volume in cinder.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// readOnly defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// secretRef is optional: points to a secret object containing parameters used to connect
	// to OpenStack.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
// PersistentVolume variant of CinderVolumeSource: SecretRef is a namespaced
// SecretReference rather than a pod-local LocalObjectReference.
type CinderPersistentVolumeSource struct {
	// volumeID used to identify the volume in cinder.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// fsType Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://examples.k8s.io/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// secretRef is Optional: points to a secret object containing parameters used to connect
	// to OpenStack.
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Pod-level variant: SecretRef is a LocalObjectReference (pod's namespace);
// see CephFSPersistentVolumeSource for the namespaced variant.
type CephFSVolumeSource struct {
	// monitors is Required: Monitors is a collection of Ceph monitors
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// user is optional: User is the rados user name, default is admin
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
// Unlike LocalObjectReference, it carries an explicit Namespace, which is why
// the *PersistentVolumeSource types use it for their secretRef fields.
// +structType=atomic
type SecretReference struct {
	// name is unique within a namespace to reference a secret resource.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// namespace defines the space within which the secret name must be unique.
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
// PersistentVolume variant of CephFSVolumeSource: SecretRef is a namespaced
// SecretReference rather than a pod-local LocalObjectReference.
type CephFSPersistentVolumeSource struct {
	// monitors is Required: Monitors is a collection of Ceph monitors
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// user is Optional: User is the rados user name, default is admin
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// Represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
	// datasetName is the name of the dataset, stored as metadata -> name on the
	// Flocker dataset. Deprecated in favor of datasetUUID.
	// +optional
	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
	// datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
	// +optional
	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string

// These are the valid values for StorageMedium.
const (
	StorageMediumDefault     StorageMedium = ""           // use whatever the default is for the node, assume anything we don't explicitly handle is this
	StorageMediumMemory      StorageMedium = "Memory"     // use memory (e.g. tmpfs on linux)
	StorageMediumHugePages   StorageMedium = "HugePages"  // use hugepages
	StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages-<size>
)
// Protocol defines network protocols supported for things like container ports.
// +enum
type Protocol string

// These are the valid values for Protocol.
const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
	// ProtocolSCTP is the SCTP protocol.
	ProtocolSCTP Protocol = "SCTP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
// See AWSElasticBlockStoreVolumeSource for the AWS analogue.
type GCEPersistentDiskVolumeSource struct {
	// pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
	// fsType is filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// partition is the partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
	// registry represents a single or multiple Quobyte Registry services
	// specified as a string as host:port pair (multiple entries are separated with commas)
	// which acts as the central registry for volumes
	Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
	// volume is a string that references an already created Quobyte volume by name.
	Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
	// readOnly here will force the Quobyte volume to be mounted with read-only permissions.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// user to map volume access to
	// Defaults to serviceaccount user
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
	// group to map volume access to
	// Default is no group
	// +optional
	Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
	// tenant owning the given Quobyte volume in the Backend
	// Used with dynamically provisioned Quobyte volumes, value is set by the plugin
	// +optional
	Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
// PersistentVolume variant of FlexVolumeSource: SecretRef is a namespaced
// SecretReference rather than a pod-local LocalObjectReference.
type FlexPersistentVolumeSource struct {
	// driver is the name of the driver to use for this volume.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// fsType is the Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// secretRef is Optional: SecretRef is reference to the secret object containing
	// sensitive information to pass to the plugin scripts. This may be
	// empty if no secret object is specified. If the secret object
	// contains more than one secret, all secrets are passed to the plugin
	// scripts.
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
	// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// options is Optional: this field holds extra command options if any.
	// +optional
	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// Pod-level variant: SecretRef is a LocalObjectReference (pod's namespace);
// see FlexPersistentVolumeSource for the namespaced variant.
type FlexVolumeSource struct {
	// driver is the name of the driver to use for this volume.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// secretRef is Optional: secretRef is reference to the secret object containing
	// sensitive information to pass to the plugin scripts. This may be
	// empty if no secret object is specified. If the secret object
	// contains more than one secret, all secrets are passed to the plugin
	// scripts.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
	// readOnly is Optional: defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// options is Optional: this field holds extra command options if any.
	// +optional
	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// Represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
// See GCEPersistentDiskVolumeSource for the GCE analogue.
type AWSElasticBlockStoreVolumeSource struct {
	// volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// partition is the partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// readOnly value true will force the readOnly setting in VolumeMounts.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
//
// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
// into the Pod's container.
type GitRepoVolumeSource struct {
	// repository is the URL of the git repository to clone into the volume.
	Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
	// revision is the commit hash for the specified revision.
	// +optional
	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
	// directory is the target directory name.
	// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
	// git repository. Otherwise, if specified, the volume will contain the git repository in
	// the subdirectory with the given name.
	// +optional
	Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// Adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
	// secretName is the name of the secret in the pod's namespace to use.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
	// items If unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// defaultMode is Optional: mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values
	// for mode bits. Defaults to 0644 (SecretVolumeSourceDefaultMode).
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
	// optional field specify whether the Secret or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// SecretVolumeSourceDefaultMode is the default file mode (0644) applied to
	// files projected from a Secret when DefaultMode is not specified.
	SecretVolumeSourceDefaultMode int32 = 0644
)
// Adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
	// LocalObjectReference names the Secret (in the pod's namespace) to project.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// items if unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// optional field specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// Represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
// Note: there is no secretRef field; the export is mounted without any
// Kubernetes-managed credentials.
type NFSVolumeSource struct {
	// server is the hostname or IP address of the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
	// path that is exported by the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// readOnly here will force the NFS export to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// Pod-level variant: SecretRef is a LocalObjectReference (pod's namespace);
// see ISCSIPersistentVolumeSource for the namespaced variant.
type ISCSIVolumeSource struct {
	// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
	// iqn is the target iSCSI Qualified Name.
	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
	// lun represents iSCSI Target Lun number.
	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
	// iscsiInterface is the interface Name that uses an iSCSI transport.
	// Defaults to 'default' (tcp).
	// +optional
	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
	// portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	// +optional
	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
	// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
	// +optional
	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
	// chapAuthSession defines whether support iSCSI Session CHAP authentication
	// +optional
	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
	// initiatorName is the custom iSCSI Initiator Name.
	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
	// <target portal>:<volume name> will be created for the connection.
	// +optional
	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// PersistentVolume variant of ISCSIVolumeSource: SecretRef is a namespaced
// SecretReference rather than a pod-local LocalObjectReference.
type ISCSIPersistentVolumeSource struct {
	// targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
	// iqn is Target iSCSI Qualified Name.
	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
	// lun is iSCSI Target Lun number.
	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
	// iscsiInterface is the interface Name that uses an iSCSI transport.
	// Defaults to 'default' (tcp).
	// +optional
	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
	// fsType is the filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
	// readOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
	// portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	// +optional
	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
	// chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
	// +optional
	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
	// chapAuthSession defines whether support iSCSI Session CHAP authentication
	// +optional
	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
	// secretRef is the CHAP Secret for iSCSI target and initiator authentication
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
	// initiatorName is the custom iSCSI Initiator Name.
	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
	// <target portal>:<volume name> will be created for the connection.
	// +optional
	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
// The volume is identified either by wwids, or by the targetWWNs+lun pair
// (see the wwids field below) — not both.
type FCVolumeSource struct {
	// targetWWNs is Optional: FC target worldwide names (WWNs)
	// +optional
	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
	// lun is Optional: FC target lun number
	// +optional
	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
	// fsType is the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// wwids Optional: FC volume world wide identifiers (wwids)
	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
	// +optional
	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
// secretName is the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// shareName is the azure share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFilePersistentVolumeSource struct {
// secretName is the name of secret that contains Azure Storage Account Name and Key
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// shareName is the azure Share Name
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key
// default is the same as the Pod
// NOTE(review): json tag lacks omitempty even though the field is an optional pointer
// (nil would serialize as "secretNamespace":null) — confirm this is intended.
// +optional
SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// volumePath is the path that identifies vSphere volume vmdk
VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
// fsType is filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// storagePolicyName is the storage Policy Based Management (SPBM) profile name.
// +optional
StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
// storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
// +optional
StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// pdID is the ID that identifies Photon Controller persistent disk
PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode for an Azure data disk.
// +enum
type AzureDataDiskCachingMode string
// AzureDataDiskKind is the kind of Azure data disk (shared, dedicated, or managed).
// +enum
type AzureDataDiskKind string
const (
// AzureDataDiskCachingNone disables host caching for the disk.
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
// AzureDataDiskCachingReadOnly enables read-only host caching for the disk.
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
// AzureDataDiskCachingReadWrite enables read/write host caching for the disk.
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
// AzureSharedBlobDisk allows multiple blob disks per storage account.
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
// AzureDedicatedBlobDisk uses a single blob disk per storage account.
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
// AzureManagedDisk is an Azure managed data disk (only in managed availability set).
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
// diskName is the Name of the data disk in the blob storage
DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
// diskURI is the URI of data disk in the blob storage
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
// cachingMode is the Host Caching mode: None, Read Only, Read Write.
// +optional
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
// fsType is Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// readOnly Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
// volumeID uniquely identifies a Portworx volume
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// fsType represents the filesystem type to mount
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume
type ScaleIOVolumeSource struct {
// gateway is the host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// system is the name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// secretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// sslEnabled Flag enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// storagePool is the ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// Default is ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// volumeName is the name of a volume already created in the ScaleIO system
// that is associated with this volume source.
// NOTE(review): tagged omitempty but missing the "+optional" marker comment,
// unlike sibling fields — confirm whether the marker was omitted intentionally.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs".
// Default is "xfs".
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// readOnly Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume
type ScaleIOPersistentVolumeSource struct {
// gateway is the host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// system is the name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// secretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
// Note: unlike ScaleIOVolumeSource, this is a namespaced SecretReference.
SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// sslEnabled is the flag to enable/disable SSL communication with Gateway, default false
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// storagePool is the ScaleIO Storage Pool associated with the protection domain.
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
// Default is ThinProvisioned.
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// volumeName is the name of a volume already created in the ScaleIO system
// that is associated with this volume source.
// NOTE(review): tagged omitempty but missing the "+optional" marker comment,
// unlike sibling fields — confirm whether the marker was omitted intentionally.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs".
// Default is "xfs"
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// StorageOSVolumeSource represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
// volumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// volumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// secretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSource struct {
// volumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// volumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// fsType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// readOnly defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// secretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// Note: this is a full ObjectReference (may name a secret in another
// namespace), unlike the LocalObjectReference in StorageOSVolumeSource.
// +optional
SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// items if unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// defaultMode is optional: mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
// optional specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// ConfigMapVolumeSourceDefaultMode is the default mode bits (0644) applied to
// files created from a ConfigMap volume when DefaultMode is unset.
ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// items if unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// optional specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ServiceAccountTokenProjection represents a projected service account token
// volume. This projection can be used to insert a service account token into
// the pods runtime filesystem for use against APIs (Kubernetes API Server or
// otherwise).
type ServiceAccountTokenProjection struct {
// audience is the intended audience of the token. A recipient of a token
// must identify itself with an identifier specified in the audience of the
// token, and otherwise should reject the token. The audience defaults to the
// identifier of the apiserver.
//+optional
Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
// expirationSeconds is the requested duration of validity of the service
// account token. As the token approaches expiration, the kubelet volume
// plugin will proactively rotate the service account token. The kubelet will
// start trying to rotate the token if the token is older than 80 percent of
// its time to live or if the token is older than 24 hours. Defaults to 1 hour
// and must be at least 10 minutes.
//+optional
ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
// path is the path relative to the mount point of the file to project the
// token into.
Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
}
// ProjectedVolumeSource represents a projected volume source
type ProjectedVolumeSource struct {
// sources is the list of volume projections
// +optional
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// defaultMode are the mode bits used to set permissions on created files by default.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// Defaults to 0644 (ProjectedVolumeSourceDefaultMode).
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection is a projection that may be projected along with other supported volume types.
// Exactly one of its fields should be set.
type VolumeProjection struct {
// all types below are the supported types for projection into the same volume
// secret information about the secret data to project
// +optional
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// downwardAPI information about the downwardAPI data to project
// +optional
DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
// configMap information about the configMap data to project
// +optional
ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
// serviceAccountToken is information about the serviceAccountToken data to project
// +optional
ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
}
const (
// ProjectedVolumeSourceDefaultMode is the default mode bits (0644) applied to
// files created from a projected volume when DefaultMode is unset.
ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
// key is the key to project.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// path is the relative path of the file to map the key to.
// May not be an absolute path.
// May not contain the path element '..'.
// May not start with the string '..'.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// mode is Optional: mode bits used to set permissions on this file.
// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
// If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity (Beta feature)
type LocalVolumeSource struct {
// path of the full path to the volume on the node.
// It can be either a directory or block device (disk, partition, ...).
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// fsType is the filesystem type to mount.
// It applies only when the Path is a block device.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a filesystem if unspecified.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// CSIPersistentVolumeSource represents storage that is managed by an external CSI volume driver (Beta feature)
type CSIPersistentVolumeSource struct {
// driver is the name of the driver to use for this volume.
// Required.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// volumeHandle is the unique volume name returned by the CSI volume
// plugin’s CreateVolume to refer to the volume on all subsequent calls.
// Required.
VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
// readOnly value to pass to ControllerPublishVolumeRequest.
// Defaults to false (read/write).
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// fsType to mount. Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs".
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// volumeAttributes of the volume to publish.
// +optional
VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
// controllerPublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerPublishVolume and ControllerUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
// nodeStageSecretRef is a reference to the secret object containing sensitive
// information to pass to the CSI driver to complete the CSI NodeStageVolume
// and NodeUnstageVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
// nodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
// controllerExpandSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// ControllerExpandVolume call.
// This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secrets are passed.
// +optional
ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"`
}
// CSIVolumeSource represents a source location of a volume to mount, managed by an external CSI driver
type CSIVolumeSource struct {
// driver is the name of the CSI driver that handles this volume.
// Consult with your admin for the correct name as registered in the cluster.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// readOnly specifies a read-only configuration for the volume.
// Defaults to false (read/write).
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
// fsType to mount. Ex. "ext4", "xfs", "ntfs".
// If not provided, the empty value is passed to the associated CSI driver
// which will determine the default filesystem to apply.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// volumeAttributes stores driver-specific properties that are passed to the CSI
// driver. Consult your driver's documentation for supported values.
// +optional
VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"`
// nodePublishSecretRef is a reference to the secret object containing
// sensitive information to pass to the CSI driver to complete the CSI
// NodePublishVolume and NodeUnpublishVolume calls.
// This field is optional, and may be empty if no secret is required. If the
// secret object contains more than one secret, all secret references are passed.
// +optional
NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
}
// EphemeralVolumeSource represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
// Will be used to create a stand-alone PVC to provision the volume.
// The pod in which this EphemeralVolumeSource is embedded will be the
// owner of the PVC, i.e. the PVC will be deleted together with the
// pod. The name of the PVC will be `<pod name>-<volume name>` where
// `<volume name>` is the name from the `PodSpec.Volumes` array
// entry. Pod validation will reject the pod if the concatenated name
// is not valid for a PVC (for example, too long).
//
// An existing PVC with that name that is not owned by the pod
// will *not* be used for the pod to avoid using an unrelated
// volume by mistake. Starting the pod is then blocked until
// the unrelated PVC is removed. If such a pre-created PVC is
// meant to be used by the pod, the PVC has to updated with an
// owner reference to the pod once the pod exists. Normally
// this should not be necessary, but it may be useful when
// manually reconstructing a broken cluster.
//
// This field is read-only and no changes will be made by Kubernetes
// to the PVC after it has been created.
//
// Required, must not be nil.
VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"`
// ReadOnly is tombstoned to show why 2 is a reserved protobuf tag.
// ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
// metadata may contain labels and annotations that will be copied into the PVC
// when creating it. No other fields are allowed and will be rejected during
// validation.
//
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec is the specification for the PersistentVolumeClaim. The entire content is
// copied unchanged into the PVC that gets created from this
// template. The same fields as in a PersistentVolumeClaim
// are also valid here.
Spec PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// name, if specified, must be an IANA_SVC_NAME and unique within the pod. Each
// named port in a pod must have a unique name. Name for the port that can be
// referred to by services.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// hostPort is the number of port to expose on the host.
// If specified, this must be a valid port number, 0 < x < 65536.
// If HostNetwork is specified, this must match ContainerPort.
// Most containers do not need this.
// +optional
HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
// containerPort is the number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536. Required.
ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
// protocol for port. Must be UDP, TCP, or SCTP.
// Defaults to "TCP".
// +optional
// +default="TCP"
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
// hostIP is what host IP to bind the external port to.
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
// name must match the Name of a Volume. Required.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// readOnly: mounted read-only if true, read-write otherwise (false or unspecified).
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
// mountPath is the path within the container at which the volume should be mounted. Must
// not contain ':'.
MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
// subPath is the path within the volume from which the container's volume should be mounted.
// Defaults to "" (volume's root).
// SubPath and SubPathExpr are mutually exclusive.
// +optional
SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
// mountPropagation determines how mounts are propagated from the host
// to container and the other way around.
// When not set, MountPropagationNone is used.
// This field is beta in 1.10.
// +optional
MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
// subPathExpr is the expanded path within the volume from which the container's volume should be mounted.
// Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
// Defaults to "" (volume's root).
// SubPathExpr and SubPath are mutually exclusive.
// +optional
SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"`
}
// MountPropagationMode describes mount propagation.
// +enum
type MountPropagationMode string
const (
// MountPropagationNone means that the volume in a container will
// not receive new mounts from the host or other containers, and filesystems
// mounted inside the container won't be propagated to the host or other
// containers.
// Note that this mode corresponds to "private" in Linux terminology.
MountPropagationNone MountPropagationMode = "None"
// MountPropagationHostToContainer means that the volume in a container will
// receive new mounts from the host or other containers, but filesystems
// mounted inside the container won't be propagated to the host or other
// containers.
// Note that this mode is recursively applied to all mounts in the volume
// ("rslave" in Linux terminology).
MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
// MountPropagationBidirectional means that the volume in a container will
// receive new mounts from the host or other containers, and its own mounts
// will be propagated from the container to the host or other containers.
// Note that this mode is recursively applied to all mounts in the volume
// ("rshared" in Linux terminology).
// NOTE(review): Bidirectional propagation is typically restricted to
// privileged containers — confirm against the kubelet documentation.
MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// volumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
// name must match the name of a persistentVolumeClaim in the pod
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// devicePath is the path inside of the container that the device will be mapped to.
DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional: no more than one of the following may be specified.
	// Variable references $(VAR_NAME) are expanded
	// using the previously defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
	// "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
	// Escaped references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}

// EnvVarSource represents a source for the value of an EnvVar.
// NOTE(review): the four fields below appear to be mutually exclusive (at most
// one set per source) — confirm against the API validation code.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['<KEY>']`, `metadata.annotations['<KEY>']`,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace.
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
// +structType=atomic
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}

// ResourceFieldSelector represents container resources (cpu, memory) and their output format.
// +structType=atomic
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars.
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select.
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1".
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
// +structType=atomic
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}

// SecretKeySelector selects a key of a Secret.
// +structType=atomic
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
// to populate environment variables from.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from.
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from.
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}

// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}

// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
	// The header field name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value.
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}

// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}

// URIScheme identifies the scheme used for connection to a host for Get actions.
// +enum
type URIScheme string

// These are the valid values for URIScheme.
const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}

// GRPCAction describes an action based on issuing a gRPC HealthCheckRequest
// to a port on the container.
type GRPCAction struct {
	// Port number of the gRPC service. Number must be in the range 1 to 65535.
	Port int32 `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Service is the name of the service to place in the gRPC HealthCheckRequest
	// (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
	//
	// If this is not specified, the default behavior is defined by gRPC.
	// +optional
	// +default=""
	Service *string `json:"service" protobuf:"bytes,2,opt,name=service"`
}

// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container.
	ProbeHandler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Default to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
	// Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
	// value overrides the value provided by the pod spec.
	// Value must be non-negative integer. The value zero indicates stop immediately via
	// the kill signal (no opportunity to shut down).
	// This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
	// Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,7,opt,name=terminationGracePeriodSeconds"`
}
// PullPolicy describes a policy for if/when to pull a container image.
// +enum
type PullPolicy string

// These are the valid image pull policies.
const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)

// PreemptionPolicy describes a policy for if/when to preempt a pod.
// +enum
type PreemptionPolicy string

// These are the valid preemption policies.
const (
	// PreemptLowerPriority means that pod can preempt other pods with lower priority.
	PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority"
	// PreemptNever means that pod never preempts other pods with lower priority.
	PreemptNever PreemptionPolicy = "Never"
)

// TerminationMessagePolicy describes how termination messages are retrieved from a container.
// +enum
type TerminationMessagePolicy string

// These are the valid termination message policies.
const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capabilities type.
type Capability string

// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities.
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities.
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}

// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}

const (
	// TerminationMessagePathDefault is the default path at which the termination
	// message of an application running in a container is captured.
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container represents a single application container that you want to run within a pod.
type Container struct {
	// Name of the container specified as a DNS_LABEL.
	// Each container in a pod must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Container image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// This field is optional to allow higher level config management to default or override
	// container images in workload controllers like Deployments and StatefulSets.
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The container image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The container image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// StartupProbe indicates that the Pod has successfully initialized.
	// If specified, no other probes are executed until this completes successfully.
	// If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
	// This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
	// when it might take a long time to load data or warm a cache, than during steady-state operation.
	// This cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// SecurityContext defines the security options the container should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false.
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// ProbeHandler defines a specific action that should be taken in a probe.
// One and only one of the fields must be specified.
type ProbeHandler struct {
	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// TCPSocket specifies an action involving a TCP port.
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
	// GRPC specifies an action involving a GRPC port.
	// This is a beta field and requires enabling GRPCContainerProbe feature gate.
	// +featureGate=GRPCContainerProbe
	// +optional
	GRPC *GRPCAction `json:"grpc,omitempty" protobuf:"bytes,4,opt,name=grpc"`
}

// LifecycleHandler defines a specific action that should be taken in a lifecycle
// hook. One and only one of the fields, except TCPSocket must be specified.
type LifecycleHandler struct {
	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
	// for backward compatibility. There is no validation of this field and
	// lifecycle hooks will fail in runtime when tcp handler is specified.
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
// Both hooks are optional; a nil handler means no action is taken for that event.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails,
	// the container is terminated and restarted according to its restart policy.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PostStart *LifecycleHandler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
	// PreStop is called immediately before a container is terminated due to an
	// API request or management event such as liveness/startup probe failure,
	// preemption, resource contention, etc. The handler is not called if the
	// container crashes or exits. The Pod's termination grace period countdown begins before the
	// PreStop hook is executed. Regardless of the outcome of the handler, the
	// container will eventually terminate within the Pod's termination grace
	// period (unless delayed by finalizers). Other management of the container blocks until the hook completes
	// or until the termination grace period is reached.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PreStop *LifecycleHandler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is a string representation of the status of a condition.
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)

// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
	// (brief) reason the container is not yet running.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
	// Message regarding why the container is not yet running.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
	// Time at which the container was last (re-)started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}

// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
	// Exit status from the last termination of the container.
	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
	// Signal from the last termination of the container.
	// +optional
	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
	// (brief) reason from the last termination of the container.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message regarding the last termination of the container.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Time at which previous execution of the container started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
	// Time at which the container last terminated.
	// +optional
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
	// Container's ID in the format '<type>://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
	// Details about a waiting container.
	// +optional
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
	// Details about a running container.
	// +optional
	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
	// Details about a terminated container.
	// +optional
	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}

// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// Defaults to ContainerStateWaiting when none of the members is set (see ContainerState).
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images.
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format '<type>://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
	// Specifies whether the container has passed its startup probe.
	// Initialized as false, becomes true after startupProbe is considered successful.
	// Resets to false when the container is restarted, or if kubelet loses state temporarily.
	// Is always true when no startupProbe is defined.
	// +optional
	Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"`
}
// PodPhase is a label for the condition of a pod at the current time.
// Valid values are enumerated in the constant block below.
// +enum
type PodPhase string

// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	// Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095)
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type.
// The built-in values are enumerated below; custom condition types are also allowed.
type PodConditionType string

// These are built-in conditions of pod. An application may use a custom condition not listed here.
const (
	// ContainersReady indicates whether all containers in the pod are ready.
	ContainersReady PodConditionType = "ContainersReady"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
)

// These are reasons for a pod's transition to a condition.
const (
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
// Type and Status are required; the remaining fields provide supplementary detail.
type PodCondition struct {
	// Type is the type of the condition.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
// +enum
type RestartPolicy string

const (
	// RestartPolicyAlways means the container is restarted after any termination.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyOnFailure means the container is restarted only if it exits with a non-zero status.
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	// RestartPolicyNever means the container is never restarted.
	RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
// Valid values are enumerated in the constant block below.
// +enum
type DNSPolicy string

const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"
	// DNSNone indicates that the pod should use empty DNS settings. DNS
	// parameters such as nameservers and search paths should be defined via
	// DNSConfig.
	DNSNone DNSPolicy = "None"
)
const (
	// DefaultTerminationGracePeriodSeconds indicates the default duration in
	// seconds a pod needs to terminate gracefully. It is used when
	// PodSpec.TerminationGracePeriodSeconds is nil.
	DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
// +structType=atomic
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// A null or empty node selector term matches no objects. The requirements of
// them (MatchExpressions and MatchFields) are ANDed.
// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
// +structType=atomic
type NodeSelectorTerm struct {
	// A list of node selector requirements by node's labels.
	// +optional
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
	// A list of node selector requirements by node's fields.
	// +optional
	MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A node selector operator is the set of operators that can be used in
// a node selector requirement.
// See NodeSelectorRequirement.Values for the per-operator constraints on Values.
// +enum
type NodeSelectorOperator string

const (
	// NodeSelectorOpIn matches when the value is in the (non-empty) Values array.
	NodeSelectorOpIn NodeSelectorOperator = "In"
	// NodeSelectorOpNotIn matches when the value is not in the (non-empty) Values array.
	NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
	// NodeSelectorOpExists matches when the key is present; Values must be empty.
	NodeSelectorOpExists NodeSelectorOperator = "Exists"
	// NodeSelectorOpDoesNotExist matches when the key is absent; Values must be empty.
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	// NodeSelectorOpGt matches when the value, interpreted as an integer, is greater
	// than the single element of Values.
	NodeSelectorOpGt NodeSelectorOperator = "Gt"
	// NodeSelectorOpLt matches when the value, interpreted as an integer, is less
	// than the single element of Values.
	NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
// A topology selector term represents the result of label queries.
// A null or empty topology selector term matches no objects.
// The requirements of them are ANDed.
// It provides a subset of functionality as NodeSelectorTerm.
// This is an alpha feature and may change in the future.
// +structType=atomic
type TopologySelectorTerm struct {
	// Usage: Fields of type []TopologySelectorTerm must be listType=atomic.

	// A list of topology selector requirements by labels.
	// +optional
	MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
}
// A topology selector requirement is a selector that matches given label.
// This is an alpha feature and may change in the future.
type TopologySelectorLabelRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// An array of string values. One value must match the label to be selected.
	// Each entry in Values is ORed.
	// This field is required (no omitempty).
	Values []string `json:"values" protobuf:"bytes,2,rep,name=values"`
}
// Affinity is a group of affinity scheduling rules.
// All three members are optional.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// Pod affinity is a group of inter pod affinity scheduling rules.
// The Required... field expresses hard constraints; the Preferred... field
// expresses soft, weighted preferences.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
// The Required... field expresses hard constraints; the Preferred... field
// expresses soft, weighted preferences.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
type WeightedPodAffinityTerm struct {
	// Weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies a static list of namespace names that the term applies to.
	// The term is applied to the union of the namespaces listed in this field
	// and the ones selected by namespaceSelector.
	// null or empty namespaces list and null namespaceSelector means "this pod's namespace".
	// +optional
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// Empty topologyKey is not allowed; this field is required.
	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
	// A label query over the set of namespaces that the term applies to.
	// The term is applied to the union of the namespaces selected by this field
	// and the ones listed in the namespaces field.
	// null selector and null or empty namespaces list means "this pod's namespace".
	// An empty selector ({}) matches all namespaces.
	// +optional
	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"`
}
// Node affinity is a group of node affinity scheduling rules.
// The Required... field expresses hard constraints; the Preferred... field
// expresses soft, weighted preferences.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
// TaintEffect describes the effect a Taint has on pods that do not
// tolerate it. Valid values are enumerated in the constant block below.
// +enum
type TaintEffect string

const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"

	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// A toleration operator is the set of operators that can be used in a toleration.
// +enum
type TolerationOperator string

const (
	// TolerationOpExists matches any taint value for the given key (wildcard).
	TolerationOpExists TolerationOperator = "Exists"
	// TolerationOpEqual matches when the taint value equals the toleration's Value.
	TolerationOpEqual TolerationOperator = "Equal"
)
// PodReadinessGate contains the reference to a pod condition.
type PodReadinessGate struct {
	// ConditionType refers to a condition in the pod's condition list with matching type.
	// This field is required (no omitempty).
	ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
}
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes
// +optional
// +patchMergeKey=name
// +patchStrategy=merge,retainKeys
Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
// List of initialization containers belonging to the pod.
// Init containers are executed in order prior to containers being started. If any
// init container fails, the pod is considered to have failed and is handled according
// to its restartPolicy. The name for an init container or normal container must be
// unique among all containers.
// Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
	// that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
// +patchMergeKey=name
// +patchStrategy=merge
InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
// List of containers belonging to the pod.
// Containers cannot currently be added or removed.
// There must be at least one container in a Pod.
// Cannot be updated.
// +patchMergeKey=name
// +patchStrategy=merge
Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
// List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
// pod to perform user-initiated actions such as debugging. This list cannot be specified when
// creating a pod, and it cannot be modified by updating the pod spec. In order to add an
// ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"`
// Restart policy for all containers within the pod.
// One of Always, OnFailure, Never.
// Default to Always.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
// +optional
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
// Value must be non-negative integer. The value zero indicates stop immediately via
// the kill signal (no opportunity to shut down).
// If this value is nil, the default grace period will be used instead.
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
// Defaults to 30 seconds.
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
// Optional duration in seconds the pod may be active on the node relative to
// StartTime before the system will actively try to mark it failed and kill associated containers.
// Value must be a positive integer.
// +optional
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
// Set DNS policy for the pod.
// Defaults to "ClusterFirst".
// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// +optional
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
// +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
// DeprecatedServiceAccount is a depreciated alias for ServiceAccountName.
// Deprecated: Use serviceAccountName instead.
// +k8s:conversion-gen=false
// +optional
DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
// requirements.
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
// Host networking requested for this pod. Use the host's network namespace.
// If this option is set, the ports that will be used must be specified.
// Default to false.
// +k8s:conversion-gen=false
// +optional
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
// Use the host's pid namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
// Use the host's ipc namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
// If specified, these secrets will be passed to individual puller implementations for them to use.
// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
// Specifies the hostname of the Pod
// If not specified, the pod's hostname will be set to a system-defined value.
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
// If not specified, the pod will not have a domainname at all.
// +optional
Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
// If specified, the pod's scheduling constraints
// +optional
Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
// If specified, the pod will be dispatched by specified scheduler.
// If not specified, the pod will be dispatched by default scheduler.
// +optional
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
// If specified, the pod's tolerations.
// +optional
Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
// file if specified. This is only valid for non-hostNetwork pods.
// +optional
// +patchMergeKey=ip
// +patchStrategy=merge
HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
// If specified, indicates the pod's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the pod priority will be default or zero if there is no
// default.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
// The priority value. Various system components use this field to find the
// priority of the pod. When Priority Admission Controller is enabled, it
// prevents users from setting this field. The admission controller populates
// this field from PriorityClassName.
// The higher the value, the higher the priority.
// +optional
Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
// If specified, all readiness gates will be evaluated for pod readiness.
// A pod is ready when all its containers are ready AND
// all conditions specified in the readiness gates have status equal to "True"
// More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
// +optional
ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
// empty definition that uses the default runtime handler.
// More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
// EnableServiceLinks indicates whether information about services should be injected into pod's
// environment variables, matching the syntax of Docker links.
// Optional: Defaults to true.
// +optional
EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
// PreemptionPolicy is the Policy for preempting pods with lower priority.
// One of Never, PreemptLowerPriority.
// Defaults to PreemptLowerPriority if unset.
// +optional
PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"`
// Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
// This field will be autopopulated at admission time by the RuntimeClass admission controller. If
// the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
// More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
// +optional
Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
// domains. Scheduler will schedule pods in a way which abides by the constraints.
// All topologySpreadConstraints are ANDed.
// +optional
// +patchMergeKey=topologyKey
// +patchStrategy=merge
// +listType=map
// +listMapKey=topologyKey
// +listMapKey=whenUnsatisfiable
TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"`
// If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
// In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
// In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
// If a pod does not have FQDN, this has no effect.
// Default to false.
// +optional
SetHostnameAsFQDN *bool `json:"setHostnameAsFQDN,omitempty" protobuf:"varint,35,opt,name=setHostnameAsFQDN"`
// Specifies the OS of the containers in the pod.
// Some pod and container fields are restricted if this is set.
//
// If the OS field is set to linux, the following fields must be unset:
// -securityContext.windowsOptions
//
// If the OS field is set to windows, following fields must be unset:
// - spec.hostPID
// - spec.hostIPC
// - spec.securityContext.seLinuxOptions
// - spec.securityContext.seccompProfile
// - spec.securityContext.fsGroup
// - spec.securityContext.fsGroupChangePolicy
// - spec.securityContext.sysctls
// - spec.shareProcessNamespace
// - spec.securityContext.runAsUser
// - spec.securityContext.runAsGroup
// - spec.securityContext.supplementalGroups
// - spec.containers[*].securityContext.seLinuxOptions
// - spec.containers[*].securityContext.seccompProfile
// - spec.containers[*].securityContext.capabilities
// - spec.containers[*].securityContext.readOnlyRootFilesystem
// - spec.containers[*].securityContext.privileged
// - spec.containers[*].securityContext.allowPrivilegeEscalation
// - spec.containers[*].securityContext.procMount
// - spec.containers[*].securityContext.runAsUser
// - spec.containers[*].securityContext.runAsGroup
// +optional
// This is a beta field and requires the IdentifyPodOS feature
OS *PodOS `json:"os,omitempty" protobuf:"bytes,36,opt,name=os"`
}
// OSName is the set of operating system names that can be used in OS.
type OSName string

// These are the valid values for OSName.
const (
	Linux   OSName = "linux"
	Windows OSName = "windows"
)
// PodOS defines the OS parameters of a pod.
type PodOS struct {
	// Name is the name of the operating system. The currently supported values are linux and windows.
	// Additional values may be defined in the future and can be one of:
	// https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
	// Clients should expect to handle additional values and treat unrecognized values in this field as os: null
	Name OSName `json:"name" protobuf:"bytes,1,opt,name=name"`
}
// UnsatisfiableConstraintAction defines the actions that can be taken for an
// unsatisfiable topology spread constraint.
// +enum
type UnsatisfiableConstraintAction string

const (
	// DoNotSchedule instructs the scheduler not to schedule the pod
	// when constraints are not satisfied.
	DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule"
	// ScheduleAnyway instructs the scheduler to schedule the pod
	// even if constraints are not satisfied.
	ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway"
)
// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
type TopologySpreadConstraint struct {
	// MaxSkew describes the degree to which pods may be unevenly distributed.
	// When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
	// between the number of matching pods in the target topology and the global minimum.
	// The global minimum is the minimum number of matching pods in an eligible domain
	// or zero if the number of eligible domains is less than MinDomains.
	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
	// labelSelector spread as 2/2/1:
	// In this case, the global minimum is 1.
	// +-------+-------+-------+
	// | zone1 | zone2 | zone3 |
	// +-------+-------+-------+
	// | P P | P P | P |
	// +-------+-------+-------+
	// - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
	// scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
	// violate MaxSkew(1).
	// - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
	// When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
	// to topologies that satisfy it.
	// It's a required field. Default value is 1 and 0 is not allowed.
	MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"`
	// TopologyKey is the key of node labels. Nodes that have a label with this key
	// and identical values are considered to be in the same topology.
	// We consider each <key, value> as a "bucket", and try to put balanced number
	// of pods into each bucket.
	// We define a domain as a particular instance of a topology.
	// Also, we define an eligible domain as a domain whose nodes match the node selector.
	// e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
	// And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
	// It's a required field.
	TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"`
	// WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
	// the spread constraint.
	// - DoNotSchedule (default) tells the scheduler not to schedule it.
	// - ScheduleAnyway tells the scheduler to schedule the pod in any location,
	// but giving higher precedence to topologies that would help reduce the
	// skew.
	// A constraint is considered "Unsatisfiable" for an incoming pod
	// if and only if every possible node assignment for that pod would violate
	// "MaxSkew" on some topology.
	// For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
	// labelSelector spread as 3/1/1:
	// +-------+-------+-------+
	// | zone1 | zone2 | zone3 |
	// +-------+-------+-------+
	// | P P P | P | P |
	// +-------+-------+-------+
	// If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
	// to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
	// MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
	// won't make it *more* imbalanced.
	// It's a required field.
	WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"`
	// LabelSelector is used to find matching pods.
	// Pods that match this label selector are counted to determine the number of pods
	// in their corresponding topology domain.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"`
	// MinDomains indicates a minimum number of eligible domains.
	// When the number of eligible domains with matching topology keys is less than minDomains,
	// Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
	// And when the number of eligible domains with matching topology keys is equal to or greater than minDomains,
	// this value has no effect on scheduling.
	// As a result, when the number of eligible domains is less than minDomains,
	// scheduler won't schedule more than maxSkew Pods to those domains.
	// If value is nil, the constraint behaves as if MinDomains is equal to 1.
	// Valid values are integers greater than 0.
	// When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
	//
	// For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
	// labelSelector spread as 2/2/2:
	// +-------+-------+-------+
	// | zone1 | zone2 | zone3 |
	// +-------+-------+-------+
	// | P P | P P | P P |
	// +-------+-------+-------+
	// The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
	// In this situation, new pod with the same labelSelector cannot be scheduled,
	// because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
	// it will violate MaxSkew.
	//
	// This is an alpha field and requires enabling MinDomainsInPodTopologySpread feature gate.
	// +optional
	MinDomains *int32 `json:"minDomains,omitempty" protobuf:"varint,5,opt,name=minDomains"`
}
const (
	// DefaultEnableServiceLinks is the default value for the pod's
	// enableServiceLinks attribute.
	DefaultEnableServiceLinks = true
)
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP is the IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames holds the hostnames for the above IP address.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
// when volume is mounted.
// +enum
type PodFSGroupChangePolicy string

const (
	// FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
	// only when permission and ownership of root directory does not match with expected
	// permissions on the volume. This can help shorten the time it takes to change
	// ownership and permissions of a volume.
	FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
	// FSGroupChangeAlways indicates that volume's ownership and permissions
	// should always be changed whenever volume is mounted inside a Pod. This is the default
	// behavior.
	FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
)
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in SecurityContext. If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The Windows specific settings applied to all containers.
	// If unspecified, the options within a container's SecurityContext will be used.
	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is linux.
	// +optional
	WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID. If unspecified, no groups will be added to
	// any container.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
	// Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
	// sysctls (by the container runtime) might fail to launch.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"`
	// fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
	// before being exposed inside Pod. This field will only apply to
	// volume types which support fsGroup based ownership (and permissions).
	// It will have no effect on ephemeral volume types such as: secret, configmaps
	// and emptydir.
	// Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"`
	// The seccomp options to use by the containers in this pod.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,10,opt,name=seccompProfile"`
}
// SeccompProfile defines a pod or container's seccomp profile settings.
// Only one profile source may be set.
// +union
type SeccompProfile struct {
	// type indicates which kind of seccomp profile will be applied.
	// Valid options are:
	//
	// Localhost - a profile defined in a file on the node should be used.
	// RuntimeDefault - the container runtime default profile should be used.
	// Unconfined - no profile should be applied.
	// +unionDiscriminator
	Type SeccompProfileType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SeccompProfileType"`
	// localhostProfile indicates a profile defined in a file on the node should be used.
	// The profile must be preconfigured on the node to work.
	// Must be a descending path, relative to the kubelet's configured seccomp profile location.
	// Must only be set if type is "Localhost".
	// +optional
	LocalhostProfile *string `json:"localhostProfile,omitempty" protobuf:"bytes,2,opt,name=localhostProfile"`
}
// SeccompProfileType defines the supported seccomp profile types.
// +enum
type SeccompProfileType string

const (
	// SeccompProfileTypeUnconfined indicates no seccomp profile is applied (A.K.A. unconfined).
	SeccompProfileTypeUnconfined SeccompProfileType = "Unconfined"
	// SeccompProfileTypeRuntimeDefault represents the default container runtime seccomp profile.
	SeccompProfileTypeRuntimeDefault SeccompProfileType = "RuntimeDefault"
	// SeccompProfileTypeLocalhost indicates a profile defined in a file on the node should be used.
	// The file's location is relative to <kubelet-root-dir>/seccomp.
	SeccompProfileTypeLocalhost SeccompProfileType = "Localhost"
)
// PodQOSClass defines the supported QoS (Quality of Service) classes of Pods.
// +enum
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed QoS class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable QoS class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort QoS class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
type PodDNSConfig struct {
	// A list of DNS name server IP addresses.
	// This will be appended to the base nameservers generated from DNSPolicy.
	// Duplicated nameservers will be removed.
	// +optional
	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
	// A list of DNS search domains for hostname lookup.
	// This will be appended to the base search paths generated from DNSPolicy.
	// Duplicated search paths will be removed.
	// +optional
	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
	// A list of DNS resolver options.
	// This will be merged with the base options generated from DNSPolicy.
	// Duplicated entries will be removed. Resolution options given in Options
	// will override those that appear in the base DNSPolicy.
	// +optional
	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
	// Name is the name of the resolver option. Required.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Value is the value of the resolver option, if any.
	// +optional
	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// PodIP holds the IP address information for entries in the (plural) PodIPs field.
// Each entry includes:
// IP: An IP address allocated to the pod. Routable at least within the cluster.
type PodIP struct {
	// ip is an IP address (IPv4 or IPv6) assigned to the pod
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
}
// EphemeralContainerCommon is a copy of all fields in Container to be inlined in
// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer
// to Container and allows separate documentation for the fields of EphemeralContainer.
// When a new field is added to Container it must be added here as well.
type EphemeralContainerCommon struct {
	// Name of the ephemeral container specified as a DNS_LABEL.
	// This name must be unique among all containers, init containers and ephemeral containers.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Container image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
	// to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
	// produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
	// of whether the variable exists or not. Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// Ports are not allowed for ephemeral containers.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	// +listType=map
	// +listMapKey=containerPort
	// +listMapKey=protocol
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
	// already allocated to the pod.
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Probes are not allowed for ephemeral containers.
	// +optional
	StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"`
	// Lifecycle is not allowed for ephemeral containers.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Optional: SecurityContext defines the security options the ephemeral container should be run with.
	// If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// EphemeralContainerCommon converts to Container. All fields must be kept in sync between
// these two types.
// The assignment below is a compile-time assertion: the conversion is only legal in Go
// if the two structs have identical field sets, so the build fails if they ever diverge.
var _ = Container(EphemeralContainerCommon{})
// An EphemeralContainer is a temporary container that you may add to an existing Pod for
// user-initiated activities such as debugging. Ephemeral containers have no resource or
// scheduling guarantees, and they will not be restarted when they exit or when a Pod is
// removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
// Pod to exceed its resource allocation.
//
// To add an ephemeral container, use the ephemeralcontainers subresource of an existing
// Pod. Ephemeral containers may not be removed or restarted.
//
// This is a beta feature available on clusters that haven't disabled the EphemeralContainers feature gate.
type EphemeralContainer struct {
// Ephemeral containers have all of the fields of Container, plus additional fields
// specific to ephemeral containers. Fields in common with Container are in the
// following inlined struct so that an EphemeralContainer may easily be converted
// to a Container.
EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"`
// If set, the name of the container from PodSpec that this ephemeral container targets.
// The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
// If not set then the ephemeral container uses the namespaces configured in the Pod spec.
//
// The container runtime must implement support for this feature. If the runtime does not
// support namespace targeting then the result of setting this field is undefined.
// +optional
TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system, especially if the node that hosts the pod cannot contact the control
// plane.
type PodStatus struct {
// The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
// The conditions array, the reason and message fields, and the individual container status
// arrays contain more detail about the pod's status.
// There are five possible phase values:
//
// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
// container images has not been created. This includes time before being scheduled as
// well as time spent downloading images over the network, which could take a while.
// Running: The pod has been bound to a node, and all of the containers have been created.
// At least one container is still running, or is in the process of starting or restarting.
// Succeeded: All containers in the pod have terminated in success, and will not be restarted.
// Failed: All containers in the pod have terminated, and at least one container has
// terminated in failure. The container either exited with non-zero status or was terminated
// by the system.
// Unknown: For some reason the state of the pod could not be obtained, typically due to an
// error in communicating with the host of the pod.
//
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
// +optional
Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
// Current service state of pod.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
// A human readable message indicating details about why the pod is in this condition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the pod is in this state.
// e.g. 'Evicted'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
// scheduled right away as preemption victims receive their graceful termination periods.
// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
// give the resources on this node to a higher priority pod that is created after preemption.
// As a result, this field may be different than PodSpec.nodeName when the pod is
// scheduled.
// +optional
NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
// IP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
// podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must
// match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list
// is empty if no IPs have been allocated yet.
// +optional
// +patchStrategy=merge
// +patchMergeKey=ip
PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"`
// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
// This is before the Kubelet pulled the container image(s) for the pod.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
// The list has one entry per init container in the manifest. The most recent successful
// init container will have ready = true, the most recently started container will have
// startTime set.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// NOTE(review): unlike the sibling container-status lists, this field carries no
// +optional marker even though its json tag has omitempty — confirm whether intentional.
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
// The list has one entry per container in the manifest.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
// See PodQOSClass type for available QOS classes
// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
// +optional
QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
// Status for any ephemeral containers that have run in this pod.
// This field is beta-level and available on clusters that haven't disabled the EphemeralContainers feature gate.
// +optional
EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
type PodStatusResult struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Most recently observed status of the pod.
// This data may not be up to date.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/
type Pod struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the pod.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the pod.
// This data may not be up to date.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodList is a list of Pods.
type PodList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of pods.
// Note: the json tag has no omitempty, so this field is always present in serialized output.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md
Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template
// NOTE(review): unlike top-level API objects, this type carries no metav1.TypeMeta —
// presumably because it is only ever embedded in a parent object (e.g. PodTemplate); confirm.
type PodTemplateSpec struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the pod.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Template defines the pods that will be created from this pod template.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of pod templates.
Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
// Replicas is the number of desired replicas.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// Selector is a label query over pods that should match the Replicas count.
// If Selector is empty, it is defaulted to the labels present on the Pod template.
// Label keys and values that must match in order to be controlled by this replication
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// TemplateRef is a reference to an object that describes the pod that will be created if
// insufficient replicas are detected.
// +optional
// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. This takes precedence over a TemplateRef.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
// +optional
Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
// Replicas is the most recently observed number of replicas.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
// The number of pods that have labels matching the labels of the pod template of the replication controller.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
// The number of ready replicas for this replication controller.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
// ObservedGeneration reflects the generation of the most recently observed replication controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
// Represents the latest available observations of a replication controller's current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a ReplicationControllerCondition.
type ReplicationControllerConditionType string
// These are valid conditions of a replication controller.
const (
// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
// etc. or deleted due to kubelet being down or finalizers are failing.
ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
// Type of replication controller condition.
// Currently only "ReplicaFailure" (ReplicationControllerReplicaFailure) is defined.
Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// The +genclient:method markers above direct client-gen to also emit typed
// GetScale/UpdateScale client methods for the scale subresource.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
metav1.TypeMeta `json:",inline"`
// If the Labels of a ReplicationController are empty, they are defaulted to
// be the same as the Pod(s) that the replication controller manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the specification of the desired behavior of the replication controller.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is the most recently observed status of the replication controller.
// This data may be out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of replication controllers.
// Note: the json tag has no omitempty, so this field is always present in serialized output.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type of a Service ("ClientIP" or "None").
// +enum
type ServiceAffinity string
const (
// ServiceAffinityClientIP is the Client IP based.
ServiceAffinityClientIP ServiceAffinity = "ClientIP"
// ServiceAffinityNone - no session affinity.
ServiceAffinityNone ServiceAffinity = "None"
)
// DefaultClientIPServiceAffinitySeconds is the default session sticky time for
// ClientIP session affinity: 10800 seconds (3 hours).
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configuration of session affinity.
type SessionAffinityConfig struct {
// clientIP contains the configuration of Client IP based session affinity.
// +optional
ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
// The value must be > 0 and <= 86400 (1 day) if ServiceAffinity == "ClientIP".
// Default value is 10800 (3 hours); see DefaultClientIPServiceAffinitySeconds.
// +optional
TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service (how it is exposed).
// +enum
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the cluster IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
// ServiceTypeExternalName means a service consists of only a reference to
// an external name that kubedns or equivalent will return as a CNAME
// record, with no exposing or proxying of any pods involved.
ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceInternalTrafficPolicyType describes the type of traffic routing for
// internal traffic.
// +enum
type ServiceInternalTrafficPolicyType string
const (
// ServiceInternalTrafficPolicyCluster routes traffic to all endpoints.
ServiceInternalTrafficPolicyCluster ServiceInternalTrafficPolicyType = "Cluster"
// ServiceInternalTrafficPolicyLocal only routes to node-local
// endpoints, otherwise drops the traffic.
ServiceInternalTrafficPolicyLocal ServiceInternalTrafficPolicyType = "Local"
)
// ServiceExternalTrafficPolicyType describes how external traffic is routed to a
// Service's endpoints.
// +enum
type ServiceExternalTrafficPolicyType string
const (
// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// These are the valid conditions of a service.
const (
// LoadBalancerPortsError represents the condition of the requested ports
// on the cloud load balancer instance.
// NOTE(review): this appears to be used as a condition type in
// ServiceStatus.Conditions ([]metav1.Condition) — confirm against callers.
LoadBalancerPortsError = "LoadBalancerPortsError"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
// +optional
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
// Current service state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// Traffic intended for the service should be sent to these ingress points.
// Each entry is described by LoadBalancerIngress.
// +optional
Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
// +optional
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
// Ports is a list of records of service ports.
// If used, every port defined in the service should have an entry in it.
// NOTE(review): protobuf field number 3 is skipped here (tags jump from 2 to 4) —
// presumably reserved for a removed or pending field; confirm before reusing.
// +listType=atomic
// +optional
Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
}
// IPFamily represents the IP Family (IPv4 or IPv6). This type is used
// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
// +enum
type IPFamily string
const (
// IPv4Protocol indicates that this IP uses the IPv4 protocol.
IPv4Protocol IPFamily = "IPv4"
// IPv6Protocol indicates that this IP uses the IPv6 protocol.
IPv6Protocol IPFamily = "IPv6"
)
// IPFamilyPolicyType represents the dual-stack-ness requested or required by a Service
// +enum
type IPFamilyPolicyType string
const (
// IPFamilyPolicySingleStack indicates that this service is required to have a single IPFamily.
// The IPFamily assigned is based on the default IPFamily used by the cluster
// or as identified by the service.spec.ipFamilies field.
IPFamilyPolicySingleStack IPFamilyPolicyType = "SingleStack"
// IPFamilyPolicyPreferDualStack indicates that this service prefers dual-stack when
// the cluster is configured for dual-stack. If the cluster is not configured
// for dual-stack the service will be assigned a single IPFamily. If the IPFamily is not
// set in service.spec.ipFamilies then the service will be assigned the default IPFamily
// configured on the cluster.
IPFamilyPolicyPreferDualStack IPFamilyPolicyType = "PreferDualStack"
// IPFamilyPolicyRequireDualStack indicates that this service requires dual-stack. Using
// IPFamilyPolicyRequireDualStack on a single stack cluster will result in validation errors. The
// IPFamilies (and their order) assigned to this service is based on service.spec.ipFamilies. If
// service.spec.ipFamilies was not provided then it will be assigned according to how they are
// configured on the cluster. If service.spec.ipFamilies has only one entry then the alternative
// IPFamily will be added by the apiserver.
IPFamilyPolicyRequireDualStack IPFamilyPolicyType = "RequireDualStack"
)
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
// The list of ports that are exposed by this service.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +patchMergeKey=port
// +patchStrategy=merge
// +listType=map
// +listMapKey=port
// +listMapKey=protocol
Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
// Route service traffic to pods with label keys and values matching this
// selector. If empty or not present, the service is assumed to have an
// external process managing its endpoints, which Kubernetes will not
// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
// +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// clusterIP is the IP address of the service and is usually assigned
// randomly. If an address is specified manually, is in-range (as per
// system configuration), and is not in use, it will be allocated to the
// service; otherwise creation of the service will fail. This field may not
// be changed through updates unless the type field is also being changed
// to ExternalName (which requires this field to be blank) or the type
// field is being changed from ExternalName (in which case this field may
// optionally be specified, as describe above). Valid values are "None",
// empty string (""), or a valid IP address. Setting this to "None" makes a
// "headless service" (no virtual IP), which is useful when direct endpoint
// connections are preferred and proxying is not required. Only applies to
// types ClusterIP, NodePort, and LoadBalancer. If this field is specified
// when creating a Service of type ExternalName, creation will fail. This
// field will be wiped when updating a Service to type ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional
ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
// ClusterIPs is a list of IP addresses assigned to this service, and are
// usually assigned randomly. If an address is specified manually, is
// in-range (as per system configuration), and is not in use, it will be
// allocated to the service; otherwise creation of the service will fail.
// This field may not be changed through updates unless the type field is
// also being changed to ExternalName (which requires this field to be
// empty) or the type field is being changed from ExternalName (in which
// case this field may optionally be specified, as describe above). Valid
// values are "None", empty string (""), or a valid IP address. Setting
// this to "None" makes a "headless service" (no virtual IP), which is
// useful when direct endpoint connections are preferred and proxying is
// not required. Only applies to types ClusterIP, NodePort, and
// LoadBalancer. If this field is specified when creating a Service of type
// ExternalName, creation will fail. This field will be wiped when updating
// a Service to type ExternalName. If this field is not specified, it will
// be initialized from the clusterIP field. If this field is specified,
// clients must ensure that clusterIPs[0] and clusterIP have the same
// value.
//
// This field may hold a maximum of two entries (dual-stack IPs, in either order).
// These IPs must correspond to the values of the ipFamilies field. Both
// clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +listType=atomic
// +optional
ClusterIPs []string `json:"clusterIPs,omitempty" protobuf:"bytes,18,opt,name=clusterIPs"`
// type determines how the Service is exposed. Defaults to ClusterIP. Valid
// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
// "ClusterIP" allocates a cluster-internal IP address for load-balancing
// to endpoints. Endpoints are determined by the selector or if that is not
// specified, by manual construction of an Endpoints object or
// EndpointSlice objects. If clusterIP is "None", no virtual IP is
// allocated and the endpoints are published as a set of endpoints rather
// than a virtual IP.
// "NodePort" builds on ClusterIP and allocates a port on every node which
// routes to the same endpoints as the clusterIP.
// "LoadBalancer" builds on NodePort and creates an external load-balancer
// (if supported in the current cloud) which routes to the same endpoints
// as the clusterIP.
// "ExternalName" aliases this service to the specified externalName.
// Several other fields do not apply to ExternalName services.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
// +optional
Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
// externalIPs is a list of IP addresses for which nodes in the cluster
// will also accept traffic for this service. These IPs are not managed by
// Kubernetes. The user is responsible for ensuring that traffic arrives
// at a node with this IP. A common example is external load-balancers
// that are not part of the Kubernetes system.
// +optional
ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
// Supports "ClientIP" and "None". Used to maintain session affinity.
// Enable client IP based session affinity.
// Must be ClientIP or None.
// Defaults to None.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
// Only applies to Service Type: LoadBalancer.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
// Deprecated: This field was under-specified and its meaning varies across implementations,
// and it cannot support dual-stack.
// As of Kubernetes v1.24, users are encouraged to use implementation-specific annotations when available.
// This field may be removed in a future API version.
// +optional
LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
// If specified and supported by the platform, traffic through the cloud-provider
// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
// cloud-provider does not support the feature.
// More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
// +optional
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
// externalName is the external reference that discovery mechanisms will
// return as an alias for this service (e.g. a DNS CNAME record). No
// proxying will be involved. Must be a lowercase RFC-1123 hostname
// (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName".
// +optional
ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
// externalTrafficPolicy denotes if this Service desires to route external
// traffic to node-local or cluster-wide endpoints. "Local" preserves the
// client source IP and avoids a second hop for LoadBalancer and Nodeport
// type services, but risks potentially imbalanced traffic spreading.
// "Cluster" obscures the client source IP and may cause a second hop to
// another node, but should have good overall load-spreading.
// +optional
ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
// healthCheckNodePort specifies the healthcheck nodePort for the service.
// This only applies when type is set to LoadBalancer and
// externalTrafficPolicy is set to Local. If a value is specified, is
// in-range, and is not in use, it will be used. If not specified, a value
// will be automatically allocated. External systems (e.g. load-balancers)
// can use this port to determine if a given node holds endpoints for this
// service or not. If this field is specified when creating a Service
// which does not need it, creation will fail. This field will be wiped
// when updating a Service to no longer need it (e.g. changing type).
// +optional
HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
// publishNotReadyAddresses indicates that any agent which deals with endpoints for this
// Service should disregard any indications of ready/not-ready.
// The primary use case for setting this field is for a StatefulSet's Headless Service to
// propagate SRV DNS records for its Pods for the purpose of peer discovery.
// The Kubernetes controllers that generate Endpoints and EndpointSlice resources for
// Services interpret this to mean that all endpoints are considered "ready" even if the
// Pods themselves are not. Agents which consume only Kubernetes generated endpoints
// through the Endpoints or EndpointSlice resources can safely assume this behavior.
// +optional
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
// sessionAffinityConfig contains the configurations of session affinity.
// +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
// TopologyKeys is tombstoned to show why 16 is reserved protobuf tag.
//TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
// IPFamily is tombstoned to show why 15 is a reserved protobuf tag.
// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
// service. This field is usually assigned automatically based on cluster
// configuration and the ipFamilyPolicy field. If this field is specified
// manually, the requested family is available in the cluster,
// and ipFamilyPolicy allows it, it will be used; otherwise creation of
// the service will fail. This field is conditionally mutable: it allows
// for adding or removing a secondary IP family, but it does not allow
// changing the primary IP family of the Service. Valid values are "IPv4"
// and "IPv6". This field only applies to Services of types ClusterIP,
// NodePort, and LoadBalancer, and does apply to "headless" services.
// This field will be wiped when updating a Service to type ExternalName.
//
// This field may hold a maximum of two entries (dual-stack families, in
// either order). These families must correspond to the values of the
// clusterIPs field, if specified. Both clusterIPs and ipFamilies are
// governed by the ipFamilyPolicy field.
// +listType=atomic
// +optional
IPFamilies []IPFamily `json:"ipFamilies,omitempty" protobuf:"bytes,19,opt,name=ipFamilies,casttype=IPFamily"`
// IPFamilyPolicy represents the dual-stack-ness requested or required by
// this Service. If there is no value provided, then this field will be set
// to SingleStack. Services can be "SingleStack" (a single IP family),
// "PreferDualStack" (two IP families on dual-stack configured clusters or
// a single IP family on single-stack clusters), or "RequireDualStack"
// (two IP families on dual-stack configured clusters, otherwise fail). The
// ipFamilies and clusterIPs fields depend on the value of this field. This
// field will be wiped when updating a service to type ExternalName.
// +optional
IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"`
// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
// allocated for services with type LoadBalancer. Default is "true". It
// may be set to "false" if the cluster load-balancer does not rely on
// NodePorts. If the caller requests specific NodePorts (by specifying a
// value), those requests will be respected, regardless of this field.
// This field may only be set for services with type LoadBalancer and will
// be cleared if the type is changed to any other type.
// +optional
AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"`
// loadBalancerClass is the class of the load balancer implementation this Service belongs to.
// If specified, the value of this field must be a label-style identifier, with an optional prefix,
// e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users.
// This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load
// balancer implementation is used, today this is typically done through the cloud provider integration,
// but should apply for any default implementation. If set, it is assumed that a load balancer
// implementation is watching for Services with a matching class. Any default load balancer
// implementation (e.g. cloud providers) should ignore Services that set this field.
// This field can only be set when creating or updating a Service to type 'LoadBalancer'.
// Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.
// +featureGate=LoadBalancerClass
// +optional
LoadBalancerClass *string `json:"loadBalancerClass,omitempty" protobuf:"bytes,21,opt,name=loadBalancerClass"`
// InternalTrafficPolicy specifies if the cluster internal traffic
// should be routed to all endpoints or node-local endpoints only.
// "Cluster" routes internal traffic to a Service to all endpoints.
// "Local" routes traffic to node-local endpoints only, traffic is
// dropped if no node-local endpoints are ready.
// The default value is "Cluster".
// +featureGate=ServiceInternalTrafficPolicy
// +optional
InternalTrafficPolicy *ServiceInternalTrafficPolicyType `json:"internalTrafficPolicy,omitempty" protobuf:"bytes,22,opt,name=internalTrafficPolicy"`
}
// ServicePort describes a single port exposed by a Service: its name,
// protocol, the port the Service listens on, the target port on the
// backing pods, and the optional node port.
type ServicePort struct {
// The name of this port within the service. This must be a DNS_LABEL.
// All ports within a ServiceSpec must have unique names. When considering
// the endpoints for a Service, this must match the 'name' field in the
// EndpointPort.
// Optional if only one ServicePort is defined on this service.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
// Default is TCP.
// +default="TCP"
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// (Protobuf tag is 6 because tags 3-5 are used by the fields below.)
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
// The port that will be exposed by this service.
Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
// Number or name of the port to access on the pods targeted by the service.
// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
// If this is a string, it will be looked up as a named port in the
// target Pod's container ports. If this is not specified, the value
// of the 'port' field is used (an identity map).
// This field is ignored for services with clusterIP=None, and should be
// omitted or set equal to the 'port' field.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
// +optional
TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
// The port on each node on which this service is exposed when type is
// NodePort or LoadBalancer. Usually assigned by the system. If a value is
// specified, in-range, and not in use it will be used, otherwise the
// operation will fail. If not specified, a port will be allocated if this
// Service requires one. If this field is specified when creating a
// Service which does not need it, creation will fail. This field will be
// wiped when updating a Service to no longer need it (e.g. changing type
// from NodePort to ClusterIP).
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
// +optional
NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
// Spec holds the desired service configuration; Status holds the most
// recently observed state.
type Service struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of a service.
// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the service.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// ClusterIPNone marks a Service as "headless": no cluster IP is assigned,
// no proxying is required, and no service environment variables should be
// created for pods.
const ClusterIPNone = "None"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of services.
Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
//
// The CreateToken genclient marker above generates a client method for the
// "token" subresource, which issues API tokens for this ServiceAccount.
type ServiceAccount struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use.
// Pods are only limited to this list if this service account has a "kubernetes.io/enforce-mountable-secrets" annotation set to "true".
// This field should not be used to find auto-generated service account token secrets for use outside of pods.
// Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
// +optional
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
// Can be overridden at the pod level.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects
type ServiceAccountList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of ServiceAccounts.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc",
// Subsets: [
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// },
// {
// Addresses: [{"ip": "10.10.3.3"}],
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The set of all endpoints is the union of all subsets. Addresses are placed into
// subsets according to the IPs they share. A single address with multiple ports,
// some of which are ready and some of which are not (because they come from
// different containers) will result in the address being displayed in different
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
// Subsets is optional and may be empty.
// +optional
Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
// The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
// Within a subset, no address appears in both Addresses and
// NotReadyAddresses (see the Endpoints documentation).
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
// +optional
Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
// IP addresses which offer the related ports but are not currently marked as ready
// because they have not yet finished starting, have recently failed a readiness check,
// or have recently failed a liveness check.
// +optional
NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
// Port numbers available on the related IP addresses.
// +optional
Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes a single IP address.
// +structType=atomic
type EndpointAddress struct {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
// or link-local multicast (224.0.0.0/24).
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, See #4447.
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
// The Hostname of this endpoint
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
// +optional
NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
// Reference to object providing the endpoint.
// +optional
TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
// The struct is patched atomically (replaced as a whole, not merged
// per-field) per the marker below.
// +structType=atomic
type EndpointPort struct {
// The name of this port. This must match the 'name' field in the
// corresponding ServicePort.
// Must be a DNS_LABEL.
// Optional only if one port is defined.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The port number of the endpoint.
Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
// The IP protocol for this port.
// Must be UDP, TCP, or SCTP.
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
// The application protocol for this port.
// This field follows standard Kubernetes label syntax.
// Un-prefixed names are reserved for IANA standard service names (as per
// RFC-6335 and https://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Endpoints objects.
Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
// PodCIDR represents the pod IP range assigned to the node.
// If podCIDRs is also specified, this field must match its 0th entry.
// +optional
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
// podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
// field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
// each of IPv4 and IPv6.
// +optional
// +patchStrategy=merge
PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"`
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
// +optional
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
// If specified, the node's taints.
// +optional
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
// see: https://issues.k8s.io/61966
// +optional
DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
// This API is deprecated since 1.22
type NodeConfigSource struct {
// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
// but used a generic ObjectReference type that didn't really have the fields we needed
// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
// so there was no persisted data for these fields that needed to be migrated/handled.
// +k8s:deprecated=kind
// +k8s:deprecated=apiVersion
// +k8s:deprecated=configMapRef,protobuf=1
// ConfigMap is a reference to a Node's ConfigMap.
// It is currently the only source type that can be set.
ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
// Namespace, Name, and KubeletConfigKey are always required; UID and
// ResourceVersion are forbidden in Node.Spec and required in Node.Status.
// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
type ConfigMapNodeConfigSource struct {
// Namespace is the metadata.namespace of the referenced ConfigMap.
// This field is required in all cases.
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
// Name is the metadata.name of the referenced ConfigMap.
// This field is required in all cases.
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
// UID is the metadata.UID of the referenced ConfigMap.
// This field is forbidden in Node.Spec, and required in Node.Status.
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
// This field is forbidden in Node.Spec, and required in Node.Status.
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
// This field is required in all cases.
KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
// (Currently only the port number is recorded.)
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
// Currently only the kubelet's endpoint is listed.
type NodeDaemonEndpoints struct {
// Endpoint on which Kubelet is listening.
// +optional
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid
SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
// Kubelet Version reported by the node.
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
// The Operating System reported by the node.
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node.
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
// All fields are optional; a nil source or empty error means "not reported".
type NodeConfigStatus struct {
// Assigned reports the checkpointed config the node will try to use.
// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
// config payload to local disk, along with a record indicating intended
// config. The node refers to this record to choose its config checkpoint, and
// reports this record in Assigned. Assigned only updates in the status after
// the record has been checkpointed to disk. When the Kubelet is restarted,
// it tries to make the Assigned config the Active config by loading and
// validating the checkpointed payload identified by Assigned.
// +optional
Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
// Active reports the checkpointed config the node is actively using.
// Active will represent either the current version of the Assigned config,
// or the current LastKnownGood config, depending on whether attempting to use the
// Assigned config results in an error.
// +optional
Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
// LastKnownGood reports the checkpointed config the node will fall back to
// when it encounters an error attempting to use the Assigned config.
// The Assigned config becomes the LastKnownGood config when the node determines
// that the Assigned config is stable and correct.
// This is currently implemented as a 10-minute soak period starting when the local
// record of Assigned config is updated. If the Assigned config is Active at the end
// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
// because the local default config is always assumed good.
// You should not make assumptions about the node's method of determining config stability
// and correctness, as this may change or become configurable in the future.
// +optional
LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
// to load or validate the Assigned config, etc.
// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
// by fixing the config assigned in Spec.ConfigSource.
// You can find additional information for debugging by searching the error message in the Kubelet log.
// Error is a human-readable description of the error state; machines can check whether or not Error
// is empty, but should not rely on the stability of the Error text across Kubelet versions.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// Allocatable represents the resources of a node that are available for scheduling.
// Defaults to Capacity.
// +optional
Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
// NodePhase is the recently observed lifecycle phase of the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
// The field is never populated, and now is deprecated.
// +optional
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
// Conditions is an array of current observed node conditions.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
// Note: This field is declared as mergeable, but the merge key is not sufficiently
// unique, which can cause data corruption when it is merged. Callers should instead
// use a full-replacement patch. See http://pr.k8s.io/79391 for an example.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
// Endpoints of daemons running on the Node.
// +optional
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
// Set of ids/uuids to uniquely identify the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
// +optional
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node.
// +optional
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
// List of attachable volumes in use (mounted) by the node.
// +optional
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
// Status of the config assigned to the node via the dynamic Kubelet config feature.
// +optional
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
// UniqueVolumeName is the name key used to identify a volume attached to a
// node (see NodeStatus.VolumesInUse and AttachedVolume.Name).
type UniqueVolumeName string

// AttachedVolume describes a volume attached to a node
type AttachedVolume struct {
	// Name of the attached volume
	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
	// DevicePath represents the device path where the volume should be available
	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
	// PreferAvoidPods is a bounded-sized list of signatures of pods that should avoid this node,
	// sorted in timestamp order from oldest to newest. Size of the slice is unspecified.
	// +optional
	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
	// PodSignature identifies the class of pods.
	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
	// EvictionTime is the time at which this entry was added to the list.
	// +optional
	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
	// Reason is a (brief) reason why this entry was added to the list.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message is a human readable message indicating why this entry was added to the list.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
	// PodController is a reference to the controller whose pods should avoid this node.
	// +optional
	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image present on a node.
type ContainerImage struct {
	// Names by which this image is known.
	// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
	// +optional
	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
	// The size of the image in bytes.
	// +optional
	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the recently observed lifecycle phase of a node.
// Note: the NodeStatus.Phase field carrying this value is never populated
// and is deprecated.
// +enum
type NodePhase string

// These are the valid phases of node.
const (
	// NodePending means the node has been created/added by the system, but not configured.
	NodePending NodePhase = "Pending"
	// NodeRunning means the node has been configured and has Kubernetes components running.
	NodeRunning NodePhase = "Running"
	// NodeTerminated means the node has been removed from the cluster.
	NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType identifies the type of a NodeCondition.
type NodeConditionType string

// These are valid but not exhaustive conditions of node. A cloud provider may set a condition not listed here.
// The built-in set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
	// NodeReady means kubelet is healthy and ready to accept pods.
	NodeReady NodeConditionType = "Ready"
	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
	NodeMemoryPressure NodeConditionType = "MemoryPressure"
	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
	NodePIDPressure NodeConditionType = "PIDPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
	// Type of node condition.
	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we got an update on a given condition.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// (brief) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType identifies the type of a NodeAddress.
type NodeAddressType string

// These are built-in addresses type of node. A cloud provider may set a type not listed here.
const (
	// NodeHostName identifies a name of the node. Although every node can be assumed
	// to have a NodeAddress of this type, its exact syntax and semantics are not
	// defined, and are not consistent between different clusters.
	NodeHostName NodeAddressType = "Hostname"
	// NodeInternalIP identifies an IP address which is assigned to one of the node's
	// network interfaces. Every node should have at least one address of this type.
	//
	// An internal IP is normally expected to be reachable from every other node, but
	// may not be visible to hosts outside the cluster. By default it is assumed that
	// kube-apiserver can reach node internal IPs, though it is possible to configure
	// clusters where this is not the case.
	//
	// NodeInternalIP is the default type of node IP, and does not necessarily imply
	// that the IP is ONLY reachable internally. If a node has multiple internal IPs,
	// no specific semantics are assigned to the additional IPs.
	NodeInternalIP NodeAddressType = "InternalIP"
	// NodeExternalIP identifies an IP address which is, in some way, intended to be
	// more usable from outside the cluster than an internal IP, though no specific
	// semantics are defined. It may be a globally routable IP, though it is not
	// required to be.
	//
	// External IPs may be assigned directly to an interface on the node, like a
	// NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed
	// to an internal node IP rather than being delivered directly (making the IP less
	// efficient for node-to-node traffic than a NodeInternalIP).
	NodeExternalIP NodeAddressType = "ExternalIP"
	// NodeInternalDNS identifies a DNS name which resolves to an IP address which has
	// the characteristics of a NodeInternalIP. The IP it resolves to may or may not
	// be a listed NodeInternalIP address.
	NodeInternalDNS NodeAddressType = "InternalDNS"
	// NodeExternalDNS identifies a DNS name which resolves to an IP address which has
	// the characteristics of a NodeExternalIP. The IP it resolves to may or may not
	// be a listed NodeExternalIP address.
	NodeExternalDNS NodeAddressType = "ExternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
	// Node address type, e.g. Hostname, ExternalIP or InternalIP
	// (see NodeAddressType for the built-in set).
	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
	// The node address.
	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string

// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
	// CPU, in cores. (500m = .5 cores)
	ResourceCPU ResourceName = "cpu"
	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceMemory ResourceName = "memory"
	// Volume size, in bytes. (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
	ResourceStorage ResourceName = "storage"
	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)

const (
	// Default namespace prefix.
	ResourceDefaultNamespacePrefix = "kubernetes.io/"
	// Name prefix for huge page resources (alpha).
	ResourceHugePagesPrefix = "hugepages-"
	// Name prefix for storage resource limits.
	ResourceAttachableVolumesPrefix = "attachable-volumes-"
)

// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a node.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the node.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of nodes.
	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string

// These are internal finalizer values to Kubernetes; they must be qualified names unless
// defined here or in metav1.
const (
	FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove the object from storage.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
	// Phase is the current lifecycle phase of the namespace.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
	// Conditions represents the latest available observations of a namespace's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// NamespacePhase is the current lifecycle phase of a namespace.
// +enum
type NamespacePhase string

// These are the valid phases of a namespace.
const (
	// NamespaceActive means the namespace is available for use in the system
	NamespaceActive NamespacePhase = "Active"
	// NamespaceTerminating means the namespace is undergoing graceful termination
	NamespaceTerminating NamespacePhase = "Terminating"
)
const (
	// NamespaceTerminatingCause is returned as a defaults.cause item when a change is
	// forbidden due to the namespace being terminated.
	NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating"
)

// NamespaceConditionType identifies the type of a NamespaceCondition.
type NamespaceConditionType string

// These are built-in conditions of a namespace.
const (
	// NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery.
	NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure"
	// NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources.
	NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure"
	// NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types.
	NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure"
	// NamespaceContentRemaining contains information about resources remaining in a namespace.
	NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining"
	// NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace.
	NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining"
)
// NamespaceCondition contains details about state of namespace.
type NamespaceCondition struct {
	// Type of namespace controller condition.
	Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// LastTransitionTime is the last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Reason is a (brief) machine-readable reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Message is a human-readable message indicating details about the last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of the Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current status of a Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NamespaceList is a list of Namespaces.
type NamespaceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of Namespace objects.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Target is the object that you want to bind the standard object to.
	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
	// UID specifies the target UID.
	// +optional
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
	metav1.TypeMeta `json:",inline"`
	// The container for which to stream logs. Defaults to only container if there is one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow the log stream of the pod. Defaults to false.
	// +optional
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Return previous terminated container logs. Defaults to false.
	// +optional
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output. Defaults to false.
	// +optional
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
	// +optional
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
	// insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the
	// serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver
	// and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real
	// kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the
	// connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept
	// the actual log data coming from the real kubelet).
	// +optional
	InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Stdin if true, redirects the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Stdout if true indicates that stdout is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Stderr if true indicates that stderr is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the attach call.
	// This is passed through the container runtime so the tty
	// is allocated on the worker node by the container runtime.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// The container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Redirect the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Redirect the standard output stream of the pod for this call.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Redirect the standard error stream of the pod for this call.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the exec call.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// Container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
	// Command is the remote command to execute. argv array. Not executed within a shell.
	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Ports is the list of ports to forward.
	// Required when using WebSockets.
	// +optional
	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to the pod.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to the node.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}

// +k8s:conversion-gen:explicit-from=net/url.Values
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the part of URLs that include service endpoints, suffixes,
	// and parameters to use for the current proxy request to service.
	// For example, the whole request URL is
	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
	// Path is _search?q=user:kimchy.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// ---
// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.
//  1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.
//  2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular
//     restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted".
//     Those cannot be well described when embedded.
//  3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.
//  4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity
//     during interpretation and require a REST mapping. In most cases, the dependency is on the group, resource tuple
//     and the version of the actual struct is irrelevant.
//  5. We cannot easily change it. Because this type is embedded in many locations, updates to this type
//     will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.
//
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +structType=atomic
type ObjectReference struct {
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// Namespace of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// API version of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
	// Specific resourceVersion to which this reference is made, if any.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// If referring to a piece of an object instead of an entire object, this string
	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
	// For example, if the object reference is to a container within a pod, this would take on a value like:
	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
	// the event) or if no container name is specified "spec.containers[2]" (container with
	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
// +structType=atomic
type LocalObjectReference struct {
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// TODO: Add other useful fields. apiVersion, kind, uid?
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}

// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
// +structType=atomic
type TypedLocalObjectReference struct {
	// APIGroup is the group for the resource being referenced.
	// If APIGroup is not specified, the specified Kind must be in the core API group.
	// For any other third-party types, APIGroup is required.
	// +optional
	APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
	// Kind is the type of the resource being referenced.
	Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"`
	// Name is the name of the resource being referenced.
	Name string `json:"name" protobuf:"bytes,3,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SerializedReference is a reference to serialized object.
type SerializedReference struct {
	metav1.TypeMeta `json:",inline"`
	// Reference is the reference to an object in the system.
	// +optional
	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information for an event, identifying where the
// event originated (which component, on which node).
type EventSource struct {
	// Component from which the event is generated.
	// +optional
	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
	// Node name on which the event is generated.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future).
// These are the values used in the Event.Type field.
const (
	// Information only and will not cause any problems
	EventTypeNormal string = "Normal"
	// These events are to warn that something might go wrong
	EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster. Events
// have a limited retention time and triggers and messages may evolve
// with time. Event consumers should not rely on the timing of an event
// with a given Reason reflecting a consistent underlying trigger, or the
// continued existence of events with that Reason. Events should be
// treated as informative, best-effort, supplemental data.
type Event struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// Note: metadata is required here (no omitempty), unlike most objects.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
	// The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
	// This should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status.
	// TODO: provide exact specification for format.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// The component reporting this event. Should be a short machine understandable string.
	// +optional
	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
	// +optional
	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
	// The time at which the most recent occurrence of this event was recorded.
	// +optional
	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
	// The number of times this event has occurred.
	// +optional
	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
	// Type of this event (Normal, Warning), new types could be added in the future
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
	// Time when this Event was first observed.
	// +optional
	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
	// Data about the Event series this event represents or nil if it's a singleton Event.
	// +optional
	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
	// What action was taken/failed regarding to the Regarding object.
	// +optional
	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
	// Optional secondary object for more complex actions.
	// +optional
	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
	// NOTE: the Go field is ReportingController but it serializes as
	// "reportingComponent" (see the json/protobuf tags); do not "fix" the tags,
	// the wire name is part of the API.
	// +optional
	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
	// ID of the controller instance, e.g. `kubelet-xyzf`.
	// +optional
	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contain information on series of events, i.e. thing that was/is happening
// continuously for some time.
type EventSeries struct {
	// Number of occurrences in this series up to the last heartbeat time
	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
	// Time of the last occurrence observed
	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
	// Protobuf field 3 was the removed "state" field; the tag number stays
	// reserved via the marker below and must not be reused.
	// +k8s:deprecated=state,protobuf=3
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of events
	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
// It is a named alias of metav1.List so the core group exposes the kind.
type List metav1.List
// LimitType is a type of object that is limited. It can be Pod, Container, PersistentVolumeClaim or
// a fully qualified resource name.
type LimitType string
// Built-in LimitType values; a LimitRangeItem.Type may also carry a fully
// qualified resource name outside this set.
const (
	// Limit that applies to all pods in a namespace
	LimitTypePod LimitType = "Pod"
	// Limit that applies to all containers in a namespace
	LimitTypeContainer LimitType = "Container"
	// Limit that applies to all persistent volume claims in a namespace
	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
// All constraint fields are keyed by resource name (e.g. cpu, memory).
type LimitRangeItem struct {
	// Type of resource that this limit applies to.
	Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
	// Max usage constraints on this kind by resource name.
	// +optional
	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
	// Min usage constraints on this kind by resource name.
	// +optional
	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
	// Default resource requirement limit value by resource name if resource limit is omitted.
	// +optional
	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
	// +optional
	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
	// +optional
	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
	// Limits is the list of LimitRangeItem objects that are enforced.
	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the limits enforced.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of LimitRange objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types.
// These ResourceName values are usable as keys in ResourceQuota hard limits.
const (
	// Pods, number
	ResourcePods ResourceName = "pods"
	// Services, number
	ResourceServices ResourceName = "services"
	// ReplicationControllers, number
	ResourceReplicationControllers ResourceName = "replicationcontrollers"
	// ResourceQuotas, number
	ResourceQuotas ResourceName = "resourcequotas"
	// ResourceSecrets, number
	ResourceSecrets ResourceName = "secrets"
	// ResourceConfigMaps, number
	ResourceConfigMaps ResourceName = "configmaps"
	// ResourcePersistentVolumeClaims, number
	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
	// ResourceServicesNodePorts, number
	ResourceServicesNodePorts ResourceName = "services.nodeports"
	// ResourceServicesLoadBalancers, number
	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
	// CPU request, in cores. (500m = .5 cores)
	ResourceRequestsCPU ResourceName = "requests.cpu"
	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsMemory ResourceName = "requests.memory"
	// Storage request, in bytes
	ResourceRequestsStorage ResourceName = "requests.storage"
	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
	// CPU limit, in cores. (500m = .5 cores)
	ResourceLimitsCPU ResourceName = "limits.cpu"
	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefix for Kubernetes object types.
// A full resource name is formed by appending a suffix (e.g. a hugepage size)
// to one of these prefixes.
const (
	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
	// Default resource requests prefix
	DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
// +enum
type ResourceQuotaScope string
// Valid ResourceQuotaScope values.
const (
	// Match all pod objects where spec.activeDeadlineSeconds >=0
	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
	// Match all pod objects where spec.activeDeadlineSeconds is nil
	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
	// Match all pod objects that have best effort quality of service
	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
	// Match all pod objects that do not have best effort quality of service
	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
	// Match all pod objects that have priority class mentioned
	ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
	// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
	ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
	// hard is the set of desired hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// A collection of filters that must match each object tracked by a quota.
	// If not specified, the quota matches all objects.
	// +optional
	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
	// scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota
	// but expressed using ScopeSelectorOperator in combination with possible values.
	// For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.
	// +optional
	ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"`
}
// A scope selector represents the AND of the selectors represented
// by the scoped-resource selector requirements.
// +structType=atomic
type ScopeSelector struct {
	// A list of scope selector requirements by scope of the resources.
	// All requirements must match (logical AND).
	// +optional
	MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
}
// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator
// that relates the scope name and values.
type ScopedResourceSelectorRequirement struct {
	// The name of the scope that the selector applies to.
	ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"`
	// Represents a scope's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist.
	Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A scope selector operator is the set of operators that can be used in
// a scope selector requirement.
// +enum
type ScopeSelectorOperator string
// Valid ScopeSelectorOperator values.
const (
	ScopeSelectorOpIn ScopeSelectorOperator = "In"
	ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn"
	ScopeSelectorOpExists ScopeSelectorOperator = "Exists"
	ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist"
)
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
	// Hard is the set of enforced hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// Used is the current observed total usage of the resource in the namespace.
	// +optional
	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired quota.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status defines the actual enforced quota and its current usage.
	// https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of ResourceQuota objects.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Immutable, if set to true, ensures that data stored in the Secret cannot
	// be updated (only object metadata can be modified).
	// If not set to true, the field can be modified at any time.
	// Defaulted to nil.
	// (Protobuf field 5: this field was added after Type, hence the out-of-order tag.)
	// +optional
	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"`
	// Data contains the secret data. Each key must consist of alphanumeric
	// characters, '-', '_' or '.'. The serialized form of the secret data is a
	// base64 encoded string, representing the arbitrary (possibly non-string)
	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
	// +optional
	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// stringData allows specifying non-binary secret data in string form.
	// It is provided as a write-only input field for convenience.
	// All keys and values are merged into the data field on write, overwriting any existing values.
	// The stringData field is never output when reading from the API.
	// +k8s:conversion-gen=false
	// +optional
	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
	// Used to facilitate programmatic handling of secret data.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types
	// +optional
	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the upper bound, in bytes, on the total size of a Secret's data values (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024
// SecretType describes the programmatic type of a Secret's contents; see the constants below.
type SecretType string
// Well-known SecretType values and the data/annotation keys each type requires.
const (
	// SecretTypeOpaque is the default. Arbitrary user-defined data
	SecretTypeOpaque SecretType = "Opaque"
	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
	//
	// Required fields:
	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
	// - Secret.Data["token"] - a token that identifies the service account to the API
	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountNameKey = "kubernetes.io/service-account.name"
	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
	ServiceAccountTokenKey = "token"
	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
	ServiceAccountRootCAKey = "ca.crt"
	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
	ServiceAccountNamespaceKey = "namespace"
	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
	//
	// Required fields:
	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
	DockerConfigKey = ".dockercfg"
	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
	//
	// Required fields:
	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
	DockerConfigJsonKey = ".dockerconfigjson"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
	// - Secret.Data["username"] - username used for authentication
	// - Secret.Data["password"] - password or token needed for authentication
	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
	BasicAuthUsernameKey = "username"
	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
	BasicAuthPasswordKey = "password"
	// SecretTypeSSHAuth contains data needed for SSH authentication.
	//
	// Required field:
	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
	SSHAuthPrivateKey = "ssh-privatekey"
	// SecretTypeTLS contains information about a TLS client or server secret. It
	// is primarily used with TLS termination of the Ingress resource, but may be
	// used in other types.
	//
	// Required fields:
	// - Secret.Data["tls.key"] - TLS private key.
	// - Secret.Data["tls.crt"] - TLS certificate.
	// TODO: Consider supporting different formats, specifying CA/destinationCA.
	SecretTypeTLS SecretType = "kubernetes.io/tls"
	// TLSCertKey is the key for tls certificates in a TLS secret.
	TLSCertKey = "tls.crt"
	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
	TLSPrivateKeyKey = "tls.key"
	// SecretTypeBootstrapToken is used during the automated bootstrap process (first
	// implemented by kubeadm). It stores tokens that are used to sign well known
	// ConfigMaps. They are used for authn.
	SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of secret objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Immutable, if set to true, ensures that data stored in the ConfigMap cannot
	// be updated (only object metadata can be modified).
	// If not set to true, the field can be modified at any time.
	// Defaulted to nil.
	// (Protobuf field 4: this field was added after BinaryData, hence the out-of-order tag.)
	// +optional
	Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"`
	// Data contains the configuration data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// Values with non-UTF-8 byte sequences must use the BinaryData field.
	// The keys stored in Data must not overlap with the keys in
	// the BinaryData field, this is enforced during validation process.
	// +optional
	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// BinaryData contains the binary data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// BinaryData can contain byte sequences that are not in the UTF-8 range.
	// The keys stored in BinaryData must not overlap with the ones in
	// the Data field, this is enforced during validation process.
	// Using this field will require 1.10+ apiserver and
	// kubelet.
	// +optional
	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ConfigMaps.
	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type of condition for component health validation.
type ComponentConditionType string
// These are the valid conditions for the component.
const (
	ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
	// Type of condition for a component.
	// Valid value: "Healthy"
	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
	// Status of the condition for a component.
	// Valid values for "Healthy": "True", "False", or "Unknown".
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Message about the condition for a component.
	// For example, information about a health check.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// Condition error code for a component.
	// For example, a health check error code.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
// Deprecated: This API is deprecated in v1.19+
type ComponentStatus struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of component conditions observed
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList holds the status of all the conditions for the component
// as a list of ComponentStatus objects.
// Deprecated: This API is deprecated in v1.19+
type ComponentStatusList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ComponentStatus objects.
	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits used to set permissions on created files by default.
	// Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// DownwardAPIVolumeSourceDefaultMode is the default file mode applied when
// DownwardAPIVolumeSource.DefaultMode is unset.
const (
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// Exactly one of FieldRef or ResourceFieldRef should be set per file.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits used to set permissions on this file, must be an octal value
	// between 0000 and 0777 or a decimal value between 0 and 511.
	// YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
	// If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The Windows specific settings applied to all containers.
	// If unspecified, the options from the PodSecurityContext will be used.
	// If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is linux.
	// +optional
	WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is always true when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
	// procMount denotes the type of proc mount to use for the containers.
	// The default is DefaultProcMount which uses the container runtime defaults for
	// readonly paths and masked paths.
	// This requires the ProcMountType feature flag to be enabled.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"`
	// The seccomp options to use by this container. If seccomp options are
	// provided at both the pod & container level, the container options
	// override the pod options.
	// Note that this field cannot be set when spec.os.name is windows.
	// +optional
	SeccompProfile *SeccompProfile `json:"seccompProfile,omitempty" protobuf:"bytes,11,opt,name=seccompProfile"`
}
// +enum
type ProcMountType string

const (
	// DefaultProcMount uses the container runtime defaults for readonly and masked
	// paths for /proc. Most container runtimes mask certain paths in /proc to avoid
	// accidental security exposure of special devices or information.
	DefaultProcMount ProcMountType = "Default"

	// UnmaskedProcMount bypasses the default masking behavior of the container
	// runtime and ensures the newly created /proc for the container stays intact
	// with no modifications.
	UnmaskedProcMount ProcMountType = "Unmasked"
)
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
	// User is the SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is the SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is the SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is the SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// WindowsSecurityContextOptions contain Windows-specific options and credentials.
type WindowsSecurityContextOptions struct {
	// GMSACredentialSpecName is the name of the GMSA credential spec to use.
	// +optional
	GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"`
	// GMSACredentialSpec is where the GMSA admission webhook
	// (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
	// GMSA credential spec named by the GMSACredentialSpecName field.
	// +optional
	GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"`
	// The UserName in Windows to run the entrypoint of the container process.
	// Defaults to the user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
	// HostProcess determines if a container should be run as a 'Host Process' container.
	// This field is alpha-level and will only be honored by components that enable the
	// WindowsHostProcessContainers feature flag. Setting this field without the feature
	// flag will result in errors when validating the Pod. All of a Pod's containers must
	// have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
	// containers and non-HostProcess containers). In addition, if HostProcess is true
	// then HostNetwork must also be set to true.
	// NOTE(review): other *bool fields in this file use the `varint` protobuf wire
	// type (e.g. Privileged); confirm `bytes` here matches the generated proto.
	// +optional
	HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is a string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
	// DefaultSchedulerName defines the name of the default scheduler.
	DefaultSchedulerName = "default-scheduler"

	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set
type Sysctl struct {
	// Name of a property to set
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Value of a property to set
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}

// NodeResources is an object for conveying resource information about a node.
// see https://kubernetes.io/docs/concepts/architecture/nodes/#capacity for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Parameter and header names used by the remote command execution and
// port-forwarding subprotocols (exec/attach/port-forward streams).
const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"

	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"

	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
// PortStatus represents the error condition of a service port
type PortStatus struct {
	// Port is the port number of the service port of which status is recorded here
	Port int32 `json:"port" protobuf:"varint,1,opt,name=port"`
	// Protocol is the protocol of the service port of which status is recorded here
	// The supported values are: "TCP", "UDP", "SCTP"
	Protocol Protocol `json:"protocol" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// Error is to record the problem with the service port
	// The format of the error shall comply with the following rules:
	// - built-in error values shall be specified in this file and those shall use
	//   CamelCase names
	// - cloud provider specific error values must have names that comply with the
	//   format foo.example.com/CamelCase.
	// ---
	// The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
	// NOTE(review): the field carries both +optional and
	// +kubebuilder:validation:Required markers, which contradict each other;
	// confirm which is intended.
	// +optional
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
	// +kubebuilder:validation:MaxLength=316
	Error *string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
}
|
package main

// Version is the current release version of this program.
const Version = "0.0.3"
Bump version.
package main

// Version is the current release version of this program.
const Version = "0.0.4"
|
package datastore
import (
"io"
"log"
dsq "github.com/ipfs/go-datastore/query"
)
// Here are some basic datastore implementations.

// keyMap is the in-memory storage backing a MapDatastore: it maps a
// datastore Key to the stored value.
type keyMap map[Key]interface{}

// MapDatastore uses a standard Go map for internal storage.
// NOTE: there is no locking; a MapDatastore is not safe for concurrent use.
type MapDatastore struct {
	values keyMap
}
// NewMapDatastore constructs a MapDatastore backed by a fresh, empty map.
func NewMapDatastore() (d *MapDatastore) {
	return &MapDatastore{
		values: keyMap{},
	}
}

// Put implements Datastore.Put. It stores value under key, overwriting
// any existing entry; it never returns an error.
func (d *MapDatastore) Put(key Key, value interface{}) (err error) {
	d.values[key] = value
	return nil
}
// Get implements Datastore.Get. It returns ErrNotFound when nothing
// has been stored under key.
func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
	if v, ok := d.values[key]; ok {
		return v, nil
	}
	return nil, ErrNotFound
}
// Has implements Datastore.Has. It reports whether key is present;
// the returned error is always nil.
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
	_, found := d.values[key]
	return found, nil
}
// Delete implements Datastore.Delete. It removes key's entry, or
// returns ErrNotFound when there is nothing to remove.
func (d *MapDatastore) Delete(key Key) (err error) {
	if _, ok := d.values[key]; ok {
		delete(d.values, key)
		return nil
	}
	return ErrNotFound
}
// Query implements Datastore.Query. It snapshots every entry into a
// result set and applies the query naively in memory via
// dsq.NaiveQueryApply (filters, orders, offset/limit).
func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) {
	re := make([]dsq.Entry, 0, len(d.values))
	for k, v := range d.values {
		re = append(re, dsq.Entry{Key: k.String(), Value: v})
	}
	r := dsq.ResultsWithEntries(q, re)
	r = dsq.NaiveQueryApply(q, r)
	return r, nil
}

// Batch returns a basic batch wrapping d (see NewBasicBatch).
func (d *MapDatastore) Batch() (Batch, error) {
	return NewBasicBatch(d), nil
}

// Close is a no-op; a MapDatastore holds no external resources.
func (d *MapDatastore) Close() error {
	return nil
}
// NullDatastore stores nothing, but conforms to the API.
// Useful to test with.
type NullDatastore struct {
}

// NewNullDatastore constructs a null datastore
func NewNullDatastore() *NullDatastore {
	return &NullDatastore{}
}

// Put implements Datastore.Put; the value is discarded.
func (d *NullDatastore) Put(key Key, value interface{}) (err error) {
	return nil
}

// Get implements Datastore.Get. Note that it returns (nil, nil) for
// every key rather than ErrNotFound.
func (d *NullDatastore) Get(key Key) (value interface{}, err error) {
	return nil, nil
}

// Has implements Datastore.Has; no key is ever present.
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
	return false, nil
}

// Delete implements Datastore.Delete; it always succeeds.
func (d *NullDatastore) Delete(key Key) (err error) {
	return nil
}

// Query implements Datastore.Query; the result set is always empty.
func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) {
	return dsq.ResultsWithEntries(q, nil), nil
}

// Batch returns a basic batch wrapping d (see NewBasicBatch).
func (d *NullDatastore) Batch() (Batch, error) {
	return NewBasicBatch(d), nil
}

// Close is a no-op.
func (d *NullDatastore) Close() error {
	return nil
}
// LogDatastore logs all accesses through the datastore.
type LogDatastore struct {
	Name  string // prefix used in every log line
	child Datastore
}

// Shim is a datastore which has a child.
type Shim interface {
	Datastore
	Children() []Datastore
}

// NewLogDatastore constructs a log datastore wrapping ds. An empty
// name defaults to "LogDatastore".
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
	if len(name) < 1 {
		name = "LogDatastore"
	}
	return &LogDatastore{Name: name, child: ds}
}

// Children implements Shim, returning the single wrapped datastore.
func (d *LogDatastore) Children() []Datastore {
	return []Datastore{d.child}
}
// Put implements Datastore.Put, logging the key before delegating.
func (d *LogDatastore) Put(key Key, value interface{}) (err error) {
	log.Printf("%s: Put %s\n", d.Name, key)
	// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
	return d.child.Put(key, value)
}

// Get implements Datastore.Get, logging the key before delegating.
func (d *LogDatastore) Get(key Key) (value interface{}, err error) {
	log.Printf("%s: Get %s\n", d.Name, key)
	return d.child.Get(key)
}

// Has implements Datastore.Has, logging the key before delegating.
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
	log.Printf("%s: Has %s\n", d.Name, key)
	return d.child.Has(key)
}

// Delete implements Datastore.Delete, logging the key before delegating.
func (d *LogDatastore) Delete(key Key) (err error) {
	log.Printf("%s: Delete %s\n", d.Name, key)
	return d.child.Delete(key)
}

// Query implements Datastore.Query, logging the call before delegating.
func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) {
	log.Printf("%s: Query\n", d.Name)
	return d.child.Query(q)
}

// Batch logs the call and returns the child's batch when the child
// supports Batching; otherwise it returns ErrBatchUnsupported.
func (d *LogDatastore) Batch() (Batch, error) {
	log.Printf("%s: Batch\n", d.Name)
	if bds, ok := d.child.(Batching); ok {
		return bds.Batch()
	}
	return nil, ErrBatchUnsupported
}

// Close logs the call and closes the child when it is an io.Closer.
func (d *LogDatastore) Close() error {
	log.Printf("%s: Close\n", d.Name)
	if cds, ok := d.child.(io.Closer); ok {
		return cds.Close()
	}
	return nil
}
Log more in LogDatastore
package datastore
import (
"io"
"log"
dsq "github.com/ipfs/go-datastore/query"
)
// Here are some basic datastore implementations.

// keyMap is the in-memory storage backing a MapDatastore: it maps a
// datastore Key to the stored value.
type keyMap map[Key]interface{}

// MapDatastore uses a standard Go map for internal storage.
// NOTE: there is no locking; a MapDatastore is not safe for concurrent use.
type MapDatastore struct {
	values keyMap
}
// NewMapDatastore constructs a MapDatastore backed by a fresh, empty map.
func NewMapDatastore() (d *MapDatastore) {
	return &MapDatastore{
		values: keyMap{},
	}
}

// Put implements Datastore.Put. It stores value under key, overwriting
// any existing entry; it never returns an error.
func (d *MapDatastore) Put(key Key, value interface{}) (err error) {
	d.values[key] = value
	return nil
}

// Get implements Datastore.Get. It returns ErrNotFound when nothing
// has been stored under key.
func (d *MapDatastore) Get(key Key) (value interface{}, err error) {
	val, found := d.values[key]
	if !found {
		return nil, ErrNotFound
	}
	return val, nil
}

// Has implements Datastore.Has. It reports whether key is present;
// the returned error is always nil.
func (d *MapDatastore) Has(key Key) (exists bool, err error) {
	_, found := d.values[key]
	return found, nil
}

// Delete implements Datastore.Delete. It removes key's entry, or
// returns ErrNotFound when there is nothing to remove.
func (d *MapDatastore) Delete(key Key) (err error) {
	if _, found := d.values[key]; !found {
		return ErrNotFound
	}
	delete(d.values, key)
	return nil
}

// Query implements Datastore.Query. It snapshots every entry into a
// result set and applies the query naively in memory via
// dsq.NaiveQueryApply (filters, orders, offset/limit).
func (d *MapDatastore) Query(q dsq.Query) (dsq.Results, error) {
	re := make([]dsq.Entry, 0, len(d.values))
	for k, v := range d.values {
		re = append(re, dsq.Entry{Key: k.String(), Value: v})
	}
	r := dsq.ResultsWithEntries(q, re)
	r = dsq.NaiveQueryApply(q, r)
	return r, nil
}

// Batch returns a basic batch wrapping d (see NewBasicBatch).
func (d *MapDatastore) Batch() (Batch, error) {
	return NewBasicBatch(d), nil
}

// Close is a no-op; a MapDatastore holds no external resources.
func (d *MapDatastore) Close() error {
	return nil
}
// NullDatastore stores nothing, but conforms to the API.
// Useful to test with.
type NullDatastore struct {
}

// NewNullDatastore constructs a null datastore
func NewNullDatastore() *NullDatastore {
	return &NullDatastore{}
}

// Put implements Datastore.Put; the value is discarded.
func (d *NullDatastore) Put(key Key, value interface{}) (err error) {
	return nil
}

// Get implements Datastore.Get. Note that it returns (nil, nil) for
// every key rather than ErrNotFound.
func (d *NullDatastore) Get(key Key) (value interface{}, err error) {
	return nil, nil
}

// Has implements Datastore.Has; no key is ever present.
func (d *NullDatastore) Has(key Key) (exists bool, err error) {
	return false, nil
}

// Delete implements Datastore.Delete; it always succeeds.
func (d *NullDatastore) Delete(key Key) (err error) {
	return nil
}

// Query implements Datastore.Query; the result set is always empty.
func (d *NullDatastore) Query(q dsq.Query) (dsq.Results, error) {
	return dsq.ResultsWithEntries(q, nil), nil
}

// Batch returns a basic batch wrapping d (see NewBasicBatch).
func (d *NullDatastore) Batch() (Batch, error) {
	return NewBasicBatch(d), nil
}

// Close is a no-op.
func (d *NullDatastore) Close() error {
	return nil
}
// LogDatastore logs all accesses through the datastore.
type LogDatastore struct {
	Name  string // prefix used in every log line
	child Datastore
}

// Shim is a datastore which has a child.
type Shim interface {
	Datastore
	Children() []Datastore
}

// NewLogDatastore constructs a log datastore wrapping ds. An empty
// name defaults to "LogDatastore".
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
	if len(name) < 1 {
		name = "LogDatastore"
	}
	return &LogDatastore{Name: name, child: ds}
}
// Children implements Shim, returning the single wrapped datastore.
func (d *LogDatastore) Children() []Datastore {
	return []Datastore{d.child}
}

// Put implements Datastore.Put, logging the key before delegating.
func (d *LogDatastore) Put(key Key, value interface{}) (err error) {
	log.Printf("%s: Put %s\n", d.Name, key)
	// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
	return d.child.Put(key, value)
}

// Get implements Datastore.Get, logging the key before delegating.
func (d *LogDatastore) Get(key Key) (value interface{}, err error) {
	log.Printf("%s: Get %s\n", d.Name, key)
	return d.child.Get(key)
}

// Has implements Datastore.Has, logging the key before delegating.
func (d *LogDatastore) Has(key Key) (exists bool, err error) {
	log.Printf("%s: Has %s\n", d.Name, key)
	return d.child.Has(key)
}

// Delete implements Datastore.Delete, logging the key before delegating.
func (d *LogDatastore) Delete(key Key) (err error) {
	log.Printf("%s: Delete %s\n", d.Name, key)
	return d.child.Delete(key)
}
// Query implements Datastore.Query, logging the query's parameters
// before delegating to the child datastore.
func (d *LogDatastore) Query(q dsq.Query) (dsq.Results, error) {
	log.Printf("%s: Query\n", d.Name)
	log.Printf("%s: q.Prefix: %s\n", d.Name, q.Prefix)
	// Fix: KeysOnly is a bool; logging it with %s printed the go vet
	// error form "%!s(bool=...)". %v formats it as true/false.
	log.Printf("%s: q.KeysOnly: %v\n", d.Name, q.KeysOnly)
	log.Printf("%s: q.Filters: %d\n", d.Name, len(q.Filters))
	log.Printf("%s: q.Orders: %d\n", d.Name, len(q.Orders))
	// Log Limit alongside Offset for a complete picture of pagination.
	log.Printf("%s: q.Offset: %d\n", d.Name, q.Offset)
	log.Printf("%s: q.Limit: %d\n", d.Name, q.Limit)
	return d.child.Query(q)
}
// LogBatch logs all accesses through the batch.
type LogBatch struct {
	Name  string // prefix used in every log line
	child Batch
}
// Batch logs the call and, when the child supports Batching, wraps the
// child's batch in a LogBatch so batch operations are logged too.
// Children that cannot batch yield ErrBatchUnsupported.
func (d *LogDatastore) Batch() (Batch, error) {
	log.Printf("%s: Batch\n", d.Name)
	bds, ok := d.child.(Batching)
	if !ok {
		return nil, ErrBatchUnsupported
	}
	b, err := bds.Batch()
	if err != nil {
		return nil, err
	}
	return &LogBatch{
		Name:  d.Name,
		child: b,
	}, nil
}
// Put implements Batch.Put, logging the key before delegating.
func (d *LogBatch) Put(key Key, value interface{}) (err error) {
	log.Printf("%s: BatchPut %s\n", d.Name, key)
	// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
	return d.child.Put(key, value)
}

// Delete implements Batch.Delete, logging the key before delegating.
func (d *LogBatch) Delete(key Key) (err error) {
	log.Printf("%s: BatchDelete %s\n", d.Name, key)
	return d.child.Delete(key)
}

// Commit implements Batch.Commit, logging the call before delegating.
func (d *LogBatch) Commit() (err error) {
	log.Printf("%s: BatchCommit\n", d.Name)
	return d.child.Commit()
}
// Close logs the call and closes the child when it is an io.Closer.
func (d *LogDatastore) Close() error {
	log.Printf("%s: Close\n", d.Name)
	if cds, ok := d.child.(io.Closer); ok {
		return cds.Close()
	}
	return nil
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// The comments for the structs and fields can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored and not exported to the SwaggerAPI.
//
// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
//
// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
// conforms to the definition of IANA service name in RFC 6335.
// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
// Hyphens ('-') cannot be the leading or trailing character of the string
// and cannot be adjacent to other hyphens.
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
// +k8s:openapi-gen=false
type ObjectMeta struct {
	// Name must be unique within a namespace. Is required when creating resources, although
	// some resources may allow a client to request the generation of an appropriate name
	// automatically. Name is primarily intended for creation idempotence and configuration
	// definition.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// GenerateName is an optional prefix, used by the server, to generate a unique
	// name ONLY IF the Name field has not been provided.
	// If this field is used, the name returned to the client will be different
	// than the name passed. This value will also be combined with a unique suffix.
	// The provided value has the same validation rules as the Name field,
	// and may be truncated by the length of the suffix required to make the value
	// unique on the server.
	//
	// If this field is specified and the generated name exists, the server will
	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
	// should retry (optionally after the time indicated in the Retry-After header).
	//
	// Applied only if Name is not specified.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
	// +optional
	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
	// Namespace defines the space within which each name must be unique. An empty namespace is
	// equivalent to the "default" namespace, but "default" is the canonical representation.
	// Not all objects are required to be scoped to a namespace - the value of this field for
	// those objects will be empty.
	//
	// Must be a DNS_LABEL.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
	// SelfLink is a URL representing this object.
	// Populated by the system.
	// Read-only.
	// +optional
	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
	// UID is the unique in time and space value for this object. It is typically generated by
	// the server on successful creation of a resource and is not allowed to change on PUT
	// operations.
	//
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// An opaque value that represents the internal version of this object that can
	// be used by clients to determine when objects have changed. May be used for optimistic
	// concurrency, change detection, and the watch operation on a resource or set of resources.
	// Clients must treat these values as opaque and passed unmodified back to the server.
	// They may only be valid for a particular resource or set of resources.
	//
	// Populated by the system.
	// Read-only.
	// Value must be treated as opaque by clients.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// A sequence number representing a specific generation of the desired state.
	// Populated by the system. Read-only.
	// +optional
	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
	// CreationTimestamp is a timestamp representing the server time when this object was
	// created. It is not guaranteed to be set in happens-before order across separate operations.
	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
	//
	// Populated by the system.
	// Read-only.
	// Null for lists.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
	// field is set by the server when a graceful deletion is requested by the user, and is not
	// directly settable by a client. The resource is expected to be deleted (no longer visible
	// from resource lists, and not reachable by name) after the time in this field. Once set,
	// this value may not be unset or be set further into the future, although it may be shortened
	// or the resource may be deleted prior to this time. For example, a user may request that
	// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
	// signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
	// termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
	// API. In the presence of network partitions, this object may still exist after this
	// timestamp, until an administrator or automated process can determine the resource is
	// fully terminated.
	// If not set, graceful deletion of the object has not been requested.
	//
	// Populated by the system when a graceful deletion is requested.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
	// Number of seconds allowed for this object to gracefully terminate before
	// it will be removed from the system. Only set when deletionTimestamp is also set.
	// May only be shortened.
	// Read-only.
	// +optional
	DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
	// Map of string keys and values that can be used to organize and categorize
	// (scope and select) objects. May match selectors of replication controllers
	// and services.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
	// +optional
	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
	// Annotations is an unstructured key value map stored with a resource that may be
	// set by external tools to store and retrieve arbitrary metadata. They are not
	// queryable and should be preserved when modifying objects.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
	// +optional
	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
	// List of objects depended by this object. If ALL objects in the list have
	// been deleted, this object will be garbage collected. If this object is managed by a controller,
	// then an entry in this list will point to this controller, with the controller field set to true.
	// There cannot be more than one managing controller.
	// +optional
	// +patchMergeKey=uid
	// +patchStrategy=merge
	OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
	// An initializer is a controller which enforces some system invariant at object creation time.
	// This field is a list of initializers that have not yet acted on this object. If nil or empty,
	// this object has been completely initialized. Otherwise, the object is considered uninitialized
	// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
	// observe uninitialized objects.
	//
	// When an object is created, the system will populate this list with the current set of initializers.
	// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
	// by any user.
	// NOTE(review): unlike the neighboring fields this one carries no +optional
	// marker even though nil is documented as valid - confirm that is intentional.
	Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"`
	// Must be empty before the object is deleted from the registry. Each entry
	// is an identifier for the responsible component that will remove the entry
	// from the list. If the deletionTimestamp of the object is non-nil, entries
	// in this list can only be removed.
	// +optional
	// +patchStrategy=merge
	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
	// The name of the cluster which the object belongs to.
	// This is used to distinguish resources with same name and namespace in different clusters.
	// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
	// +optional
	ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
}
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients.
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces.
	NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
	// Volume's name.
	// Must be a DNS_LABEL and unique within the pod.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// VolumeSource represents the location and type of the mounted volume.
	// If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
// Each member corresponds to one supported volume plugin; the set union of
// all fields forms the pod-level volume API surface.
type VolumeSource struct {
	// HostPath represents a pre-existing file or directory on the host
	// machine that is directly exposed to the container. This is generally
	// used for system agents or other privileged things that are allowed
	// to see the host machine. Most containers will NOT need this.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// ---
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
	// EmptyDir represents a temporary directory that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
	// GitRepo represents a git repository at a particular revision.
	// +optional
	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
	// Secret represents a secret that should populate this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
	// NFS represents an NFS mount on the host that shares a pod's lifetime
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
	// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
	// PersistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin. This is an
	// alpha feature and may change in future.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
	// DownwardAPI represents downward API about the pod that should populate this volume
	// +optional
	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
	// ConfigMap represents a configMap that should populate this volume
	// +optional
	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// +optional
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// Items for all in one resources secrets, configmaps, and downward API
	// +optional
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
	// +optional
	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
	// Will force the ReadOnly setting in VolumeMounts.
	// Default false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
// Note that some members use the *PersistentVolumeSource variant of a plugin
// type (e.g. CephFS, AzureFile, StorageOS) rather than the pod-level type.
type PersistentVolumeSource struct {
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
	// HostPath represents a directory on the host.
	// Provisioned by a developer or tester.
	// This is useful for single-node development and testing only!
	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
	// Glusterfs represents a Glusterfs volume that is attached to a host and
	// exposed to the pod. Provisioned by an admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
	// NFS represents an NFS mount on the host. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin. This is an
	// alpha feature and may change in future.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// +optional
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
	// Local represents directly-attached storage with node affinity
	// +optional
	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
	// +optional
	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
}
// Annotation keys used on PersistentVolume/PersistentVolumeClaim objects.
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines mount option annotation used in PVs
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
	// AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume.
	// Value is a string of the json representation of type NodeAffinity
	AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// A description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// The actual volume backing the persistent volume.
	// Inlined, so exactly one of the PersistentVolumeSource members appears
	// directly on the spec in JSON.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// AccessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// What happens to a persistent volume when released from its claim.
	// Valid options are Retain (default), Delete, and Recycle
	// (see the PersistentVolumeReclaimPolicy constants).
	// Recycling must be supported by the volume plugin underlying this persistent volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// Name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
type PersistentVolumeReclaimPolicy string

// Valid reclaim policies for a PersistentVolume.
const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// A human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// Reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaim is a user's request for and claim to a persistent volume
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// A list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
// Note: protobuf field numbers are not sequential here (Selector is 4,
// StorageClassName is 5) because fields were added over time; the numbers
// must never be renumbered.
type PersistentVolumeClaimSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// A label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// Name of the StorageClass required by the claim.
	// A nil pointer and an empty string have distinct meanings (unset vs.
	// explicitly requesting no class), hence *string rather than string.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// Phase represents the current phase of PersistentVolumeClaim.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// AccessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// PersistentVolumeAccessMode describes how a volume can be mounted
// (e.g. read/write by a single node, or read-only by many nodes).
type PersistentVolumeAccessMode string

// Valid access modes for a PersistentVolume.
const (
	// can be mounted read/write mode to exactly 1 host
	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
	// can be mounted in read-only mode to many hosts
	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
	// can be mounted in read/write mode to many hosts
	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
type PersistentVolumePhase string

// Valid phases of a PersistentVolume.
const (
	// used for PersistentVolumes that are not available
	VolumePending PersistentVolumePhase = "Pending"
	// used for PersistentVolumes that are not yet bound
	// Available volumes are held by the binder and matched to PersistentVolumeClaims
	VolumeAvailable PersistentVolumePhase = "Available"
	// used for PersistentVolumes that are bound
	VolumeBound PersistentVolumePhase = "Bound"
	// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
	// released volumes must be recycled before becoming available again
	// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
	VolumeReleased PersistentVolumePhase = "Released"
	// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
	VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
type PersistentVolumeClaimPhase string

// Valid phases of a PersistentVolumeClaim.
const (
	// used for PersistentVolumeClaims that are not yet bound
	ClaimPending PersistentVolumeClaimPhase = "Pending"
	// used for PersistentVolumeClaims that are bound
	ClaimBound PersistentVolumeClaimPhase = "Bound"
	// used for PersistentVolumeClaims that lost their underlying
	// PersistentVolume. The claim was bound to a PersistentVolume and this
	// volume does not exist any longer and all data on it was lost.
	ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType constrains what kind of filesystem object a HostPath volume
// may (or must) refer to on the host.
type HostPathType string

// Valid host path types.
const (
	// For backwards compatible, leave it empty if unset
	HostPathUnset HostPathType = ""
	// If nothing exists at the given path, an empty directory will be created there
	// as needed with file mode 0755, having the same group and ownership with Kubelet.
	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
	// A directory must exist at the given path
	HostPathDirectory HostPathType = "Directory"
	// If nothing exists at the given path, an empty file will be created there
	// as needed with file mode 0644, having the same group and ownership with Kubelet.
	HostPathFileOrCreate HostPathType = "FileOrCreate"
	// A file must exist at the given path
	HostPathFile HostPathType = "File"
	// A UNIX socket must exist at the given path
	HostPathSocket HostPathType = "Socket"
	// A character device must exist at the given path
	HostPathCharDev HostPathType = "CharDevice"
	// A block device must exist at the given path
	HostPathBlockDev HostPathType = "BlockDevice"
)
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
	// Path of the directory on the host.
	// If the path is a symlink, it will follow the link to the real path.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Type for HostPath Volume
	// Defaults to "" (HostPathUnset), which performs no type check for
	// backwards compatibility.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// Represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
	// What type of storage medium should back this directory.
	// The default is "" which means to use the node's default medium.
	// Must be an empty string (default) or Memory.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
	// Total amount of local storage required for this EmptyDir volume.
	// The size limit is also applicable for memory medium.
	// The maximum usage on memory medium EmptyDir would be the minimum value between
	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
	// The default is nil which means that the limit is undefined.
	// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
	// +optional
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
	// EndpointsName is the endpoint name that details Glusterfs topology.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// Path is the Glusterfs volume path.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Note the Go field names (CephMonitors, RBDImage, RadosUser, ...) differ from
// the serialized names (monitors, image, user, ...) given in the tags.
type RBDVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to key ring for RBDUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is name of the authentication secret for RBDUser. If provided
	// overrides keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
	// volume id used to identify the volume in cinder
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace.
type SecretReference struct {
// Name is unique within a namespace to reference a secret resource.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Namespace defines the space within which the secret name must be unique.
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSPersistentVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// Uses SecretReference (which carries a Namespace field) rather than LocalObjectReference,
// so the secret may be referenced across namespaces.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// Represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
// Name of the dataset stored as metadata -> name on the dataset for Flocker.
// Should be considered deprecated; prefer DatasetUUID.
// +optional
DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
// UUID of the dataset. This is the unique identifier of a Flocker dataset.
// +optional
DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
const (
// StorageMediumDefault uses whatever the default storage medium is for the node.
StorageMediumDefault StorageMedium = ""
// StorageMediumMemory uses memory (tmpfs).
StorageMediumMemory StorageMedium = "Memory"
)
// Protocol defines network protocols supported for things like container ports.
type Protocol string
// Valid Protocol values.
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// Partition numbering starts at 1; 0 (or empty) means the whole volume.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
// Registry represents a single or multiple Quobyte Registry services
// specified as a string as host:port pair (multiple entries are separated with commas)
// which acts as the central registry for volumes
Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
// Volume is a string that references an already created Quobyte volume by name.
Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// User to map volume access to
// Defaults to serviceaccount user
// +optional
User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
// Group to map volume access to
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in the future.
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// Represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// Partition numbering starts at 1; 0 (or empty) means the whole volume.
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
// If omitted, the default is "false".
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
// Repository is the URL of the git repository.
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
// Commit hash for the specified revision.
// +optional
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Target directory name.
// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
// git repository. Otherwise, if specified, the volume will contain the git repository in
// the subdirectory with the given name.
// +optional
Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// Adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
// Specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// SecretVolumeSourceDefaultMode is the default file mode (0644) applied to
// files projected from a Secret when DefaultMode is not specified.
SecretVolumeSourceDefaultMode int32 = 0644
)
// Adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
// LocalObjectReference names the Secret being projected.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// Represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
// Server is the hostname or IP address of the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
// Path that is exported by the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force the NFS export to be mounted
// with read-only permissions.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
//
// NOTE: protobuf field numbers in this struct are not contiguous (8, 11, 10, 12
// below); they must stay exactly as written for wire compatibility.
type ISCSIVolumeSource struct {
// iSCSI target portal. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI target lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI target portal List. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP secret for iSCSI target and initiator authentication
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI initiator name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
// Optional: FC target worldwide names (WWNs).
// +optional
TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
// Optional: FC target lun number.
// +optional
Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: FC volume world wide identifiers (wwids).
// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
// +optional
WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
// The name of the secret that contains the Azure Storage Account Name and Key.
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name.
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFilePersistentVolumeSource struct {
// The name of the secret that contains the Azure Storage Account Name and Key.
SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
// Share Name.
ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// The namespace of the secret that contains the Azure Storage Account Name and Key.
// Default is the same as the Pod.
// +optional
SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies the vSphere volume vmdk.
VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Storage Policy Based Management (SPBM) profile name.
// +optional
StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
// +optional
StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
// ID that identifies the Photon Controller persistent disk.
PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode specifies the host caching mode for an Azure data disk.
type AzureDataDiskCachingMode string
// AzureDataDiskKind specifies the kind of Azure data disk: shared blob, dedicated blob, or managed disk.
type AzureDataDiskKind string
const (
// Host caching modes: none, read-only, or read-write.
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
// Disk kinds: multiple blob disks per storage account, single blob disk
// per storage account, or Azure managed data disk.
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
// The Name of the data disk in the blob storage
DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
// The URI the data disk in the blob storage
DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
// Host Caching mode: None, Read Only, Read Write.
// +optional
CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). Defaults to shared.
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
// VolumeID uniquely identifies a Portworx volume.
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// FSType represents the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume.
type ScaleIOVolumeSource struct {
// The host address of the ScaleIO API Gateway.
Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
// The name of the storage system as configured in ScaleIO.
System string `json:"system" protobuf:"bytes,2,opt,name=system"`
// SecretRef references to the secret for ScaleIO user and other
// sensitive information. If this is not provided, Login operation will fail.
SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
// Flag to enable/disable SSL communication with the Gateway; defaults to false.
// +optional
SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
// The name of the Protection Domain for the configured storage (defaults to "default").
// +optional
ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
// The Storage Pool associated with the protection domain (defaults to "default").
// +optional
StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
// Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
// +optional
StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
// The name of a volume already created in the ScaleIO system
// that is associated with this volume source.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// Represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// NOTE(review): "VolumeName" in the previous sentence likely means
// "VolumeNamespace" — confirm against upstream.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSource struct {
// VolumeName is the human-readable name of the StorageOS volume. Volume
// names are only unique within a namespace.
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
// VolumeNamespace specifies the scope of the volume within StorageOS. If no
// namespace is specified then the Pod's namespace will be used. This allows the
// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
// Set VolumeName to any name to override the default behaviour.
// NOTE(review): "VolumeName" in the previous sentence likely means
// "VolumeNamespace" — confirm against upstream.
// Set to "default" if you are not using namespaces within StorageOS.
// Namespaces that do not pre-exist within StorageOS will be created.
// +optional
VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// SecretRef specifies the secret to use for obtaining the StorageOS API
// credentials. If not specified, default values will be attempted.
// Unlike StorageOSVolumeSource, this is a full ObjectReference — presumably
// so the secret can live in a different namespace; verify against callers.
// +optional
SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
// LocalObjectReference names the ConfigMap to use.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
// LocalObjectReference names the ConfigMap in the pod's namespace.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// ConfigMap will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the ConfigMap,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the ConfigMap or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ProjectedVolumeSource represents a projected volume source: a single
// volume that blends the outputs of multiple VolumeProjection sources.
type ProjectedVolumeSource struct {
// list of volume projections
Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
// Mode bits to use on created files by default. Must be a value between
// 0 and 0777.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection describes a single projection that may be projected
// along with other supported volume types. At most one of its members
// should be set.
type VolumeProjection struct {
// all types below are the supported types for projection into the same volume
// information about the secret data to project
Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
// information about the downwardAPI data to project
DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
// information about the configMap data to project
ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}
const (
// ProjectedVolumeSourceDefaultMode is the file mode (0644, rw-r--r--)
// applied to projected files when ProjectedVolumeSource.DefaultMode is unset.
ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
// The key to project.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// The relative path of the file to map the key to.
// May not be an absolute path.
// May not contain the path element '..'.
// May not start with the string '..'.
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity.
type LocalVolumeSource struct {
// The full path to the volume on the node
// For alpha, this path must be a directory
// Once block as a source is supported, then this path can point to a block device
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
// named port in a pod must have a unique name. Name for the port that can be
// referred to by services.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Number of port to expose on the host.
// If specified, this must be a valid port number, 0 < x < 65536.
// If HostNetwork is specified, this must match ContainerPort.
// Most containers do not need this.
// +optional
HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
// Number of port to expose on the pod's IP address.
// This must be a valid port number, 0 < x < 65536.
ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
// Protocol for port. Must be UDP or TCP.
// Defaults to "TCP".
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
// What host IP to bind the external port to.
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
// This must match the Name of a Volume.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Mounted read-only if true, read-write otherwise (false or unspecified).
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
// Path within the container at which the volume should be mounted. Must
// not contain ':'.
MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
// Path within the volume from which the container's volume should be mounted.
// Defaults to "" (volume's root).
// +optional
SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
// mountPropagation determines how mounts are propagated from the host
// to container and the other way around.
// When not set, MountPropagationHostToContainer is used.
// This field is alpha in 1.8 and can be reworked or removed in a future
// release.
// +optional
MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation.
// It is the value type of VolumeMount.MountPropagation.
type MountPropagationMode string
const (
// MountPropagationHostToContainer means that the volume in a container will
// receive new mounts from the host or other containers, but filesystems
// mounted inside the container won't be propagated to the host or other
// containers.
// Note that this mode is recursively applied to all mounts in the volume
// ("rslave" in Linux terminology).
MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
// MountPropagationBidirectional means that the volume in a container will
// receive new mounts from the host or other containers, and its own mounts
// will be propagated from the container to the host or other containers.
// Note that this mode is recursively applied to all mounts in the volume
// ("rshared" in Linux terminology).
MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// EnvVar represents an environment variable present in a Container.
// Exactly one of Value or ValueFrom should be provided.
type EnvVar struct {
// Name of the environment variable. Must be a C_IDENTIFIER.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Optional: no more than one of the following may be specified.
// Variable references $(VAR_NAME) are expanded
// using the previous defined environment variables in the container and
// any service environment variables. If a variable cannot be resolved,
// the reference in the input string will be unchanged. The $(VAR_NAME)
// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
// references will never be expanded, regardless of whether the variable
// exists or not.
// Defaults to "".
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
// Source for the environment variable's value. Cannot be used if value is not empty.
// +optional
ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// At most one of its fields may be set.
type EnvVarSource struct {
// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests
// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
// +optional
ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
// Selects a key of a ConfigMap.
// +optional
ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
// Selects a key of a secret in the pod's namespace
// +optional
SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
// Version of the schema the FieldPath is written in terms of, defaults to "v1".
// +optional
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
// Path of the field to select in the specified API version.
FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and their output format.
type ResourceFieldSelector struct {
// Container name: required for volumes, optional for env vars
// +optional
ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
// Required: resource to select
Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
// Specifies the output format of the exposed resources, defaults to "1"
// +optional
Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
// The ConfigMap to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key to select.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
// Specify whether the ConfigMap or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// The key of the secret to select from. Must be a valid secret key.
Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
// Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of environment variables,
// drawn from either a ConfigMap or a Secret.
type EnvFromSource struct {
// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
// +optional
Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
// The ConfigMap to select from
// +optional
ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
// The Secret to select from
// +optional
SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
// The ConfigMap to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// Specify whether the ConfigMap must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
// The Secret to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// Specify whether the Secret must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes
// (see HTTPGetAction.HTTPHeaders).
type HTTPHeader struct {
// The header field name
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// The header field value
Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
// Path to access on the HTTP server.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
// Name or number of the port to access on the container.
// Number must be in the range 1 to 65535.
// Name must be an IANA_SVC_NAME.
Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
// Host name to connect to, defaults to the pod IP. You probably want to set
// "Host" in httpHeaders instead.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
// Scheme to use for connecting to the host.
// Defaults to HTTP.
// +optional
Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
// Custom headers to set in the request. HTTP allows repeated headers.
// +optional
HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
type URIScheme string
const (
// URISchemeHTTP means that the scheme used will be http://
URISchemeHTTP URIScheme = "HTTP"
// URISchemeHTTPS means that the scheme used will be https://
URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
// Number or name of the port to access on the container.
// Number must be in the range 1 to 65535.
// Name must be an IANA_SVC_NAME.
Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
// Optional: Host name to connect to, defaults to the pod IP.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
// Command is the command line to execute inside the container, the working directory for the
// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
// a shell, you need to explicitly call out to that shell.
// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
// The action taken to determine the health of a container.
// Inlined Handler: exactly one of Exec, HTTPGet, or TCPSocket is used.
Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
// Number of seconds after the container has started before liveness probes are initiated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
// Number of seconds after which the probe times out.
// Defaults to 1 second. Minimum value is 1.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
// How often (in seconds) to perform the probe.
// Default to 10 seconds. Minimum value is 1.
// +optional
PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
// Minimum consecutive successes for the probe to be considered successful after having failed.
// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
// +optional
SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
// Minimum consecutive failures for the probe to be considered failed after having succeeded.
// Defaults to 3. Minimum value is 1.
// +optional
FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string
const (
// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
PullAlways PullPolicy = "Always"
// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
PullNever PullPolicy = "Never"
// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string
const (
// TerminationMessageReadFile is the default behavior and will set the container status message to
// the contents of the container's terminationMessagePath when the container exits.
TerminationMessageReadFile TerminationMessagePolicy = "File"
// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
// for the container status message when the container exits with an error and the
// terminationMessagePath has no contents.
TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capability (e.g. "NET_ADMIN").
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
// Added capabilities
// +optional
Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
// Removed capabilities
// +optional
Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
// Limits describes the maximum amount of compute resources allowed.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
// Requests describes the minimum amount of compute resources required.
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
// otherwise to an implementation-defined value.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
// TerminationMessagePathDefault means the default path to capture the application termination message running in a container
TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
//
// NOTE: protobuf tags are not sequential (e.g. 19, 20 appear among lower
// numbers) — presumably those fields were added after the original numbering
// and tags must remain stable for wire compatibility.
type Container struct {
// Name of the container specified as a DNS_LABEL.
// Each container in a pod must have a unique name (DNS_LABEL).
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Docker image name.
// More info: https://kubernetes.io/docs/concepts/containers/images
Image string `json:"image" protobuf:"bytes,2,opt,name=image"`
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
// Container's working directory.
// If not specified, the container runtime's default will be used, which
// might be configured in the container image.
// Cannot be updated.
// +optional
WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
// List of ports to expose from the container. Exposing a port here gives
// the system additional information about the network connections a
// container uses, but is primarily informational. Not specifying a port here
// DOES NOT prevent that port from being exposed. Any port which is
// listening on the default "0.0.0.0" address inside a container will be
// accessible from the network.
// Cannot be updated.
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
// +optional
EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
// List of environment variables to set in the container.
// Cannot be updated.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
// Compute Resources required by this container.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
// +patchMergeKey=mountPath
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// Periodic probe of container liveness.
// Container will be restarted if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
// Periodic probe of container service readiness.
// Container will be removed from service endpoints if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
// Actions that the management system should take in response to container lifecycle events.
// Cannot be updated.
// +optional
Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
// Optional: Path at which the file to which the container's termination message
// will be written is mounted into the container's filesystem.
// Message written is intended to be brief final status, such as an assertion failure message.
// Will be truncated by the node if greater than 4096 bytes. The total message length across
// all containers will be limited to 12kb.
// Defaults to /dev/termination-log.
// Cannot be updated.
// +optional
TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
// Indicate how the termination message should be populated. File will use the contents of
// terminationMessagePath to populate the container status message on both success and failure.
// FallbackToLogsOnError will use the last chunk of container log output if the termination
// message file is empty and the container exited with an error.
// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
// Defaults to File.
// Cannot be updated.
// +optional
TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
// Image pull policy.
// One of Always, Never, IfNotPresent.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
// Security options the pod should run with.
// More info: https://kubernetes.io/docs/concepts/policy/security-context/
// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md
// +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
// and shouldn't be used for general purpose containers.
// Whether this container should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the container will always result in EOF.
// Default is false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
// Whether the container runtime should close the stdin channel after it has been opened by
// a single attach. When stdin is true the stdin stream will remain open across multiple attach
// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
// at which time stdin is closed and remains closed until the container is restarted. If this
// flag is false, a container process that reads from stdin will never receive an EOF.
// Default is false
// +optional
StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
// Default is false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
// One and only one of the following should be specified.
// Exec specifies the action to take.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// TCPSocket specifies an action involving a TCP port.
// TCP hooks not yet supported
// TODO: implement a realistic TCP lifecycle hook
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
// PostStart is called immediately after a container is created. If the handler fails,
// the container is terminated and restarted according to its restart policy.
// Other management of the container blocks until the hook completes.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
// PreStop is called immediately before a container is terminated.
// The container is terminated after the handler completes.
// The reason for termination is passed to the handler.
// Regardless of the outcome of the handler, the container is eventually terminated.
// Other management of the container blocks until the hook completes.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the string status of a condition (see the constants below).
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
// (brief) reason the container is not yet running.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
// Message regarding why the container is not yet running.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
// Time at which the container was last (re-)started
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
// Exit status from the last termination of the container
ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
// Signal from the last termination of the container
// +optional
Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
// (brief) reason from the last termination of the container
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Message regarding the last termination of the container
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// Time at which previous execution of the container started
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
// Time at which the container last terminated
// +optional
FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
// Container's ID in the format 'docker://<container_id>'
// +optional
ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
// Details about a waiting container
// +optional
Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
// Details about a running container
// +optional
Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
// Details about a terminated container
// +optional
Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted, currently based on
	// the number of dead containers that have not yet been removed.
	// Note that this is calculated from dead containers. But those containers are subject to
	// garbage collection. This value will get capped at 5 by GC.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// TODO(dchen1107): Which image the container is running with?
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string

// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type
type PodConditionType string

// These are valid conditions of pod.
const (
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	// NOTE: this is a condition Reason string, not a PodConditionType value.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
	// Type is the type of the condition.
	// Currently only Ready.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string

const (
	RestartPolicyAlways    RestartPolicy = "Always"    // always restart exited containers
	RestartPolicyOnFailure RestartPolicy = "OnFailure" // restart only containers that exited with failure
	RestartPolicyNever     RestartPolicy = "Never"     // never restart exited containers
)

// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string

const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"

	// DefaultTerminationGracePeriodSeconds is the default grace period, in seconds,
	// used when PodSpec.TerminationGracePeriodSeconds is not set.
	DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}

// A null or empty node selector term matches no objects.
type NodeSelectorTerm struct {
	// Required. A list of node selector requirements. The requirements are ANDed.
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
}

// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}

// A node selector operator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string

const (
	NodeSelectorOpIn           NodeSelectorOperator = "In"
	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// Pod affinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}

// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
type WeightedPodAffinityTerm struct {
	// weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}

// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies which namespaces the labelSelector applies to (matches against);
	// null or empty list means "this pod's namespace"
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
	// ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
	// for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
	// +optional
	TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"`
}
// Node affinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}

// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Required. The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}

// TaintEffect describes what happens to pods that do not tolerate a Taint.
type TaintEffect string

const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"

	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}

// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string

const (
	TolerationOpExists TolerationOperator = "Exists" // tolerate any value of the matching key
	TolerationOpEqual  TolerationOperator = "Equal"  // tolerate only an exactly equal value
)
// Annotation keys used to carry init-container data through API versions
// that predate first-class fields.
const (
	// This annotation key will be used to contain an array of v1 JSON encoded Containers
	// for init containers. The annotation will be placed into the internal type and cleared.
	// This key is only recognized by version >= 1.4.
	PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers"
	// This annotation key will be used to contain an array of v1 JSON encoded Containers
	// for init containers. The annotation will be placed into the internal type and cleared.
	// This key is recognized by version >= 1.3. For version 1.4 code, this key
	// will have its value copied to the beta key.
	PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
	// This annotation key will be used to contain an array of v1 JSON encoded
	// ContainerStatuses for init containers. The annotation will be placed into the internal
	// type and cleared. This key is only recognized by version >= 1.4.
	PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses"
	// This annotation key will be used to contain an array of v1 JSON encoded
	// ContainerStatuses for init containers. The annotation will be placed into the internal
	// type and cleared. This key is recognized by version >= 1.3. For version 1.4 code,
	// this key will have its value copied to the beta key.
	PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
)
// PodSpec is a description of a pod.
type PodSpec struct {
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
	// List of initialization containers belonging to the pod.
	// Init containers are executed in order prior to containers being started. If any
	// init container fails, the pod is considered to have failed and is handled according
	// to its restartPolicy. The name for an init container or normal container must be
	// unique among all containers.
	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
	// The resourceRequirements of an init container are taken into account during scheduling
	// by finding the highest request/limit for each resource type, and then using the max of
	// of that value or the sum of the normal containers. Limits are applied to init containers
	// in a similar fashion.
	// Init containers cannot currently be added or removed.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
	// List of containers belonging to the pod.
	// Containers cannot currently be added or removed.
	// There must be at least one container in a Pod.
	// Cannot be updated.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
	// Restart policy for all containers within the pod.
	// One of Always, OnFailure, Never.
	// Default to Always.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
	// +optional
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
	// Optional duration in seconds the pod may be active on the node relative to
	// StartTime before the system will actively try to mark it failed and kill associated containers.
	// Value must be a positive integer.
	// +optional
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
	// Set DNS policy for containers within the pod.
	// One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'.
	// Defaults to "ClusterFirst".
	// To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
	// +optional
	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
	// Deprecated: Use serviceAccountName instead.
	// +k8s:conversion-gen=false
	// +optional
	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
	// requirements.
	// +optional
	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
	// Host networking requested for this pod. Use the host's network namespace.
	// If this option is set, the ports that will be used must be specified.
	// Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
	// Use the host's pid namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
	// Use the host's ipc namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// +optional
	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
	// in the case of docker, only DockerConfig type secrets are honored.
	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
	// Specifies the hostname of the Pod
	// If not specified, the pod's hostname will be set to a system-defined value.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the pod will not have a domainname at all.
	// +optional
	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
	// If specified, the pod's scheduling constraints
	// +optional
	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// If specified, the pod will be dispatched by specified scheduler.
	// If not specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +patchMergeKey=ip
	// +patchStrategy=merge
	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
	// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
	// which indicates the highest priority. Any other name must be defined by
	// creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
	// The priority value. Various system components use this field to find the
	// priority of the pod. When Priority Admission Controller is enabled, it
	// prevents users from setting this field. The admission controller populates
	// this field from PriorityClassName.
	// The higher the value, the higher the priority.
	// +optional
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames for the above IP address. All of these names resolve to the single IP above.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in SecurityContext. If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID. If unspecified, no groups will be added to
	// any container.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported qos classes of Pods.
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed qos class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable qos class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort qos class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
	// Current condition of the pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest. Each entry is currently the output
	// of `docker inspect`.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
	// See PodQOSClass type for available QOS classes
	// More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
	// NOTE(review): the protobuf tag says "rep" although this is a scalar field —
	// confirm against the generated .proto before changing the tag.
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates.
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready)
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected. This field is intentionally left commented out;
	// use Template below instead.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a replication controller condition.
type ReplicationControllerConditionType string

// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition.
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is a string describing the session affinity type of a Service.
type ServiceAffinity string

const (
	// ServiceAffinityClientIP is the Client IP based.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800(for 3 hours).
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service.
type ServiceType string

const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"
	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"
	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType is the external traffic policy of a Service.
type ServiceExternalTrafficPolicyType string

const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers)
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers)
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly by the master. If an address is specified manually and is not in
	// use by others, it will be allocated to the service; otherwise, creation
	// of the service will fail. This field can not be changed through updates.
	// Valid values are "None", empty string (""), or a valid IP address. "None"
	// can be specified for headless services when proxying is not required.
	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
	// type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ExternalName" maps to the specified externalName.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
	// endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object. If clusterIP is
	// "None", no virtual IP is allocated and the endpoints are published as a
	// set of endpoints rather than a stable IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an
	// external load-balancer (if supported in the current cloud) which routes
	// to the clusterIP.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer
	// LoadBalancer will get created with the IP specified in this field.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be
	// ignored if the cloud-provider does not support the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that kubedns or equivalent will
	// return as a CNAME record for this service. No proxying will be involved.
	// Must be a valid DNS name and requires Type to be ExternalName.
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and Nodeport
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// If not specified, HealthCheckNodePort is created by the service api
	// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only effects when Type is set to LoadBalancer
	// and ExternalTrafficPolicy is set to Local.
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
	// must publish the notReadyAddresses of subsets for the Endpoints associated with
	// the Service. The default value is false.
	// The primary use case for setting this field is to use a StatefulSet's Headless Service
	// to propagate SRV records for its Pods without respect to their readiness for purpose
	// of peer discovery.
	// This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
	// when that annotation is deprecated and all clients have been converted to use this
	// field.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. This maps to
	// the 'Name' field in EndpointPort objects.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port. Supports "TCP" and "UDP".
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
	// Usually assigned by the system. If specified, it will be allocated to the service
	// if unused or else creation of the service will fail.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone - do not assign a cluster IP:
	// no proxying required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of services
Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
// +optional
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
// Can be overridden at the pod level.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects.
type ServiceAccountList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ServiceAccounts.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc",
// Subsets: [
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// },
// {
// Addresses: [{"ip": "10.10.3.3"}],
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The set of all endpoints is the union of all subsets. Addresses are placed into
// subsets according to the IPs they share. A single address with multiple ports,
// some of which are ready and some of which are not (because they come from
// different containers) will result in the address being displayed in different
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
// The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
// +optional
Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
// IP addresses which offer the related ports but are not currently marked as ready
// because they have not yet finished starting, have recently failed a readiness check,
// or have recently failed a liveness check.
// +optional
NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
// Port numbers available on the related IP addresses.
// +optional
Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes a single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
// or link-local multicast (224.0.0.0/24).
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, See #4447.
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
// The Hostname of this endpoint.
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
// +optional
NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
// Reference to object providing the endpoint.
// +optional
TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
// The name of this port (corresponds to ServicePort.Name).
// Must be a DNS_LABEL.
// Optional only if one port is defined.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The port number of the endpoint.
Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
// The IP protocol for this port.
// Must be UDP or TCP.
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of endpoints.
Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
// PodCIDR represents the pod IP range assigned to the node.
// +optional
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
// External ID of the node assigned by some machine database (e.g. a cloud provider).
// Deprecated.
// +optional
ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
// +optional
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
// If specified, the node's taints.
// +optional
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
// If specified, the source to get node configuration from.
// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
metav1.TypeMeta `json:",inline"`
ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
// Endpoint on which Kubelet is listening.
// +optional
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
// Kubelet Version reported by the node.
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
// The Operating System reported by the node.
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node.
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// Allocatable represents the resources of a node that are available for scheduling.
// Defaults to Capacity.
// +optional
Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
// NodePhase is the recently observed lifecycle phase of the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
// The field is never populated, and now is deprecated.
// +optional
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
// Conditions is an array of current observed node conditions.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
// Endpoints of daemons running on the Node.
// +optional
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
// Set of ids/uuids to uniquely identify the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
// +optional
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node.
// +optional
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
// List of attachable volumes in use (mounted) by the node.
// +optional
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
}
// UniqueVolumeName identifies an attached volume; used in
// NodeStatus.VolumesInUse and AttachedVolume.Name.
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node.
type AttachedVolume struct {
// Name of the attached volume.
Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
// DevicePath represents the device path where the volume should be available.
DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
// Bounded-sized list of signatures of pods that should avoid this node, sorted
// in timestamp order from oldest to newest. Size of the slice is unspecified.
// +optional
PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
// The class of pods.
PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
// Time at which this entry was added to the list.
// +optional
EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
// (brief) reason why this entry was added to the list.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Human readable message indicating why this entry was added to the list.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
// Reference to controller whose pods should avoid this node.
// +optional
PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image.
type ContainerImage struct {
// Names by which this image is known.
// e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
// The size of the image in bytes.
// +optional
SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node.
type NodePhase string
// These are the valid phases of node.
const (
// NodePending means the node has been created/added by the system, but not configured.
NodePending NodePhase = "Pending"
// NodeRunning means the node has been configured and has Kubernetes components running.
NodeRunning NodePhase = "Running"
// NodeTerminated means the node has been removed from the cluster.
NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType is the type of a node condition.
type NodeConditionType string
// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType is the type of a node address.
type NodeAddressType string
// These are valid address type of node.
const (
NodeHostName NodeAddressType = "Hostname"
NodeExternalIP NodeAddressType = "ExternalIP"
NodeInternalIP NodeAddressType = "InternalIP"
NodeExternalDNS NodeAddressType = "ExternalDNS"
NodeInternalDNS NodeAddressType = "InternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
// Node address type, one of Hostname, InternalIP, ExternalIP, InternalDNS or ExternalDNS.
Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
// The node address.
Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
// CPU, in cores. (500m = .5 cores)
ResourceCPU ResourceName = "cpu"
// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemory ResourceName = "memory"
// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
// NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned.
ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
)
const (
// Namespace prefix for opaque counted resources (alpha).
ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
)
// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of a node.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the node.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of nodes.
Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers
// +optional
Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
// Phase is the current lifecycle phase of the namespace.
// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases
// +optional
Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}
// NamespacePhase is the lifecycle phase of a namespace.
type NamespacePhase string
// These are the valid phases of a namespace.
const (
// NamespaceActive means the namespace is available for use in the system
NamespaceActive NamespacePhase = "Active"
// NamespaceTerminating means the namespace is undergoing graceful termination
NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of the Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status describes the current status of a Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NamespaceList is a list of Namespaces.
type NamespaceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Namespace objects in the list.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The target object that you want to bind to the standard object.
Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// DeletionPropagation decides if a deletion will propagate to the dependents of the object, and how the garbage collector will handle the propagation.
type DeletionPropagation string
const (
// Orphans the dependents.
DeletePropagationOrphan DeletionPropagation = "Orphan"
// Deletes the object from the key-value store, the garbage collector will delete the dependents in the background.
DeletePropagationBackground DeletionPropagation = "Background"
// The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store.
// API server will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp.
// This policy is cascading, i.e., the dependents will be deleted with Foreground.
DeletePropagationForeground DeletionPropagation = "Foreground"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DeleteOptions may be provided when deleting an API object
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type DeleteOptions struct {
metav1.TypeMeta `json:",inline"`
// The duration in seconds before the object should be deleted. Value must be non-negative integer.
// The value zero indicates delete immediately. If this value is nil, the default grace period for the
// specified type will be used.
// Defaults to a per object value if not specified. zero means delete immediately.
// +optional
GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
// Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be
// returned.
// +optional
Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
// Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7.
// Should the dependent objects be orphaned. If true/false, the "orphan"
// finalizer will be added to/removed from the object's finalizers list.
// Either this field or PropagationPolicy may be set, but not both.
// +optional
OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
// Whether and how garbage collection will be performed.
// Either this field or OrphanDependents may be set, but not both.
// The default policy is decided by the existing finalizer set in the
// metadata.finalizers and the resource-specific default policy.
// NOTE(review): unlike the sibling fields, this field has no json tag, so
// encoding/json serializes it under the Go field name "PropagationPolicy" —
// verify against the meta/v1 DeleteOptions definition before changing.
// +optional
PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ListOptions is the query options to a standard REST list call.
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type ListOptions struct {
metav1.TypeMeta `json:",inline"`
// A selector to restrict the list of returned objects by their labels.
// Defaults to everything.
// +optional
LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
// A selector to restrict the list of returned objects by their fields.
// Defaults to everything.
// +optional
FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
// If true, partially initialized resources are included in the response.
// Note: protobuf field number 6 is out of declaration order; the tag
// numbers, not the field order, define the wire format.
// +optional
IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
// Watch for changes to the described resources and return them as a stream of
// add, update, and remove notifications. Specify resourceVersion.
// +optional
Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
// When specified with a watch call, shows changes that occur after that particular version of a resource.
// Defaults to changes from the beginning of history.
// When specified for list:
// - if unset, then the result is returned from remote storage based on quorum-read flag;
// - if it's 0, then we simply return what we currently have in cache, no guarantee;
// - if set to non zero, then the result is at least as fresh as given rv.
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
// Timeout for the list/watch call.
// +optional
TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
metav1.TypeMeta `json:",inline"`
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
// Follow the log stream of the pod. Defaults to false.
// +optional
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// Return previous terminated container logs. Defaults to false.
// +optional
Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
// A relative time in seconds before the current time from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
// An RFC3339 timestamp from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// of log output. Defaults to false.
// +optional
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// If set, the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime.
// +optional
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// If set, the number of bytes to read from the server before terminating the
// log output. This may not display a complete final line of logging, and may return
// slightly more or slightly less than the specified limit.
// +optional
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
metav1.TypeMeta `json:",inline"`
// Stdin if true, redirects the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Stdout if true indicates that stdout is to be redirected for the attach call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Stderr if true indicates that stderr is to be redirected for the attach call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the attach call.
// This is passed through the container runtime so the tty
// is allocated on the worker node by the container runtime.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// The container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
metav1.TypeMeta `json:",inline"`
// Redirect the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Redirect the standard output stream of the pod for this call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Redirect the standard error stream of the pod for this call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the exec call.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// Container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
// Command is the remote command to execute. argv array. Not executed within a shell.
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
metav1.TypeMeta `json:",inline"`
// List of ports to forward.
// Required when using WebSockets.
// +optional
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to pod.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to node.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the part of URLs that include service endpoints, suffixes,
// and parameters to use for the current proxy request to service.
// For example, if the whole request URL is
// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy,
// then Path is _search?q=user:kimchy.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
// Namespace of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
// UID of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
// API version of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
// Specific resourceVersion to which this reference is made, if any.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
// If referring to a piece of an object instead of an entire object, this string
// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
// For example, if the object reference is to a container within a pod, this would take on a value like:
// "spec.containers{name}" (where "name" refers to the name of the container that triggered
// the event) or if no container name is specified "spec.containers[2]" (container with
// index 2 in this pod). This syntax is chosen only to have some well-defined way of
// referencing a part of an object.
// TODO: this design is not final and this field is subject to change in the future.
// +optional
FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// TODO: Add other useful fields. apiVersion, kind, uid?
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to a serialized object.
type SerializedReference struct {
metav1.TypeMeta `json:",inline"`
// The reference to an object in the system.
// +optional
Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information about the reporter of an event.
type EventSource struct {
// Component from which the event is generated.
// +optional
Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
// Node name on which the event is generated.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future).
const (
// EventTypeNormal is information only and will not cause any problems.
EventTypeNormal string = "Normal"
// EventTypeWarning events are to warn that something might go wrong.
EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
// The object that this event is about.
InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
// This should be a short, machine understandable string that gives the reason
// for the transition into the object's current status.
// TODO: provide exact specification for format.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// A human-readable description of the status of this operation.
// TODO: decide on maximum length.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// The component reporting this event. Should be a short machine understandable string.
// +optional
Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
// +optional
FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
// The time at which the most recent occurrence of this event was recorded.
// +optional
LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
// The number of times this event has occurred.
// +optional
Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
// Type of this event (Normal, Warning), new types could be added in the future.
// +optional
Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of events.
Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List metav1.List
// LimitType is a type of object that is limited.
type LimitType string
const (
// LimitTypePod is a limit that applies to all pods in a namespace.
LimitTypePod LimitType = "Pod"
// LimitTypeContainer is a limit that applies to all containers in a namespace.
LimitTypeContainer LimitType = "Container"
// LimitTypePersistentVolumeClaim is a limit that applies to all persistent volume claims in a namespace.
LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
// Type of resource that this limit applies to.
// +optional
Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
// Max usage constraints on this kind by resource name.
// +optional
Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
// Min usage constraints on this kind by resource name.
// +optional
Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
// Default resource requirement limit value by resource name if resource limit is omitted.
// +optional
Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
// +optional
DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
// +optional
MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
// Limits is the list of LimitRangeItem objects that are enforced.
Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the limits enforced.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of LimitRange objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md
Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types.
const (
// Pods, number.
ResourcePods ResourceName = "pods"
// Services, number.
ResourceServices ResourceName = "services"
// ReplicationControllers, number.
ResourceReplicationControllers ResourceName = "replicationcontrollers"
// ResourceQuotas, number.
ResourceQuotas ResourceName = "resourcequotas"
// ResourceSecrets, number.
ResourceSecrets ResourceName = "secrets"
// ResourceConfigMaps, number.
ResourceConfigMaps ResourceName = "configmaps"
// ResourcePersistentVolumeClaims, number.
ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
// ResourceServicesNodePorts, number.
ResourceServicesNodePorts ResourceName = "services.nodeports"
// ResourceServicesLoadBalancers, number.
ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
// CPU request, in cores. (500m = .5 cores)
ResourceRequestsCPU ResourceName = "requests.cpu"
// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsMemory ResourceName = "requests.memory"
// Storage request, in bytes.
ResourceRequestsStorage ResourceName = "requests.storage"
// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
// CPU limit, in cores. (500m = .5 cores)
ResourceLimitsCPU ResourceName = "limits.cpu"
// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsMemory ResourceName = "limits.memory"
// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota.
type ResourceQuotaScope string
const (
// Match all pod objects where spec.activeDeadlineSeconds is set.
ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
// Match all pod objects where spec.activeDeadlineSeconds is not set.
ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
// Match all pod objects that have best effort quality of service.
ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
// Match all pod objects that do not have best effort quality of service.
ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
// Hard is the set of desired hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// A collection of filters that must match each object tracked by a quota.
// If not specified, the quota matches all objects.
// +optional
Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
// +optional
Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace.
// +optional
Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace.
type ResourceQuota struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired quota.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status defines the actual enforced quota and its current usage.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of ResourceQuota objects.
// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the secret data. Each key must consist of alphanumeric
// characters, '-', '_' or '.'. The serialized form of the secret data is a
// base64 encoded string, representing the arbitrary (possibly non-string)
// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
// +optional
Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
// stringData allows specifying non-binary secret data in string form.
// It is provided as a write-only convenience method.
// All keys and values are merged into the data field on write, overwriting any existing values.
// It is never output when reading from the API.
// +k8s:conversion-gen=false
// +optional
StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
// Used to facilitate programmatic handling of secret data.
// +optional
Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the upper bound, in bytes, on the total size of the values
// in a Secret's Data field (see Secret).
const MaxSecretSize = 1 * 1024 * 1024
// SecretType is used to facilitate programmatic handling of secret data.
type SecretType string
const (
// SecretTypeOpaque is the default. Arbitrary user-defined data
SecretTypeOpaque SecretType = "Opaque"
// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
//
// Required fields:
// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
// - Secret.Data["token"] - a token that identifies the service account to the API
SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountNameKey = "kubernetes.io/service-account.name"
// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
ServiceAccountTokenKey = "token"
// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
ServiceAccountRootCAKey = "ca.crt"
// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
ServiceAccountNamespaceKey = "namespace"
// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
//
// Required fields:
// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
DockerConfigKey = ".dockercfg"
// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
//
// Required fields:
// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
DockerConfigJsonKey = ".dockerconfigjson"
// SecretTypeBasicAuth contains data needed for basic authentication.
//
// Required at least one of fields:
// - Secret.Data["username"] - username used for authentication
// - Secret.Data["password"] - password or token needed for authentication
SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
BasicAuthUsernameKey = "username"
// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
BasicAuthPasswordKey = "password"
// SecretTypeSSHAuth contains data needed for SSH authentication.
//
// Required field:
// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
SSHAuthPrivateKey = "ssh-privatekey"
// SecretTypeTLS contains information about a TLS client or server secret. It
// is primarily used with TLS termination of the Ingress resource, but may be
// used in other types.
//
// Required fields:
// - Secret.Data["tls.key"] - TLS private key.
// - Secret.Data["tls.crt"] - TLS certificate.
// TODO: Consider supporting different formats, specifying CA/destinationCA.
SecretTypeTLS SecretType = "kubernetes.io/tls"
// TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls.crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is a list of secret objects.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Data contains the configuration data.
// Each key must consist of alphanumeric characters, '-', '_' or '.'.
// +optional
Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of ConfigMaps.
Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type of a component condition, used for
// component health validation.
type ComponentConditionType string
// These are the valid conditions for the component.
const (
// ComponentHealthy indicates the component is healthy.
ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
// Type of condition for a component.
// Valid value: "Healthy"
Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
// Status of the condition for a component.
// Valid values for "Healthy": "True", "False", or "Unknown".
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Message about the condition for a component.
// For example, information about a health check.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// Condition error code for a component.
// For example, a health check error code.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of component conditions observed.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList is the status of all the conditions for the component
// as a list of ComponentStatus objects.
type ComponentStatusList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ComponentStatus objects.
Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
// Items is a list of downward API volume file.
// +optional
Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
const (
// DownwardAPIVolumeSourceDefaultMode is the default mode bits (0644) applied
// to created files when DefaultMode is not specified.
DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field.
type DownwardAPIVolumeFile struct {
// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
// +optional
FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
// Selects a resource of the container: only resources limits and requests
// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
// +optional
ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
// Optional: mode bits to use on this file, must be a value between 0
// and 0777. If not specified, the volume defaultMode will be used.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a
// projected volume.
// Note that this is identical to a downwardAPI volume source without the
// default mode (the projected volume carries the mode instead).
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file projections.
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}
// SELinuxOptions are the labels to be applied to the container.
type SELinuxOptions struct {
	// User is a SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is a SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is a SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is a SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type; it records which values of a range
// (for example service IPs or node ports) are currently allocated.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
	// DefaultSchedulerName ("default-scheduler") is the name of the default scheduler.
	DefaultSchedulerName = "default-scheduler"
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit
	// PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int = 1
)
// Sysctl defines a kernel parameter to be set.
// NOTE(review): unlike most types in this file, Sysctl carries only protobuf
// tags and no json tags — presumably intentional, but worth confirming.
type Sysctl struct {
	// Name of a property to set.
	Name string `protobuf:"bytes,1,opt,name=name"`
	// Value of a property to set.
	Value string `protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node.
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Query-parameter and header names used by the remote command execution,
// attach, and port-forward subprotocols (exec/attach/port-forward streams).
const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"
	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"
	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
fix kube-proxy panic
Kubernetes-commit: 1faa6f56b9c026b58d17e9163bf0b26a8b4086b4
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// The comments for the structs and fields can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored and not exported to the SwaggerAPI.
//
// The aforementioned methods can be generated by hack/update-generated-swagger-docs.sh
// Common string formats
// ---------------------
// Many fields in this API have formatting requirements. The commonly used
// formats are defined here.
//
// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier"
// in the C language. This is captured by the following regex:
// [A-Za-z_][A-Za-z0-9_]*
// This defines the format, but not the length restriction, which should be
// specified at the definition of any field of this type.
//
// DNS_LABEL: This is a string, no more than 63 characters long, that conforms
// to the definition of a "label" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?
//
// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms
// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured
// by the following regex:
// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*
// or more simply:
// DNS_LABEL(\.DNS_LABEL)*
//
// IANA_SVC_NAME: This is a string, no more than 15 characters long, that
// conforms to the definition of IANA service name in RFC 6335.
// It must contain at least one letter [a-z] and it must contain only [a-z0-9-].
// Hyphens ('-') cannot be the leading or trailing character of the string
// and cannot be adjacent to other hyphens.
// ObjectMeta is metadata that all persisted resources must have, which includes all objects
// users must create.
// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon.
// +k8s:openapi-gen=false
type ObjectMeta struct {
	// Name must be unique within a namespace. Is required when creating resources, although
	// some resources may allow a client to request the generation of an appropriate name
	// automatically. Name is primarily intended for creation idempotence and configuration
	// definition.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// GenerateName is an optional prefix, used by the server, to generate a unique
	// name ONLY IF the Name field has not been provided.
	// If this field is used, the name returned to the client will be different
	// than the name passed. This value will also be combined with a unique suffix.
	// The provided value has the same validation rules as the Name field,
	// and may be truncated by the length of the suffix required to make the value
	// unique on the server.
	//
	// If this field is specified and the generated name exists, the server will
	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
	// should retry (optionally after the time indicated in the Retry-After header).
	//
	// Applied only if Name is not specified.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency
	// +optional
	GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"`
	// Namespace defines the space within each name must be unique. An empty namespace is
	// equivalent to the "default" namespace, but "default" is the canonical representation.
	// Not all objects are required to be scoped to a namespace - the value of this field for
	// those objects will be empty.
	//
	// Must be a DNS_LABEL.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"`
	// SelfLink is a URL representing this object.
	// Populated by the system.
	// Read-only.
	// +optional
	SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"`
	// UID is the unique in time and space value for this object. It is typically generated by
	// the server on successful creation of a resource and is not allowed to change on PUT
	// operations.
	//
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// An opaque value that represents the internal version of this object that can
	// be used by clients to determine when objects have changed. May be used for optimistic
	// concurrency, change detection, and the watch operation on a resource or set of resources.
	// Clients must treat these values as opaque and passed unmodified back to the server.
	// They may only be valid for a particular resource or set of resources.
	//
	// Populated by the system.
	// Read-only.
	// Value must be treated as opaque by clients and passed unmodified back to the server.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// A sequence number representing a specific generation of the desired state.
	// Populated by the system. Read-only.
	// +optional
	Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"`
	// CreationTimestamp is a timestamp representing the server time when this object was
	// created. It is not guaranteed to be set in happens-before order across separate operations.
	// Clients may not set this value. It is represented in RFC3339 form and is in UTC.
	//
	// Populated by the system.
	// Read-only.
	// Null for lists.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	CreationTimestamp metav1.Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"`
	// DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
	// field is set by the server when a graceful deletion is requested by the user, and is not
	// directly settable by a client. The resource is expected to be deleted (no longer visible
	// from resource lists, and not reachable by name) after the time in this field. Once set,
	// this value may not be unset or be set further into the future, although it may be shortened
	// or the resource may be deleted prior to this time. For example, a user may request that
	// a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination
	// signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard
	// termination signal (SIGKILL) to the container and after cleanup, remove the pod from the
	// API. In the presence of network partitions, this object may still exist after this
	// timestamp, until an administrator or automated process can determine the resource is
	// fully terminated.
	// If not set, graceful deletion of the object has not been requested.
	//
	// Populated by the system when a graceful deletion is requested.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	DeletionTimestamp *metav1.Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"`
	// Number of seconds allowed for this object to gracefully terminate before
	// it will be removed from the system. Only set when deletionTimestamp is also set.
	// May only be shortened.
	// Read-only.
	// +optional
	DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"`
	// Map of string keys and values that can be used to organize and categorize
	// (scope and select) objects. May match selectors of replication controllers
	// and services.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
	// +optional
	Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"`
	// Annotations is an unstructured key value map stored with a resource that may be
	// set by external tools to store and retrieve arbitrary metadata. They are not
	// queryable and should be preserved when modifying objects.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
	// +optional
	Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"`
	// List of objects depended by this object. If ALL objects in the list have
	// been deleted, this object will be garbage collected. If this object is managed by a controller,
	// then an entry in this list will point to this controller, with the controller field set to true.
	// There cannot be more than one managing controller.
	// +optional
	// +patchMergeKey=uid
	// +patchStrategy=merge
	OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"`
	// An initializer is a controller which enforces some system invariant at object creation time.
	// This field is a list of initializers that have not yet acted on this object. If nil or empty,
	// this object has been completely initialized. Otherwise, the object is considered uninitialized
	// and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to
	// observe uninitialized objects.
	//
	// When an object is created, the system will populate this list with the current set of initializers.
	// Only privileged users may set or modify this list. Once it is empty, it may not be modified further
	// by any user.
	Initializers *metav1.Initializers `json:"initializers,omitempty" patchStrategy:"merge" protobuf:"bytes,16,rep,name=initializers"`
	// Must be empty before the object is deleted from the registry. Each entry
	// is an identifier for the responsible component that will remove the entry
	// from the list. If the deletionTimestamp of the object is non-nil, entries
	// in this list can only be removed.
	// +optional
	// +patchStrategy=merge
	Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
	// The name of the cluster which the object belongs to.
	// This is used to distinguish resources with same name and namespace in different clusters.
	// This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.
	// +optional
	ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"`
}
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients.
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces.
	NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
	// Volume's name.
	// Must be a DNS_LABEL and unique within the pod.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// VolumeSource represents the location and type of the mounted volume
	// (embedded inline, so its fields appear directly on Volume in JSON).
	// If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// VolumeSource represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
	// HostPath represents a pre-existing file or directory on the host
	// machine that is directly exposed to the container. This is generally
	// used for system agents or other privileged things that are allowed
	// to see the host machine. Most containers will NOT need this.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// ---
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
	// EmptyDir represents a temporary directory that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
	// GitRepo represents a git repository at a particular revision.
	// +optional
	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
	// Secret represents a secret that should populate this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
	// NFS represents an NFS mount on the host that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
	// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
	// PersistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin. This is an
	// alpha feature and may change in future.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
	// +optional
	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running.
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
	// DownwardAPI represents downward API about the pod that should populate this volume.
	// +optional
	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
	// ConfigMap represents a configMap that should populate this volume.
	// +optional
	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// Projected items for all in one resources: secrets, configmaps, and downward API.
	// (Note the protobuf field number 26 is intentionally out of declaration order.)
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine.
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
	// +optional
	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
	// Will force the ReadOnly setting in VolumeMounts.
	// Default false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
	// HostPath represents a directory on the host.
	// Provisioned by a developer or tester.
	// This is useful for single-node development and testing only!
	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
	// Glusterfs represents a Glusterfs volume that is attached to a host and
	// exposed to the pod. Provisioned by an admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
	// NFS represents an NFS mount on the host. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime.
	// (Uses the persistent-volume variant of the CephFS source, unlike VolumeSource.)
	// +optional
	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running.
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin. This is an
	// alpha feature and may change in future.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// (Uses the persistent-volume variant of the AzureFile source, unlike VolumeSource.)
	// +optional
	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine.
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime.
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine.
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine.
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
	// Local represents directly-attached storage with node affinity.
	// +optional
	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
	// +optional
	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
}
// Well-known annotation keys recognized on PersistentVolume/PersistentVolumeClaim objects.
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility.
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines the mount-option annotation used on PVs.
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
	// AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume.
	// Value is a string of the JSON representation of type NodeAffinity.
	AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity"
)
// The +genclient/+k8s marker comments below are directives consumed by the
// Kubernetes code generators (client-go clientset, deepcopy); do not remove them.
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// A description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// The actual volume backing the persistent volume.
	// Embedded inline: exactly one of the PersistentVolumeSource fields is expected to be set.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// AccessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// What happens to a persistent volume when released from its claim.
	// Valid options are Retain (default), Delete, and Recycle.
	// Recycling must be supported by the volume plugin underlying this persistent volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// Name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
type PersistentVolumeReclaimPolicy string

// Valid reclaim policies for a PersistentVolume.
const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeStatus is the current status of a persistent volume.
// All fields are populated by the system and are read-only for clients.
type PersistentVolumeStatus struct {
	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// A human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// Reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status of a persistent volume claim.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// A list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
//
// NOTE: protobuf field numbers here are not in declaration order (Selector is 4,
// StorageClassName is 5); they reflect the order fields were added and must
// never be renumbered, or wire compatibility breaks.
type PersistentVolumeClaimSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// A label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// Name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// Phase represents the current phase of PersistentVolumeClaim.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// AccessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// PersistentVolumeAccessMode defines how a volume may be mounted across hosts.
type PersistentVolumeAccessMode string

const (
	// ReadWriteOnce -- the volume can be mounted read/write mode to exactly 1 host.
	ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
	// ReadOnlyMany -- the volume can be mounted in read-only mode to many hosts.
	ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
	// ReadWriteMany -- the volume can be mounted in read/write mode to many hosts.
	ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
type PersistentVolumePhase string

const (
	// VolumePending is used for PersistentVolumes that are not available.
	VolumePending PersistentVolumePhase = "Pending"
	// VolumeAvailable is used for PersistentVolumes that are not yet bound.
	// Available volumes are held by the binder and matched to PersistentVolumeClaims.
	VolumeAvailable PersistentVolumePhase = "Available"
	// VolumeBound is used for PersistentVolumes that are bound.
	VolumeBound PersistentVolumePhase = "Bound"
	// VolumeReleased is used for PersistentVolumes where the bound PersistentVolumeClaim was deleted.
	// Released volumes must be recycled before becoming available again.
	// This phase is used by the persistent volume claim binder to signal to another process to reclaim the resource.
	VolumeReleased PersistentVolumePhase = "Released"
	// VolumeFailed is used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim.
	VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
type PersistentVolumeClaimPhase string

const (
	// ClaimPending is used for PersistentVolumeClaims that are not yet bound.
	ClaimPending PersistentVolumeClaimPhase = "Pending"
	// ClaimBound is used for PersistentVolumeClaims that are bound.
	ClaimBound PersistentVolumeClaimPhase = "Bound"
	// ClaimLost is used for PersistentVolumeClaims that lost their underlying
	// PersistentVolume. The claim was bound to a PersistentVolume and this
	// volume does not exist any longer and all data on it was lost.
	ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType constrains what must (or may) exist at a HostPath volume's path.
type HostPathType string

const (
	// HostPathUnset: for backward compatibility, leave it empty if unset.
	HostPathUnset HostPathType = ""
	// HostPathDirectoryOrCreate: if nothing exists at the given path, an empty directory will be created there
	// as needed with file mode 0755, having the same group and ownership with Kubelet.
	HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
	// HostPathDirectory: a directory must exist at the given path.
	HostPathDirectory HostPathType = "Directory"
	// HostPathFileOrCreate: if nothing exists at the given path, an empty file will be created there
	// as needed with file mode 0644, having the same group and ownership with Kubelet.
	HostPathFileOrCreate HostPathType = "FileOrCreate"
	// HostPathFile: a file must exist at the given path.
	HostPathFile HostPathType = "File"
	// HostPathSocket: a UNIX socket must exist at the given path.
	HostPathSocket HostPathType = "Socket"
	// HostPathCharDev: a character device must exist at the given path.
	HostPathCharDev HostPathType = "CharDevice"
	// HostPathBlockDev: a block device must exist at the given path.
	HostPathBlockDev HostPathType = "BlockDevice"
)
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
	// Path of the directory on the host.
	// If the path is a symlink, it will follow the link to the real path.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Type for HostPath Volume (see the HostPathType constants).
	// Defaults to "" (HostPathUnset).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// Represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
	// What type of storage medium should back this directory.
	// The default is "" which means to use the node's default medium.
	// Must be an empty string (default) or Memory.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
	// Total amount of local storage required for this EmptyDir volume.
	// The size limit is also applicable for memory medium.
	// The maximum usage on memory medium EmptyDir would be the minimum value between
	// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
	// The default is nil which means that the limit is undefined.
	// More info: https://kubernetes.io/docs/user-guide/volumes#emptydir
	// +optional
	SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
	// EndpointsName is the endpoint name that details Glusterfs topology.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
	// Path is the Glusterfs volume path.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
type RBDVolumeSource struct {
	// A collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// The rados image name.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// The rados pool name.
	// Default is rbd.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
	// The rados user name.
	// Default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
	// Keyring is the path to the key ring for RadosUser.
	// Default is /etc/ceph/keyring.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
	// SecretRef is the name of the authentication secret for RadosUser. If provided,
	// it overrides Keyring.
	// Default is nil.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in Openstack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
	// Volume ID used to identify the volume in cinder.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// The secret must live in the pod's own namespace (LocalObjectReference).
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace (unlike LocalObjectReference, which is namespace-local).
type SecretReference struct {
	// Name is unique within a namespace to reference a secret resource.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Namespace defines the space within which the secret name must be unique.
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
// This is the PersistentVolume variant of CephFSVolumeSource: the only
// difference is that SecretRef is a namespaced SecretReference rather than a
// pod-local LocalObjectReference.
type CephFSPersistentVolumeSource struct {
	// Required: Monitors is a collection of Ceph monitors.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
	// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
	// Optional: User is the rados user name, default is admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
	// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
	// Optional: SecretRef is reference to the authentication secret for User, default is empty.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// Represents a Flocker volume mounted by the Flocker agent.
// One and only one of DatasetName and DatasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
	// Name of the dataset stored as metadata -> name on the dataset for Flocker;
	// should be considered as deprecated.
	// +optional
	DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
	// UUID of the dataset. This is the unique identifier of a Flocker dataset.
	// +optional
	DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string

const (
	// StorageMediumDefault means use whatever the default is for the node.
	StorageMediumDefault StorageMedium = ""
	// StorageMediumMemory means use memory (tmpfs).
	StorageMediumMemory StorageMedium = "Memory"
)
// Protocol defines network protocols supported for things like container ports.
type Protocol string

const (
	// ProtocolTCP is the TCP protocol.
	ProtocolTCP Protocol = "TCP"
	// ProtocolUDP is the UDP protocol.
	ProtocolUDP Protocol = "UDP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
	// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// The partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
	// Registry represents a single or multiple Quobyte Registry services
	// specified as a string as host:port pair (multiple entries are separated with commas)
	// which acts as the central registry for volumes.
	Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
	// Volume is a string that references an already created Quobyte volume by name.
	Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
	// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// User to map volume access to.
	// Defaults to the service account user.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
	// Group to map volume access to.
	// Default is no group.
	// +optional
	Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
type FlexVolumeSource struct {
	// Driver is the name of the driver to use for this volume.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Optional: SecretRef is reference to the secret object containing
	// sensitive information to pass to the plugin scripts. This may be
	// empty if no secret object is specified. If the secret object
	// contains more than one secret, all secrets are passed to the plugin
	// scripts.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// Optional: Extra command options if any.
	// +optional
	Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// Represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
	// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// The partition in the volume that you want to mount.
	// If omitted, the default is to mount by volume name.
	// Examples: For volume /dev/sda1, you specify the partition as "1".
	// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
	// +optional
	Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
	// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
	// If omitted, the default is "false".
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
	// Repository URL.
	Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
	// Commit hash for the specified revision.
	// +optional
	Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
	// Target directory name.
	// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
	// git repository. Otherwise, if specified, the volume will contain the git repository in
	// the subdirectory with the given name.
	// +optional
	Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// Adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
	// Name of the secret in the pod's namespace to use.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
	// Specify whether the Secret or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// SecretVolumeSourceDefaultMode is the default file mode (0644) applied to
	// files created from a SecretVolumeSource when DefaultMode is unspecified.
	SecretVolumeSourceDefaultMode int32 = 0644
)
// Adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// Secret will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the Secret,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// Represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
	// Server is the hostname or IP address of the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
	// Path that is exported by the NFS server.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// ReadOnly here will force
	// the NFS export to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
type ISCSIVolumeSource struct {
	// iSCSI target portal. The portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
	// Target iSCSI Qualified Name.
	IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
	// iSCSI target lun number.
	Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
	// Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.
	// +optional
	ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
	// Filesystem type of the volume that you want to mount.
	// Tip: Ensure that the filesystem type is supported by the host operating system.
	// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
	// ReadOnly here will force the ReadOnly setting in VolumeMounts.
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
	// iSCSI target portal List. The portal is either an IP or ip_addr:port if the port
	// is other than default (typically TCP ports 860 and 3260).
	// +optional
	Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
	// whether support iSCSI Discovery CHAP authentication
	// +optional
	DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
	// whether support iSCSI Session CHAP authentication
	// +optional
	SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
	// CHAP secret for iSCSI target and initiator authentication
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
	// Custom iSCSI initiator name.
	// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
	// <target portal>:<volume name> will be created for the connection.
	// +optional
	InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
	// Optional: FC target worldwide names (WWNs)
	// +optional
	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
	// Optional: FC target lun number
	// +optional
	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// Optional: FC volume world wide identifiers (wwids)
	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
	// +optional
	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
	// the name of secret that contains Azure Storage Account Name and Key
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on the
// host and bind mount to the pod. Unlike AzureFileVolumeSource, it can reference
// the credentials secret in a namespace other than the pod's.
type AzureFilePersistentVolumeSource struct {
	// the name of secret that contains Azure Storage Account Name and Key
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// the namespace of the secret that contains Azure Storage Account Name and Key
	// default is the same as the Pod
	// +optional
	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
	// Path that identifies vSphere volume vmdk
	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Storage Policy Based Management (SPBM) profile name.
	// +optional
	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
	// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
	// +optional
	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
	// ID that identifies Photon Controller persistent disk
	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode for an Azure data disk.
type AzureDataDiskCachingMode string

// AzureDataDiskKind is the kind of Azure data disk (shared, dedicated, or managed).
type AzureDataDiskKind string

const (
	AzureDataDiskCachingNone      AzureDataDiskCachingMode = "None"
	AzureDataDiskCachingReadOnly  AzureDataDiskCachingMode = "ReadOnly"
	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"

	AzureSharedBlobDisk    AzureDataDiskKind = "Shared"
	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
	AzureManagedDisk       AzureDataDiskKind = "Managed"
)
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
	// The Name of the data disk in the blob storage
	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
	// The URI the data disk in the blob storage
	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
	// Host Caching mode: None, Read Only, Read Write.
	// +optional
	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
	// Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
	// VolumeID uniquely identifies a Portworx volume
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// FSType represents the filesystem type to mount
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume
type ScaleIOVolumeSource struct {
	// The host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// The name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// SecretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// Flag to enable/disable SSL communication with Gateway, default false
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// The name of the Protection Domain for the configured storage (defaults to "default").
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// The Storage Pool associated with the protection domain (defaults to "default").
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// Indicates whether the storage for a volume should be thick or thin (defaults to "thin").
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// The name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// Represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Represents a StorageOS persistent volume resource.
// Unlike StorageOSVolumeSource, SecretRef here is a full ObjectReference so the
// credentials secret may live in any namespace.
type StorageOSPersistentVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
	// Specify whether the ConfigMap or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// ConfigMapVolumeSourceDefaultMode is the default file mode (0644) applied to
	// files created from a ConfigMapVolumeSource when DefaultMode is unspecified.
	ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// Adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Specify whether the ConfigMap or its keys must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// Represents a projected volume source
type ProjectedVolumeSource struct {
	// list of volume projections
	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
	// Mode bits to use on created files by default. Must be a value between
	// 0 and 0777.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// Projection that may be projected along with other supported volume types.
// Exactly one of the fields is expected to be set per entry.
type VolumeProjection struct {
	// all types below are the supported types for projection into the same volume

	// information about the secret data to project
	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
	// information about the downwardAPI data to project
	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
	// information about the configMap data to project
	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}
const (
	// ProjectedVolumeSourceDefaultMode is the default file mode (0644) applied to
	// files created from a ProjectedVolumeSource when DefaultMode is unspecified.
	ProjectedVolumeSourceDefaultMode int32 = 0644
)
// Maps a string key to a path within a volume.
type KeyToPath struct {
	// The key to project.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// The relative path of the file to map the key to.
	// May not be an absolute path.
	// May not contain the path element '..'.
	// May not start with the string '..'.
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// Local represents directly-attached storage with node affinity
type LocalVolumeSource struct {
	// The full path to the volume on the node
	// For alpha, this path must be a directory
	// Once block as a source is supported, then this path can point to a block device
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Number of port to expose on the host.
	// If specified, this must be a valid port number, 0 < x < 65536.
	// If HostNetwork is specified, this must match ContainerPort.
	// Most containers do not need this.
	// +optional
	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
	// Protocol for port. Must be UDP or TCP.
	// Defaults to "TCP".
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
	// What host IP to bind the external port to.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// This must match the Name of a Volume.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to container and the other way around.
	// When not set, MountPropagationHostToContainer is used.
	// This field is alpha in 1.8 and can be reworked or removed in a future
	// release.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation.
type MountPropagationMode string

const (
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// Optional: no more than one of the following may be specified.

	// Variable references $(VAR_NAME) are expanded
	// using the previous defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. The $(VAR_NAME)
	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
	// references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and their output format
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1"
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// Selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
// to populate environment variables from.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes
// (see HTTPGetAction.HTTPHeaders).
type HTTPHeader struct {
	// The header field name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value.
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
// Valid values are enumerated below.
type URIScheme string
const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container.
	// Handler is embedded (inlined), so exactly one of its Exec/HTTPGet/TCPSocket
	// fields provides the probe mechanism.
	Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Default to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string
const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string
const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capability type (e.g. "NET_ADMIN").
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container.
	// See Container.TerminationMessagePath, which defaults to this value.
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
//
// NOTE: protobuf field numbers are not sequential (e.g. envFrom=19,
// terminationMessagePolicy=20); they are fixed wire-format assignments
// and must never be renumbered.
type Container struct {
	// Name of the container specified as a DNS_LABEL.
	// Each container in a pod must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Docker image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	Image string `json:"image" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The docker image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The docker image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Security options the pod should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://git.k8s.io/community/contributors/design-proposals/security_context.md
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.
	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
	// One and only one of the following should be specified.
	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// TCPSocket specifies an action involving a TCP port.
	// TCP hooks not yet supported
	// TODO: implement a realistic TCP lifecycle hook
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails,
	// the container is terminated and restarted according to its restart policy.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
	// PreStop is called immediately before a container is terminated.
	// The container is terminated after the handler completes.
	// The reason for termination is passed to the handler.
	// Regardless of the outcome of the handler, the container is eventually terminated.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition; one of True, False, Unknown.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue ConditionStatus = "True"
	ConditionFalse ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
	// (brief) reason the container is not yet running.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
	// Message regarding why the container is not yet running.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
	// Time at which the container was last (re-)started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
	// Exit status from the last termination of the container.
	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
	// Signal from the last termination of the container.
	// +optional
	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
	// (brief) reason from the last termination of the container.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message regarding the last termination of the container.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Time at which previous execution of the container started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
	// Time at which the container last terminated.
	// +optional
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
	// Details about a waiting container
	// +optional
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
	// Details about a running container
	// +optional
	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
	// Details about a terminated container
	// +optional
	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted, currently based on
	// the number of dead containers that have not yet been removed.
	// Note that this is calculated from dead containers. But those containers are subject to
	// garbage collection. This value will get capped at 5 by GC.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// TODO(dchen1107): Which image the container is running with?
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string
// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type.
type PodConditionType string
// These are valid conditions of pod.
const (
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	// Note: intentionally untyped (plain string) — it is a Reason value, not a condition type.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
	// Type is the type of the condition.
	// Currently only Ready.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string
const (
	// RestartPolicyAlways restarts the container regardless of exit status.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyOnFailure restarts the container only on non-zero exit.
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	// RestartPolicyNever never restarts the container.
	RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string
const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"
	// DefaultTerminationGracePeriodSeconds is the default grace period, in
	// seconds, before a pod is forcibly terminated.
	// NOTE(review): unrelated to DNSPolicy — consider moving to its own const block.
	DefaultTerminationGracePeriodSeconds = 30
)
// NodeSelector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// NodeSelectorTerm is a conjunction of requirements.
// A null or empty node selector term matches no objects.
type NodeSelectorTerm struct {
	// Required. A list of node selector requirements. The requirements are ANDed.
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions" protobuf:"bytes,1,rep,name=matchExpressions"`
}
// NodeSelectorRequirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// NodeSelectorOperator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string
const (
	NodeSelectorOpIn NodeSelectorOperator = "In"
	NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
	NodeSelectorOpExists NodeSelectorOperator = "Exists"
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	NodeSelectorOpGt NodeSelectorOperator = "Gt"
	NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// PodAffinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// PodAntiAffinity is a group of inter pod anti affinity scheduling rules.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm  `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// WeightedPodAffinityTerm pairs a weight with a PodAffinityTerm.
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s).
type WeightedPodAffinityTerm struct {
	// weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// PodAffinityTerm defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies which namespaces the labelSelector applies to (matches against);
	// null or empty list means "this pod's namespace"
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies"
	// ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains);
	// for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.
	// +optional
	TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"`
}
// NodeAffinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// PreferredSchedulingTerm pairs a weight with a NodeSelectorTerm.
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// Taint is attached to a node; the node has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Required. The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
type TaintEffect string
// Valid TaintEffect values.
const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// Toleration is attached to a pod; the pod tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// TolerationOperator is the set of operators that can be used in a toleration.
type TolerationOperator string
// Valid TolerationOperator values.
const (
	// TolerationOpExists matches any taint value for the given key.
	TolerationOpExists TolerationOperator = "Exists"
	// TolerationOpEqual matches only an exactly equal taint value.
	TolerationOpEqual TolerationOperator = "Equal"
)
// Annotation keys used to carry init-container data on the Pod object
// during the alpha/beta transition of the init-containers feature.
const (
	// This annotation key will be used to contain an array of v1 JSON encoded Containers
	// for init containers. The annotation will be placed into the internal type and cleared.
	// This key is only recognized by version >= 1.4.
	PodInitContainersBetaAnnotationKey = "pod.beta.kubernetes.io/init-containers"
	// This annotation key will be used to contain an array of v1 JSON encoded Containers
	// for init containers. The annotation will be placed into the internal type and cleared.
	// This key is recognized by version >= 1.3. For version 1.4 code, this key
	// will have its value copied to the beta key.
	PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
	// This annotation key will be used to contain an array of v1 JSON encoded
	// ContainerStatuses for init containers. The annotation will be placed into the internal
	// type and cleared. This key is only recognized by version >= 1.4.
	PodInitContainerStatusesBetaAnnotationKey = "pod.beta.kubernetes.io/init-container-statuses"
	// This annotation key will be used to contain an array of v1 JSON encoded
	// ContainerStatuses for init containers. The annotation will be placed into the internal
	// type and cleared. This key is recognized by version >= 1.3. For version 1.4 code,
	// this key will have its value copied to the beta key.
	PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
)
// PodSpec is a description of a pod.
type PodSpec struct {
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
	// List of initialization containers belonging to the pod.
	// Init containers are executed in order prior to containers being started. If any
	// init container fails, the pod is considered to have failed and is handled according
	// to its restartPolicy. The name for an init container or normal container must be
	// unique among all containers.
	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
	// The resourceRequirements of an init container are taken into account during scheduling
	// by finding the highest request/limit for each resource type, and then using the max of
	// that value or the sum of the normal containers. Limits are applied to init containers
	// in a similar fashion.
	// Init containers cannot currently be added or removed.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
	// List of containers belonging to the pod.
	// Containers cannot currently be added or removed.
	// There must be at least one container in a Pod.
	// Cannot be updated.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
	// Restart policy for all containers within the pod.
	// One of Always, OnFailure, Never.
	// Default to Always.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
	// +optional
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
	// Optional duration in seconds the pod may be active on the node relative to
	// StartTime before the system will actively try to mark it failed and kill associated containers.
	// Value must be a positive integer.
	// +optional
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
	// Set DNS policy for containers within the pod.
	// One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'.
	// Defaults to "ClusterFirst".
	// To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.
	// +optional
	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
	// Deprecated: Use serviceAccountName instead.
	// +k8s:conversion-gen=false
	// +optional
	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
	// requirements.
	// +optional
	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
	// Host networking requested for this pod. Use the host's network namespace.
	// If this option is set, the ports that will be used must be specified.
	// Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
	// Use the host's pid namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
	// Use the host's ipc namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty.  See type description for default values of each field.
	// +optional
	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
	// in the case of docker, only DockerConfig type secrets are honored.
	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
	// Specifies the hostname of the Pod
	// If not specified, the pod's hostname will be set to a system-defined value.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the pod will not have a domainname at all.
	// +optional
	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
	// If specified, the pod's scheduling constraints
	// +optional
	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// If specified, the pod will be dispatched by specified scheduler.
	// If not specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +patchMergeKey=ip
	// +patchStrategy=merge
	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
	// If specified, indicates the pod's priority. "SYSTEM" is a special keyword
	// which indicates the highest priority. Any other name must be defined by
	// creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
	// The priority value. Various system components use this field to find the
	// priority of the pod. When Priority Admission Controller is enabled, it
	// prevents users from setting this field. The admission controller populates
	// this field from PriorityClassName.
	// The higher the value, the higher the priority.
	// +optional
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames for the above IP address.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext.  Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container.  May also be set in SecurityContext.  If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext.  If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID.  If unspecified, no groups will be added to
	// any container.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported qos classes of Pods.
type PodQOSClass string
// Valid PodQOSClass values.
const (
	// PodQOSGuaranteed is the Guaranteed qos class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable qos class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort qos class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
// NOTE: protobuf field numbers are not sequential (e.g. 10 before 8 below);
// they reflect the order fields were added and must never be renumbered.
type PodStatus struct {
	// Current condition of the pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest. Each entry is currently the output
	// of `docker inspect`.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
	// See PodQOSClass type for available QOS classes
	// More info: https://github.com/kubernetes/kubernetes/blob/master/docs/design/resource-qos.md
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded and decoded.
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template.
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates.
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready)
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected.
	// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is a valid value for
// ReplicationControllerCondition.Type.
type ReplicationControllerConditionType string
// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition (see the
	// ReplicationControllerConditionType constants, e.g. ReplicaFailure).
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// (The directives above generate the typed client — including the scale
// subresource accessors — and the deepcopy methods for this type.)
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type of a Service
// ("ClientIP" or "None").
type ServiceAffinity string
const (
	// ServiceAffinityClientIP is Client IP based session affinity.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)
// DefaultClientIPServiceAffinitySeconds is the default session sticky time
// for ClientIP affinity: 10800 seconds (3 hours).
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800 (for 3 hours).
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service, i.e. how the
// service is exposed.
type ServiceType string
const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"
	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"
	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType describes how external traffic for a
// service is routed ("Local" or "Cluster").
type ServiceExternalTrafficPolicyType string
const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
// Currently this carries only load-balancer state.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers)
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers)
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly by the master. If an address is specified manually and is not in
	// use by others, it will be allocated to the service; otherwise, creation
	// of the service will fail. This field can not be changed through updates.
	// Valid values are "None", empty string (""), or a valid IP address. "None"
	// can be specified for headless services when proxying is not required.
	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
	// type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ExternalName" maps to the specified externalName.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
	// endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object. If clusterIP is
	// "None", no virtual IP is allocated and the endpoints are published as a
	// set of endpoints rather than a stable IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an
	// external load-balancer (if supported in the current cloud) which routes
	// to the clusterIP.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer
	// LoadBalancer will get created with the IP specified in this field.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, traffic through the
	// cloud-provider load-balancer will be restricted to the specified client
	// IPs. This field will be ignored if the cloud-provider does not support
	// the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that kubedns or equivalent will
	// return as a CNAME record for this service. No proxying will be involved.
	// Must be a valid DNS name and requires Type to be ExternalName.
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and NodePort
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// If not specified, HealthCheckNodePort is created by the service api
	// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only takes effect when Type is set to LoadBalancer
	// and ExternalTrafficPolicy is set to Local.
	// NOTE(review): the protobuf tag declares "bytes" for this int32 field;
	// "varint" would be expected — confirm against the generated .proto before
	// changing anything here.
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
	// must publish the notReadyAddresses of subsets for the Endpoints associated with
	// the Service. The default value is false.
	// The primary use case for setting this field is to use a StatefulSet's Headless Service
	// to propagate SRV records for its Pods without respect to their readiness for purpose
	// of peer discovery.
	// This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints
	// when that annotation is deprecated and all clients have been converted to use this
	// field.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. This maps to
	// the 'Name' field in EndpointPort objects.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port.
	// Must be UDP or TCP.
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
	// Usually assigned by the system. If specified, it will be allocated to the service
	// if unused or else creation of the service will fail.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone indicates that no cluster IP should be assigned:
	// no proxying is required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of services.
	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Secrets is the list of secrets allowed to be used by pods running as this ServiceAccount.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
	// +optional
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
	// Can be overridden at the pod level.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects.
type ServiceAccountList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ServiceAccounts.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
//   Name: "mysvc",
//   Subsets: [
//     {
//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//     },
//     {
//       Addresses: [{"ip": "10.10.3.3"}],
//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
//     },
//  ]
type Endpoints struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Sets of addresses and ports that comprise a service.
	// The set of all endpoints is the union of all subsets. Addresses are placed into
	// subsets according to the IPs they share. A single address with multiple ports,
	// some of which are ready and some of which are not (because they come from
	// different containers) will result in the address being displayed in different
	// subsets for the different ports. No address will appear in both Addresses and
	// NotReadyAddresses in the same subset.
	Subsets []EndpointSubset `json:"subsets" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
//   {
//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//   }
// The resulting set of endpoints can be viewed as:
//     a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
//     b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
	// IP addresses which offer the related ports that are marked as ready. These endpoints
	// should be considered safe for load balancers and clients to utilize.
	// +optional
	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
	// IP addresses which offer the related ports but are not currently marked as ready
	// because they have not yet finished starting, have recently failed a readiness check,
	// or have recently failed a liveness check.
	// +optional
	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
	// Port numbers available on the related IP addresses
	// (they apply to every address in this subset; see the Cartesian product above).
	// +optional
	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
	// The IP of this endpoint.
	// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
	// or link-local multicast (224.0.0.0/24).
	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
	// kubernetes components, like kube-proxy, are not IPv6 ready.
	// TODO: This should allow hostname or IP, See #4447.
	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
	// The Hostname of this endpoint.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
	// +optional
	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
	// Reference to object providing the endpoint.
	// +optional
	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
	// The name of this port (corresponds to ServicePort.Name).
	// Must be a DNS_LABEL.
	// Optional only if one port is defined; required when multiple ports exist.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The port number of the endpoint.
	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
	// The IP protocol for this port.
	// Must be UDP or TCP.
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of Endpoints objects.
	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
	// PodCIDR represents the pod IP range assigned to the node.
	// +optional
	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
	// External ID of the node assigned by some machine database (e.g. a cloud provider).
	// Deprecated.
	// +optional
	ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
	// +optional
	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
	// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
	// +optional
	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
	// If specified, the node's taints.
	// NOTE(review): the protobuf tag uses "opt" for this repeated field, where
	// "rep" would be expected — confirm against the generated .proto before changing.
	// +optional
	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
	// If specified, the source to get node configuration from
	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
	// +optional
	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
	metav1.TypeMeta `json:",inline"`
	// ConfigMapRef is a reference to an object holding the node configuration.
	ConfigMapRef *ObjectReference `json:"configMapRef,omitempty" protobuf:"bytes,1,opt,name=configMapRef"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
	// The port tag was not properly in quotes in earlier releases, so it must be
	// uppercased for backwards compat (since it was falling back to var name of
	// 'Port').
	//
	// Port number of the given endpoint.
	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
	// Endpoint on which Kubelet is listening.
	// +optional
	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
	// MachineID reported by the node. For unique machine identification
	// in the cluster this field is preferred. Learn more from man(5)
	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
	// SystemUUID reported by the node. For unique machine identification
	// MachineID is preferred. This field is specific to Red Hat hosts
	// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
	// Boot ID reported by the node.
	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
	// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
	// Kubelet Version reported by the node.
	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
	// KubeProxy Version reported by the node.
	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
	// The Operating System reported by the node.
	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
	// The Architecture reported by the node.
	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
	// Capacity represents the total resources of a node.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Allocatable represents the resources of a node that are available for scheduling.
	// Defaults to Capacity.
	// +optional
	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
	// Phase is the recently observed lifecycle phase of the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
	// The field is never populated, and now is deprecated.
	// +optional
	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
	// Conditions is an array of current observed node conditions.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// Addresses is the list of addresses reachable to the node.
	// Queried from cloud provider, if available.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
	// DaemonEndpoints lists the endpoints of daemons running on the Node.
	// +optional
	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
	// NodeInfo is the set of ids/uuids that uniquely identify the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
	// +optional
	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
	// Images is the list of container images on this node.
	// +optional
	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
	// VolumesInUse is the list of attachable volumes in use (mounted) by the node.
	// +optional
	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
	// VolumesAttached is the list of volumes that are attached to the node.
	// +optional
	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
}

// UniqueVolumeName uniquely identifies an attached volume reported in NodeStatus.
type UniqueVolumeName string

// AttachedVolume describes a volume attached to a node.
type AttachedVolume struct {
	// Name of the attached volume.
	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
	// DevicePath represents the device path where the volume should be available.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}

// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
	// PreferAvoidPods is a bounded-sized list of signatures of pods that should avoid this
	// node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.
	// +optional
	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
	// PodSignature identifies the class of pods.
	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
	// EvictionTime is the time at which this entry was added to the list.
	// +optional
	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
	// Reason is a (brief) reason why this entry was added to the list.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message is a human readable message indicating why this entry was added to the list.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}

// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
	// PodController is a reference to the controller whose pods should avoid this node.
	// +optional
	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}

// ContainerImage describes a container image present on a node.
type ContainerImage struct {
	// Names by which this image is known.
	// e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
	// SizeBytes is the size of the image in bytes.
	// +optional
	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node, as reported in NodeStatus.Phase.
type NodePhase string

// These are the valid phases of node.
const (
	// NodePending means the node has been created/added by the system, but not configured.
	NodePending NodePhase = "Pending"
	// NodeRunning means the node has been configured and has Kubernetes components running.
	NodeRunning NodePhase = "Running"
	// NodeTerminated means the node has been removed from the cluster.
	NodeTerminated NodePhase = "Terminated"
)

// NodeConditionType identifies a condition reported in NodeStatus.Conditions.
type NodeConditionType string

// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
	// NodeReady means kubelet is healthy and ready to accept pods.
	NodeReady NodeConditionType = "Ready"
	// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
	// space on the node.
	NodeOutOfDisk NodeConditionType = "OutOfDisk"
	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
	NodeMemoryPressure NodeConditionType = "MemoryPressure"
	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
	// Type of node condition.
	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// LastHeartbeatTime is the last time we got an update on a given condition.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
	// LastTransitionTime is the last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Reason is a (brief) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Message is a human readable message indicating details about the last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}

// NodeAddressType identifies the kind of address reported in a NodeAddress.
type NodeAddressType string

// These are valid address types of node.
const (
	NodeHostName    NodeAddressType = "Hostname"
	NodeExternalIP  NodeAddressType = "ExternalIP"
	NodeInternalIP  NodeAddressType = "InternalIP"
	NodeExternalDNS NodeAddressType = "ExternalDNS"
	NodeInternalDNS NodeAddressType = "InternalDNS"
)

// NodeAddress contains information for the node's address.
type NodeAddress struct {
	// Node address type, one of Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS.
	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
	// The node address.
	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string

// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
	// ResourceCPU is CPU, in cores. (500m = .5 cores)
	ResourceCPU ResourceName = "cpu"
	// ResourceMemory is memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceMemory ResourceName = "memory"
	// ResourceStorage is volume size, in bytes. (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
	ResourceStorage ResourceName = "storage"
	// ResourceEphemeralStorage is local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
	// ResourceNvidiaGPU is NVIDIA GPU, in devices. Alpha, might change: although fractional
	// and allowing values >1, only one whole device per node is assigned.
	ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu"
)

const (
	// ResourceOpaqueIntPrefix is the namespace prefix for opaque counted resources (alpha).
	ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-"
	// ResourceDefaultNamespacePrefix is the default namespace prefix.
	ResourceDefaultNamespacePrefix = "kubernetes.io/"
)

// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a node.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the node.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of nodes.
	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string

// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
	FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
	// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#finalizers
	// +optional
	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}

// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
	// Phase is the current lifecycle phase of the namespace.
	// More info: https://git.k8s.io/community/contributors/design-proposals/namespaces.md#phases
	// +optional
	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}

// NamespacePhase is the lifecycle phase of a namespace, as reported in NamespaceStatus.Phase.
type NamespacePhase string

// These are the valid phases of a namespace.
const (
	// NamespaceActive means the namespace is available for use in the system.
	NamespaceActive NamespacePhase = "Active"
	// NamespaceTerminating means the namespace is undergoing graceful termination.
	NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of the Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current status of a Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NamespaceList is a list of Namespaces.
type NamespaceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of Namespace objects in the list.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Target is the object that you want to bind to the standard object.
	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
	// UID specifies the target UID.
	// +optional
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}

// DeletionPropagation decides if a deletion will propagate to the dependents of the object,
// and how the garbage collector will handle the propagation.
type DeletionPropagation string

const (
	// DeletePropagationOrphan orphans the dependents.
	DeletePropagationOrphan DeletionPropagation = "Orphan"
	// DeletePropagationBackground deletes the object from the key-value store; the garbage
	// collector will delete the dependents in the background.
	DeletePropagationBackground DeletionPropagation = "Background"
	// DeletePropagationForeground: the object exists in the key-value store until the garbage
	// collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the
	// key-value store. API server will put the "DeletingDependents" finalizer on the object, and
	// sets its deletionTimestamp. This policy is cascading, i.e., the dependents will be deleted
	// with Foreground.
	DeletePropagationForeground DeletionPropagation = "Foreground"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// DeleteOptions may be provided when deleting an API object.
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type DeleteOptions struct {
	metav1.TypeMeta `json:",inline"`
	// GracePeriodSeconds is the duration in seconds before the object should be deleted.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period for the specified type will be used.
	// Defaults to a per object value if not specified. zero means delete immediately.
	// +optional
	GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"`
	// Preconditions must be fulfilled before a deletion is carried out. If not possible,
	// a 409 Conflict status will be returned.
	// +optional
	Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"`
	// OrphanDependents: deprecated, please use the PropagationPolicy; this field will be
	// deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the "orphan"
	// finalizer will be added to/removed from the object's finalizers list.
	// Either this field or PropagationPolicy may be set, but not both.
	// +optional
	OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"`
	// PropagationPolicy determines whether and how garbage collection will be performed.
	// Either this field or OrphanDependents may be set, but not both.
	// The default policy is decided by the existing finalizer set in the
	// metadata.finalizers and the resource-specific default policy.
	// NOTE: a `json` tag was added here; it was missing, so the field (un)marshaled under the
	// Go name "PropagationPolicy" and was emitted even when nil, unlike every other field and
	// unlike the protobuf tag's name=propagationPolicy.
	// +optional
	PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ListOptions is the query options to a standard REST list call.
// DEPRECATED: This type has been moved to meta/v1 and will be removed soon.
// +k8s:openapi-gen=false
type ListOptions struct {
	metav1.TypeMeta `json:",inline"`
	// LabelSelector is a selector to restrict the list of returned objects by their labels.
	// Defaults to everything.
	// +optional
	LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// FieldSelector is a selector to restrict the list of returned objects by their fields.
	// Defaults to everything.
	// +optional
	FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"`
	// IncludeUninitialized: if true, partially initialized resources are included in the response.
	// (protobuf field 6: this field was added after the ones below, so its tag number is out of
	// declaration order — tag numbers must never be reused or renumbered.)
	// +optional
	IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"`
	// Watch for changes to the described resources and return them as a stream of
	// add, update, and remove notifications. Specify resourceVersion.
	// +optional
	Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"`
	// ResourceVersion: when specified with a watch call, shows changes that occur after that
	// particular version of a resource. Defaults to changes from the beginning of history.
	// When specified for list:
	// - if unset, then the result is returned from remote storage based on quorum-read flag;
	// - if it's 0, then we simply return what we currently have in cache, no guarantee;
	// - if set to non zero, then the result is at least as fresh as given rv.
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
	// TimeoutSeconds is the timeout for the list/watch call.
	// +optional
	TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Container is the container for which to stream logs. Defaults to only container if
	// there is one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow the log stream of the pod. Defaults to false.
	// +optional
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Previous: return previously terminated container logs. Defaults to false.
	// +optional
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// SinceSeconds is a relative time in seconds before the current time from which to show
	// logs. If this value precedes the time a pod was started, only logs since the pod start
	// will be returned. If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// SinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the
	// time a pod was started, only logs since the pod start will be returned. If this value is
	// in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// Timestamps: if true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every
	// line of log output. Defaults to false.
	// +optional
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// TailLines: if set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
	// +optional
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// LimitBytes: if set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY
type PodAttachOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Stdin if true, redirects the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Stdout if true indicates that stdout is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Stderr if true indicates that stderr is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the attach call.
	// This is passed through the container runtime so the tty
	// is allocated on the worker node by the container runtime.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// Container is the container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY
type PodExecOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Stdin redirects the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Stdout redirects the standard output stream of the pod for this call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Stderr redirects the standard error stream of the pod for this call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the exec call.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// Container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
	// Command is the remote command to execute. argv array. Not executed within a shell.
	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Ports is the list of ports to forward.
	// Required when using WebSockets.
	// +optional
	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to pod.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to node.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the part of URLs that include service endpoints, suffixes,
	// and parameters to use for the current proxy request to service.
	// For example, the whole request URL is
	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
	// Path is _search?q=user:kimchy.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// Namespace of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// APIVersion of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
	// ResourceVersion is the specific resourceVersion to which this reference is made, if any.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// FieldPath: if referring to a piece of an object instead of an entire object, this string
	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
	// For example, if the object reference is to a container within a pod, this would take on a value like:
	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
	// the event) or if no container name is specified "spec.containers[2]" (container with
	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}

// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
type LocalObjectReference struct {
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// TODO: Add other useful fields. apiVersion, kind, uid?
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SerializedReference is a reference to serialized object.
type SerializedReference struct {
	metav1.TypeMeta `json:",inline"`
	// Reference is the reference to an object in the system.
	// +optional
	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}

// EventSource contains information for an event.
type EventSource struct {
	// Component from which the event is generated.
	// +optional
	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
	// Host is the node name on which the event is generated.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}

// Valid values for event types (new types could be added in future).
const (
	// EventTypeNormal is information only and will not cause any problems.
	EventTypeNormal string = "Normal"
	// EventTypeWarning events are to warn that something might go wrong.
	EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Event is a report of an event somewhere in the cluster.
// TODO: Decide whether to store these separately or with the object they apply to.
type Event struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
	// InvolvedObject is the object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
	// Reason should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status.
	// TODO: provide exact specification for format.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message is a human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Source is the component reporting this event. Should be a short machine understandable string.
	// +optional
	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
	// FirstTimestamp is the time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
	// +optional
	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
	// LastTimestamp is the time at which the most recent occurrence of this event was recorded.
	// +optional
	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
	// Count is the number of times this event has occurred.
	// +optional
	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
	// Type of this event (Normal, Warning), new types could be added in the future.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// EventList is a list of events.
type EventList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of events.
	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
type List metav1.List
// LimitType is a type of object that is limited.
type LimitType string

const (
	// Limit that applies to all pods in a namespace
	LimitTypePod LimitType = "Pod"
	// Limit that applies to all containers in a namespace
	LimitTypeContainer LimitType = "Container"
	// Limit that applies to all persistent volume claims in a namespace
	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)

// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
	// Type of resource that this limit applies to.
	// +optional
	Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
	// Max usage constraints on this kind by resource name.
	// +optional
	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
	// Min usage constraints on this kind by resource name.
	// +optional
	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
	// Default resource requirement limit value by resource name if resource limit is omitted.
	// +optional
	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
	// +optional
	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
	// +optional
	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}

// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
	// Limits is the list of LimitRangeItem objects that are enforced.
	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the limits enforced.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of LimitRange objects.
	// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_limit_range.md
	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types.
const (
	// Pods, number
	ResourcePods ResourceName = "pods"
	// Services, number
	ResourceServices ResourceName = "services"
	// ReplicationControllers, number
	ResourceReplicationControllers ResourceName = "replicationcontrollers"
	// ResourceQuotas, number
	ResourceQuotas ResourceName = "resourcequotas"
	// ResourceSecrets, number
	ResourceSecrets ResourceName = "secrets"
	// ResourceConfigMaps, number
	ResourceConfigMaps ResourceName = "configmaps"
	// ResourcePersistentVolumeClaims, number
	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
	// ResourceServicesNodePorts, number
	ResourceServicesNodePorts ResourceName = "services.nodeports"
	// ResourceServicesLoadBalancers, number
	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
	// CPU request, in cores. (500m = .5 cores)
	ResourceRequestsCPU ResourceName = "requests.cpu"
	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsMemory ResourceName = "requests.memory"
	// Storage request, in bytes
	ResourceRequestsStorage ResourceName = "requests.storage"
	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
	// CPU limit, in cores. (500m = .5 cores)
	ResourceLimitsCPU ResourceName = "limits.cpu"
	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)

// A ResourceQuotaScope defines a filter that must match each object tracked by a quota.
type ResourceQuotaScope string

const (
	// Match all pod objects where spec.activeDeadlineSeconds is set
	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
	// Match all pod objects where !spec.activeDeadlineSeconds (the field is not set)
	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
	// Match all pod objects that have best effort quality of service
	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
	// Match all pod objects that do not have best effort quality of service
	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
	// Hard is the set of desired hard limits for each named resource.
	// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// A collection of filters that must match each object tracked by a quota.
	// If not specified, the quota matches all objects.
	// +optional
	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}

// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
	// Hard is the set of enforced hard limits for each named resource.
	// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// Used is the current observed total usage of the resource in the namespace.
	// +optional
	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceQuota sets aggregate quota restrictions enforced per namespace.
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired quota.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status defines the actual enforced quota and its current usage.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of ResourceQuota objects.
	// More info: https://git.k8s.io/community/contributors/design-proposals/admission_control_resource_quota.md
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the secret data. Each key must consist of alphanumeric
	// characters, '-', '_' or '.'. The serialized form of the secret data is a
	// base64 encoded string, representing the arbitrary (possibly non-string)
	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
	// +optional
	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// stringData allows specifying non-binary secret data in string form.
	// It is provided as a write-only convenience method.
	// All keys and values are merged into the data field on write, overwriting any existing values.
	// It is never output when reading from the API.
	// NOTE(review): protobuf field number 4 appears out of order here because
	// field 3 is already used by Type below — do not renumber.
	// +k8s:conversion-gen=false
	// +optional
	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
	// Used to facilitate programmatic handling of secret data.
	// +optional
	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}

// MaxSecretSize is the maximum allowed total size, in bytes, of the values
// in a Secret's Data field (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024

// SecretType facilitates programmatic handling of secret data.
type SecretType string
const (
	// SecretTypeOpaque is the default. Arbitrary user-defined data
	SecretTypeOpaque SecretType = "Opaque"
	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
	//
	// Required fields:
	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
	// - Secret.Data["token"] - a token that identifies the service account to the API
	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountNameKey = "kubernetes.io/service-account.name"
	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
	ServiceAccountTokenKey = "token"
	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
	ServiceAccountRootCAKey = "ca.crt"
	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
	ServiceAccountNamespaceKey = "namespace"
	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
	//
	// Required fields:
	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
	DockerConfigKey = ".dockercfg"
	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
	//
	// Required fields:
	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
	DockerConfigJsonKey = ".dockerconfigjson"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
	// - Secret.Data["username"] - username used for authentication
	// - Secret.Data["password"] - password or token needed for authentication
	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
	BasicAuthUsernameKey = "username"
	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
	BasicAuthPasswordKey = "password"
	// SecretTypeSSHAuth contains data needed for SSH authentication.
	//
	// Required field:
	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
	SSHAuthPrivateKey = "ssh-privatekey"
	// SecretTypeTLS contains information about a TLS client or server secret. It
	// is primarily used with TLS termination of the Ingress resource, but may be
	// used in other types.
	//
	// Required fields:
	// - Secret.Data["tls.key"] - TLS private key.
	//   Secret.Data["tls.crt"] - TLS certificate.
	// TODO: Consider supporting different formats, specifying CA/destinationCA.
	SecretTypeTLS SecretType = "kubernetes.io/tls"
	// TLSCertKey is the key for tls certificates in a TLS secret.
	TLSCertKey = "tls.crt"
	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
	TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// SecretList is a list of Secret.
type SecretList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of secret objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the configuration data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// +optional
	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ConfigMaps.
	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type used for component health validation conditions.
type ComponentConditionType string

// These are the valid conditions for the component.
const (
	ComponentHealthy ComponentConditionType = "Healthy"
)

// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
	// Type of condition for a component.
	// Valid value: "Healthy"
	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
	// Status of the condition for a component.
	// Valid values for "Healthy": "True", "False", or "Unknown".
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Message about the condition for a component.
	// For example, information about a health check.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// Condition error code for a component.
	// For example, a health check error code.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of component conditions observed
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ComponentStatusList holds the status of all the conditions for the component
// as a list of ComponentStatus objects.
type ComponentStatusList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ComponentStatus objects.
	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}

const (
	// DownwardAPIVolumeSourceDefaultMode is the file mode used for created
	// files when DefaultMode is left unset (0644, i.e. rw-r--r--).
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)

// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}

// Represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}

// SELinuxOptions are the labels to be applied to the container.
type SELinuxOptions struct {
	// User is a SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is a SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is a SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}

const (
	// "default-scheduler" is the name of default scheduler.
	DefaultSchedulerName = "default-scheduler"
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int = 1
)

// Sysctl defines a kernel parameter to be set.
// NOTE(review): fields carry protobuf tags only, no json tags — confirm this
// type is never serialized directly to JSON before adding them.
type Sysctl struct {
	// Name of a property to set.
	Name string `protobuf:"bytes,1,opt,name=name"`
	// Value of a property to set.
	Value string `protobuf:"bytes,2,opt,name=value"`
}

// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}

const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"
	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"
	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
|
package goldb
import (
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb"
)
// ExecBatch queues fn into the current write batch and blocks until the
// batch containing it has been committed by the background committer
// goroutine (startBatchSync). It returns the error of that commit, which
// is shared by every transaction coalesced into the same batch.
func (s *Storage) ExecBatch(fn func(tx *Transaction)) error {
	atomic.AddInt64(&s.cntWaitingTrans, 1)
	defer atomic.AddInt64(&s.cntWaitingTrans, -1)
	// put tx to batch
	s.batchMx.Lock()
	if s.batchExst == nil {
		// First use: create the signal channel and start the committer.
		s.batchExst = make(chan struct{})
		go s.startBatchSync()
	}
	if s.batchCl == nil { // new batch
		s.batchCl, s.batchErr = make(chan struct{}), new(error)
	}
	cl, pErr := s.batchCl, s.batchErr
	s.batchTxs = append(s.batchTxs, fn)
	// BUGFIX: signal the committer only for the FIRST transaction of a
	// batch. Closing s.batchExst unconditionally meant a second concurrent
	// caller closed an already-closed channel, which panics.
	if len(s.batchTxs) == 1 {
		close(s.batchExst)
	}
	s.batchMx.Unlock()
	//---
	<-cl // waiting for batch commit
	return *pErr
}
// startBatchSync is the background committer loop: it waits until the
// current batch has at least one queued transaction, takes ownership of
// the queue under the mutex, applies all of them in a single Exec call,
// and publishes the shared result to the waiting ExecBatch callers. The
// loop exits once the underlying database reports it has been closed.
func (s *Storage) startBatchSync() {
	defer func() {
		// Reset the signal channel so a later ExecBatch can restart us.
		s.batchMx.Lock()
		s.batchExst = nil
		s.batchMx.Unlock()
	}()
	for {
		<-s.batchExst // waiting for batch txs
		// pop all txs
		s.batchMx.Lock()
		txs, cl, pErr := s.batchTxs, s.batchCl, s.batchErr
		s.batchExst, s.batchTxs, s.batchCl, s.batchErr = make(chan struct{}), nil, nil, nil
		s.batchMx.Unlock()
		//
		// commit
		err := s.Exec(func(t *Transaction) {
			for _, fn := range txs {
				fn(t)
			}
		})
		// Store the commit error before closing cl, so every waiter in
		// ExecBatch observes it after <-cl returns.
		*pErr = err
		close(cl)
		if err == leveldb.ErrClosed {
			break
		}
	}
}
fix batch commit: close the batchExst signal channel only for the first transaction added to a new batch — previously every concurrent ExecBatch call closed it, and closing an already-closed channel panics
package goldb
import (
"sync/atomic"
"github.com/syndtr/goleveldb/leveldb"
)
// ExecBatch registers fn as part of the current write batch and blocks
// until that batch has been committed by the background committer,
// returning the commit error shared by all transactions in the batch.
func (s *Storage) ExecBatch(fn func(tx *Transaction)) error {
	atomic.AddInt64(&s.cntWaitingTrans, 1)
	defer atomic.AddInt64(&s.cntWaitingTrans, -1)
	// put tx to batch
	s.batchMx.Lock()
	if s.batchExst == nil {
		// First use: create the signal channel and start the committer.
		s.batchExst = make(chan struct{})
		go s.startBatchSync()
	}
	if s.batchCl == nil { // new batch
		s.batchCl, s.batchErr = make(chan struct{}), new(error)
	}
	cl, pErr := s.batchCl, s.batchErr
	s.batchTxs = append(s.batchTxs, fn)
	if len(s.batchTxs) == 1 {
		// Only the first transaction of a batch wakes the committer;
		// closing batchExst more than once would panic.
		close(s.batchExst)
	}
	s.batchMx.Unlock()
	//---
	<-cl // waiting for batch commit
	return *pErr
}
// startBatchSync commits queued batches in the background: it waits for a
// signal on batchExst, drains the transaction queue under the mutex,
// applies all drained transactions in one Exec call, and broadcasts the
// result by closing the batch's done channel. It stops when the database
// has been closed.
func (s *Storage) startBatchSync() {
	defer func() {
		// Reset so a future ExecBatch can spawn a fresh committer.
		s.batchMx.Lock()
		s.batchExst = nil
		s.batchMx.Unlock()
	}()
	for {
		<-s.batchExst // waiting for batch txs
		// pop all txs
		s.batchMx.Lock()
		txs, cl, pErr := s.batchTxs, s.batchCl, s.batchErr
		s.batchExst, s.batchTxs, s.batchCl, s.batchErr = make(chan struct{}), nil, nil, nil
		s.batchMx.Unlock()
		//
		// commit
		err := s.Exec(func(t *Transaction) {
			for _, fn := range txs {
				fn(t)
			}
		})
		// Publish the result before waking the waiters.
		*pErr = err
		close(cl)
		if err == leveldb.ErrClosed {
			break
		}
	}
}
|
auth: fix typo in comments
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mime_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime"
)
// ExampleWordEncoder_Encode demonstrates RFC 2047 encoded-word encoding
// with both the Q- and B-encodings; ASCII-only text that needs no
// escaping is returned unchanged.
func ExampleWordEncoder_Encode() {
	cases := []struct {
		enc     mime.WordEncoder
		charset string
		text    string
	}{
		{mime.QEncoding, "utf-8", "¡Hola, señor!"},
		{mime.QEncoding, "utf-8", "Hello!"},
		{mime.BEncoding, "UTF-8", "¡Hola, señor!"},
		{mime.QEncoding, "ISO-8859-1", "Caf\xE9"},
	}
	for _, c := range cases {
		fmt.Println(c.enc.Encode(c.charset, c.text))
	}
	// Output:
	// =?utf-8?q?=C2=A1Hola,_se=C3=B1or!?=
	// Hello!
	// =?UTF-8?b?wqFIb2xhLCBzZcOxb3Ih?=
	// =?ISO-8859-1?q?Caf=E9?=
}
// ExampleWordDecoder_Decode decodes a single RFC 2047 encoded-word, then
// installs a CharsetReader so the decoder can handle a charset it does
// not support natively.
func ExampleWordDecoder_Decode() {
	decoder := new(mime.WordDecoder)
	decoded, err := decoder.Decode("=?utf-8?q?=C2=A1Hola,_se=C3=B1or!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)

	decoder.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		if charset != "x-case" {
			return nil, fmt.Errorf("unhandled charset %q", charset)
		}
		// Fake character set for example.
		// Real use would integrate with packages such
		// as code.google.com/p/go-charset
		content, err := ioutil.ReadAll(input)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(bytes.ToUpper(content)), nil
	}
	decoded, err = decoder.Decode("=?x-case?q?hello!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)
	// Output:
	// ¡Hola, señor!
	// HELLO!
}
// ExampleWordDecoder_DecodeHeader shows that DecodeHeader decodes every
// encoded-word in a header (merging adjacent ones) and how a custom
// CharsetReader handles charsets unknown to the decoder.
func ExampleWordDecoder_DecodeHeader() {
	decoder := new(mime.WordDecoder)
	decoded, err := decoder.DecodeHeader("=?utf-8?q?=C3=89ric?= <eric@example.org>, =?utf-8?q?Ana=C3=AFs?= <anais@example.org>")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)

	decoded, err = decoder.DecodeHeader("=?utf-8?q?=C2=A1Hola,?= =?utf-8?q?_se=C3=B1or!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)

	decoder.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		if charset != "x-case" {
			return nil, fmt.Errorf("unhandled charset %q", charset)
		}
		// Fake character set for example.
		// Real use would integrate with packages such
		// as code.google.com/p/go-charset
		content, err := ioutil.ReadAll(input)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(bytes.ToUpper(content)), nil
	}
	decoded, err = decoder.DecodeHeader("=?x-case?q?hello_?= =?x-case?q?world!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded)
	// Output:
	// Éric <eric@example.org>, Anaïs <anais@example.org>
	// ¡Hola, señor!
	// HELLO WORLD!
}
mime: add examples for FormatMediaType and ParseMediaType
Change-Id: Ic129c58784ad1f0b8b90fc9d33e52bee61bdf0eb
Reviewed-on: https://go-review.googlesource.com/c/go/+/253237
Reviewed-by: Emmanuel Odeke <277f8eaa8c7dfbc55c17bf5e30561c1a9c45c569@gmail.com>
Run-TryBot: Emmanuel Odeke <277f8eaa8c7dfbc55c17bf5e30561c1a9c45c569@gmail.com>
TryBot-Result: Gobot Gobot <66cb808b70d30c07676d5e946fee83fd561249e5@golang.org>
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mime_test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime"
)
// ExampleWordEncoder_Encode shows Q- and B-encoding of header text.
// ASCII-only input is returned unchanged.
func ExampleWordEncoder_Encode() {
	q := mime.QEncoding
	b := mime.BEncoding
	fmt.Println(q.Encode("utf-8", "¡Hola, señor!"))
	fmt.Println(q.Encode("utf-8", "Hello!"))
	fmt.Println(b.Encode("UTF-8", "¡Hola, señor!"))
	fmt.Println(q.Encode("ISO-8859-1", "Caf\xE9"))
	// Output:
	// =?utf-8?q?=C2=A1Hola,_se=C3=B1or!?=
	// Hello!
	// =?UTF-8?b?wqFIb2xhLCBzZcOxb3Ih?=
	// =?ISO-8859-1?q?Caf=E9?=
}
// ExampleWordDecoder_Decode decodes one RFC 2047 encoded-word, then
// registers a CharsetReader so a non-built-in charset can be decoded too.
func ExampleWordDecoder_Decode() {
	wd := new(mime.WordDecoder)

	text, err := wd.Decode("=?utf-8?q?=C2=A1Hola,_se=C3=B1or!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(text)

	// Handle the fake "x-case" charset by upper-casing the bytes.
	// Real use would integrate with packages such as
	// code.google.com/p/go-charset.
	wd.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		if charset != "x-case" {
			return nil, fmt.Errorf("unhandled charset %q", charset)
		}
		payload, err := ioutil.ReadAll(input)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(bytes.ToUpper(payload)), nil
	}

	text, err = wd.Decode("=?x-case?q?hello!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(text)
	// Output:
	// ¡Hola, señor!
	// HELLO!
}
// ExampleWordDecoder_DecodeHeader decodes full header values containing
// encoded-words, including adjacent words in a custom charset.
func ExampleWordDecoder_DecodeHeader() {
	wd := new(mime.WordDecoder)

	text, err := wd.DecodeHeader("=?utf-8?q?=C3=89ric?= <eric@example.org>, =?utf-8?q?Ana=C3=AFs?= <anais@example.org>")
	if err != nil {
		panic(err)
	}
	fmt.Println(text)

	text, err = wd.DecodeHeader("=?utf-8?q?=C2=A1Hola,?= =?utf-8?q?_se=C3=B1or!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(text)

	// Handle the fake "x-case" charset by upper-casing the bytes.
	// Real use would integrate with packages such as
	// code.google.com/p/go-charset.
	wd.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) {
		if charset != "x-case" {
			return nil, fmt.Errorf("unhandled charset %q", charset)
		}
		payload, err := ioutil.ReadAll(input)
		if err != nil {
			return nil, err
		}
		return bytes.NewReader(bytes.ToUpper(payload)), nil
	}

	text, err = wd.DecodeHeader("=?x-case?q?hello_?= =?x-case?q?world!?=")
	if err != nil {
		panic(err)
	}
	fmt.Println(text)
	// Output:
	// Éric <eric@example.org>, Anaïs <anais@example.org>
	// ¡Hola, señor!
	// HELLO WORLD!
}
// ExampleFormatMediaType serializes a media type plus parameters into a
// header value of the form "type/subtype; key=value".
func ExampleFormatMediaType() {
	result := mime.FormatMediaType("text/html", map[string]string{"charset": "utf-8"})
	fmt.Println("result:", result)
	// Output:
	// result: text/html; charset=utf-8
}
// ExampleParseMediaType splits a Content-Type style value into the media
// type and its parameter map.
func ExampleParseMediaType() {
	mt, attrs, err := mime.ParseMediaType("text/html; charset=utf-8")
	if err != nil {
		panic(err)
	}
	fmt.Println("type:", mt)
	fmt.Println("charset:", attrs["charset"])
	// Output:
	// type: text/html
	// charset: utf-8
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Well-known namespace values used when a client does not specify one.
const (
	// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
	NamespaceDefault string = "default"
	// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
	NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
	// Volume's name.
	// Must be a DNS_LABEL and unique within the pod.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// VolumeSource represents the location and type of the mounted volume.
	// If not specified, the Volume is implied to be an EmptyDir.
	// This implied behavior is deprecated and will be removed in a future version.
	// The source fields are inlined into the Volume's JSON representation
	// (there is no "volumeSource" key on the wire).
	VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
	// HostPath represents a pre-existing file or directory on the host
	// machine that is directly exposed to the container. This is generally
	// used for system agents or other privileged things that are allowed
	// to see the host machine. Most containers will NOT need this.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// ---
	// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
	// mount host directories as read/write.
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
	// EmptyDir represents a temporary directory that shares a pod's lifetime.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
	// +optional
	EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
	// GitRepo represents a git repository at a particular revision.
	// +optional
	GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
	// Secret represents a secret that should populate this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
	// +optional
	Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
	// NFS represents an NFS mount on the host that shares a pod's lifetime
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
	// +optional
	ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
	// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
	// PersistentVolumeClaimVolumeSource represents a reference to a
	// PersistentVolumeClaim in the same namespace.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
	// DownwardAPI represents downward API about the pod that should populate this volume
	// +optional
	DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
	// ConfigMap represents a configMap that should populate this volume
	// +optional
	ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// NOTE(review): no +optional marker here, unlike sibling fields — confirm intended.
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
	// Items for all in one resources secrets, configmaps, and downward API
	// NOTE: protobuf tag 26 is out of declaration order; tags are frozen
	// once assigned to preserve wire compatibility.
	Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
	// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
	// +optional
	StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
	// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
	// ReadOnly will force the ReadOnly setting in VolumeMounts.
	// Default false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
	// GCEPersistentDisk represents a GCE Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
	// +optional
	GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
	// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
	// +optional
	AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
	// HostPath represents a directory on the host.
	// Provisioned by a developer or tester.
	// This is useful for single-node development and testing only!
	// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
	// +optional
	HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
	// Glusterfs represents a Glusterfs volume that is attached to a host and
	// exposed to the pod. Provisioned by an admin.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
	// +optional
	Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
	// NFS represents an NFS mount on the host. Provisioned by an admin.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
	// +optional
	NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
	// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
	// +optional
	RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
	// ISCSI represents an ISCSI Disk resource that is attached to a
	// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
	// +optional
	ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
	// Cinder represents a cinder volume attached and mounted on kubelets host machine
	// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
	// +optional
	Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
	// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
	// +optional
	CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
	// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
	// +optional
	FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
	// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
	// +optional
	Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
	// FlexVolume represents a generic volume resource that is
	// provisioned/attached using an exec based plugin.
	// +optional
	FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
	// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
	// +optional
	AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
	// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
	// +optional
	VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
	// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
	// +optional
	Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
	// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
	// +optional
	AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
	// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
	// NOTE(review): no +optional marker here, unlike sibling fields — confirm intended.
	PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
	// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
	// +optional
	PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
	// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
	// +optional
	ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
	// Local represents directly-attached storage with node affinity
	// +optional
	Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
	// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
	// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
	// +optional
	StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
	// CSI represents storage that handled by an external CSI driver (Beta feature).
	// +optional
	CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
// Annotation keys recognized on PersistentVolume/PersistentVolumeClaim objects.
const (
	// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
	// It's currently still used and will be held for backwards compatibility
	BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
	// MountOptionAnnotation defines mount option annotation used in PVs
	MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// (The lines above are code-generation directives, not prose.)
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines a specification of a persistent volume owned by the cluster.
	// Provisioned by an administrator.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status for the persistent volume.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
	// +optional
	Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
	// Capacity is a description of the persistent volume's resources and capacity.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// PersistentVolumeSource is the actual volume backing the persistent volume.
	// Its fields are inlined into the spec's JSON representation.
	PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
	// AccessModes contains all ways the volume can be mounted.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
	// Expected to be non-nil when bound.
	// claim.VolumeName is the authoritative bind between PV and PVC.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
	// +optional
	ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
	// PersistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim.
	// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
	// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
	// Recycle must be supported by the volume plugin underlying this PersistentVolume.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
	// +optional
	PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
	// StorageClassName is the name of StorageClass to which this persistent volume belongs. Empty value
	// means that this volume does not belong to any StorageClass.
	// +optional
	StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
	// MountOptions is a list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
	// simply fail if one is invalid.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
	// NOTE(review): protobuf cardinality is "opt" on a repeated field — confirm generated proto matches.
	// +optional
	MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
	// VolumeMode defines if a volume is intended to be used with a formatted filesystem
	// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
	// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
	// This field influences the scheduling of pods that use this volume.
	// +optional
	NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
	// Required specifies hard node constraints that must be met.
	// A nil value imposes no node constraint.
	Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
type PersistentVolumeReclaimPolicy string

// Valid reclaim policies. See PersistentVolumeSpec.PersistentVolumeReclaimPolicy.
const (
	// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
	// The volume plugin must support Recycling.
	PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
	// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
	// The volume plugin must support Deletion.
	PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
	// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
	// The default policy is Retain.
	PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
type PersistentVolumeMode string

const (
	// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
	PersistentVolumeBlock PersistentVolumeMode = "Block"
	// PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem.
	PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
	// Phase indicates if a volume is available, bound to a claim, or released by a claim.
	// See the PersistentVolumePhase constants for the valid values.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
	// +optional
	Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
	// Message is a human-readable message indicating details about why the volume is in this state.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
	// Reason is a brief CamelCase string that describes any failure and is meant
	// for machine parsing and tidy display in the CLI.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeList is a list of PersistentVolume items, as returned by
// list operations on PersistentVolume objects.
type PersistentVolumeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of persistent volumes.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
	Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaim is a user's request for and claim to a persistent volume
type PersistentVolumeClaim struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired characteristics of a volume requested by a pod author.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status represents the current information/status of a persistent volume claim.
	// Populated by the system.
	// Read-only.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	// +optional
	Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items, as
// returned by list operations on PersistentVolumeClaim objects.
type PersistentVolumeClaimList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of persistent volume claims.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
	Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
// NOTE: protobuf tags (1,4,2,3,5,6) are not in field-declaration order; tags
// are frozen once assigned to preserve wire compatibility.
type PersistentVolumeClaimSpec struct {
	// AccessModes contains the desired access modes the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Selector is a label query over volumes to consider for binding.
	// +optional
	Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
	// Resources represents the minimum resources the volume should have.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
	// VolumeName is the binding reference to the PersistentVolume backing this claim.
	// +optional
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
	// StorageClassName is the name of the StorageClass required by the claim.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
	// +optional
	StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
	// VolumeMode defines what type of volume is required by the claim.
	// Value of Filesystem is implied when not included in claim spec.
	// This is an alpha feature and may change in the future.
	// +optional
	VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
type PersistentVolumeClaimConditionType string

const (
	// PersistentVolumeClaimResizing - a user trigger resize of pvc has been started
	PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
	// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
	PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
// PersistentVolumeClaimCondition contains details about state of pvc
type PersistentVolumeClaimCondition struct {
	// Type is the type of the condition; see PersistentVolumeClaimConditionType for valid values.
	Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
	// Status is the status of the condition.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// LastProbeTime is the last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// LastTransitionTime is the last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Reason is unique; this should be a short, machine understandable string that gives the reason
	// for condition's last transition. If it reports "ResizeStarted" that means the underlying
	// persistent volume is being resized.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Message is a human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
	// Phase represents the current phase of PersistentVolumeClaim.
	// See the PersistentVolumeClaimPhase constants for the valid values.
	// +optional
	Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
	// AccessModes contains the actual access modes the volume backing the PVC has.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
	// +optional
	AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
	// Capacity represents the actual resources of the underlying volume.
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Conditions is the current Condition of persistent volume claim. If underlying persistent volume is being
	// resized then the Condition will be set to 'ResizeStarted'.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
}
// PersistentVolumeAccessMode defines the ways in which a volume can be mounted.
type PersistentVolumeAccessMode string
const (
// can be mounted read/write mode to exactly 1 host
ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
// can be mounted in read-only mode to many hosts
ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
// can be mounted in read/write mode to many hosts
ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
type PersistentVolumePhase string
const (
// used for PersistentVolumes that are not available
VolumePending PersistentVolumePhase = "Pending"
// used for PersistentVolumes that are not yet bound
// Available volumes are held by the binder and matched to PersistentVolumeClaims
VolumeAvailable PersistentVolumePhase = "Available"
// used for PersistentVolumes that are bound
VolumeBound PersistentVolumePhase = "Bound"
// used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
// released volumes must be recycled before becoming available again
// this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
VolumeReleased PersistentVolumePhase = "Released"
// used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
type PersistentVolumeClaimPhase string
const (
// used for PersistentVolumeClaims that are not yet bound
ClaimPending PersistentVolumeClaimPhase = "Pending"
// used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType describes what (if anything) must exist at a HostPath before it is mounted.
type HostPathType string
const (
// For backwards compatibility, leave it empty if unset
HostPathUnset HostPathType = ""
// If nothing exists at the given path, an empty directory will be created there
// as needed with file mode 0755, having the same group and ownership with Kubelet.
HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
// A directory must exist at the given path
HostPathDirectory HostPathType = "Directory"
// If nothing exists at the given path, an empty file will be created there
// as needed with file mode 0644, having the same group and ownership with Kubelet.
HostPathFileOrCreate HostPathType = "FileOrCreate"
// A file must exist at the given path
HostPathFile HostPathType = "File"
// A UNIX socket must exist at the given path
HostPathSocket HostPathType = "Socket"
// A character device must exist at the given path
HostPathCharDev HostPathType = "CharDevice"
// A block device must exist at the given path
HostPathBlockDev HostPathType = "BlockDevice"
)
// Represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
// Path of the directory on the host.
// If the path is a symlink, it will follow the link to the real path.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// Type for HostPath Volume
// Defaults to "" (HostPathUnset, kept for backwards compatibility)
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// Represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
// What type of storage medium should back this directory.
// The default is "" which means to use the node's default medium.
// Must be an empty string (default) or Memory.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
// Total amount of local storage required for this EmptyDir volume.
// The size limit is also applicable for memory medium.
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// +optional
SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
// EndpointsName is the endpoint name that details Glusterfs topology.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// Path is the Glusterfs volume path.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// NOTE: differs from RBDPersistentVolumeSource only in the type of SecretRef
// (LocalObjectReference, i.e. same-namespace only).
type RBDVolumeSource struct {
// A collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// The rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// The rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// The rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// NOTE: differs from RBDVolumeSource only in the type of SecretRef
// (SecretReference, which may point to a secret in another namespace).
type RBDPersistentVolumeSource struct {
// A collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// The rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// The rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// The rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// Represents a cinder volume resource in OpenStack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
// volume id used to identify the volume in cinder
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
// NOTE: differs from CephFSPersistentVolumeSource only in the type of SecretRef
// (LocalObjectReference, i.e. same-namespace only).
type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
type SecretReference struct {
// Name is unique within a namespace to reference a secret resource.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Namespace defines the space within which the secret name must be unique.
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
// Cephfs volumes do not support ownership management or SELinux relabeling.
// NOTE: differs from CephFSVolumeSource only in the type of SecretRef
// (SecretReference, which may point to a secret in another namespace).
type CephFSPersistentVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// Represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
// Name of the dataset stored as metadata -> name on the dataset for Flocker
// should be considered as deprecated
// +optional
DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
// UUID of the dataset. This is unique identifier of a Flocker dataset
// +optional
DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this
StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux)
StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
)
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
)
// Represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
// Unique name of the PD resource in GCE. Used to identify the disk in GCE.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
// Registry represents a single or multiple Quobyte Registry services
// specified as a string as host:port pair (multiple entries are separated with commas)
// which acts as the central registry for volumes
Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
// Volume is a string that references an already created Quobyte volume by name.
Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// User to map volume access to
// Defaults to serviceaccount user
// +optional
User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
// Group to map volume access to
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
// NOTE: differs from FlexVolumeSource only in the type of SecretRef
// (SecretReference, which may point to a secret in another namespace).
type FlexPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// NOTE: differs from FlexPersistentVolumeSource only in the type of SecretRef
// (LocalObjectReference, i.e. same-namespace only).
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// Represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// Unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// The partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// Specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
// If omitted, the default is "false".
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// Represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
// Repository URL
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
// Commit hash for the specified revision.
// +optional
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Target directory name.
// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
// git repository. Otherwise, if specified, the volume will contain the git repository in
// the subdirectory with the given name.
// +optional
Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// Adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
// Specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// SecretVolumeSourceDefaultMode is the default file mode (0644) applied when DefaultMode is unset.
SecretVolumeSourceDefaultMode int32 = 0644
)
// Adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
// LocalObjectReference names the Secret in the pod's namespace.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// Represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
// Server is the hostname or IP address of the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
// Path that is exported by the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force
// the NFS export to be mounted with read-only permissions.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// NOTE: differs from ISCSIPersistentVolumeSource only in the type of SecretRef
// (LocalObjectReference, i.e. same-namespace only).
type ISCSIVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// NOTE: differs from ISCSIVolumeSource only in the type of SecretRef
// (SecretReference, which may point to a secret in another namespace).
type ISCSIPersistentVolumeSource struct {
// iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// Target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// Filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// Custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
	// Optional: FC target worldwide names (WWNs).
	// +optional
	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
	// Optional: FC target lun number.
	// +optional
	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// Optional: FC volume world wide identifiers (wwids).
	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
	// +optional
	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFileVolumeSource represents an Azure File Service mount on the host
// and bind mount to the pod.
type AzureFileVolumeSource struct {
	// The name of the secret that contains Azure Storage Account Name and Key.
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name.
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFilePersistentVolumeSource represents an Azure File Service mount on
// the host and bind mount to the pod.
type AzureFilePersistentVolumeSource struct {
	// The name of the secret that contains Azure Storage Account Name and Key.
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name.
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// The namespace of the secret that contains Azure Storage Account Name and Key.
	// Default is the same as the Pod.
	// NOTE(review): field is optional (+optional, pointer) but the json tag has no
	// omitempty — confirm whether emitting `"secretNamespace":null` is intended.
	// +optional
	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// VsphereVirtualDiskVolumeSource represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
	// Path that identifies the vSphere volume vmdk.
	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Storage Policy Based Management (SPBM) profile name.
	// +optional
	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
	// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
	// +optional
	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// PhotonPersistentDiskVolumeSource represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
	// ID that identifies the Photon Controller persistent disk.
	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// NOTE(review): tag has omitempty but the field lacks a "+optional" marker,
	// unlike sibling FSType fields — confirm whether the marker was omitted intentionally.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode for an Azure data disk.
type AzureDataDiskCachingMode string

// AzureDataDiskKind is the kind of Azure data disk (shared, dedicated, or managed).
type AzureDataDiskKind string

const (
	// AzureDataDiskCachingNone disables host caching.
	AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
	// AzureDataDiskCachingReadOnly enables read-only host caching.
	AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
	// AzureDataDiskCachingReadWrite enables read/write host caching.
	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"

	// AzureSharedBlobDisk allows multiple blob disks per storage account.
	AzureSharedBlobDisk AzureDataDiskKind = "Shared"
	// AzureDedicatedBlobDisk uses a single blob disk per storage account.
	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
	// AzureManagedDisk uses an Azure managed data disk.
	AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDiskVolumeSource represents an Azure Data Disk mount on the host and
// bind mount to the pod.
type AzureDiskVolumeSource struct {
	// The Name of the data disk in the blob storage.
	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
	// The URI of the data disk in the blob storage.
	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
	// Host Caching mode: None, Read Only, Read Write.
	// +optional
	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
	// Expected values:
	//   Shared: multiple blob disks per storage account
	//   Dedicated: single blob disk per storage account
	//   Managed: azure managed data disk (only in managed availability set)
	// Defaults to Shared.
	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
	// VolumeID uniquely identifies a Portworx volume.
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// FSType represents the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume.
type ScaleIOVolumeSource struct {
	// The host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// The name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// SecretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// Flag to enable/disable SSL communication with Gateway, default false.
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// The name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// The ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// The name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume.
type ScaleIOPersistentVolumeSource struct {
	// The host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// The name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// SecretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	// Note: a namespaced *SecretReference here, versus the LocalObjectReference
	// used by the pod-level ScaleIOVolumeSource.
	SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// Flag to enable/disable SSL communication with Gateway, default false.
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// The name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// The ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// The name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// StorageOSVolumeSource represents a StorageOS persistent volume resource.
type StorageOSVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// StorageOSPersistentVolumeSource represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// Note: a fully-qualified *ObjectReference here, versus the
	// LocalObjectReference used by the pod-level StorageOSVolumeSource.
	// +optional
	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// ConfigMapVolumeSource adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
	// The ConfigMap to select from (embedded; contributes the "name" field).
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
	// Specify whether the ConfigMap or its keys must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// ConfigMapVolumeSourceDefaultMode is the file mode (0644) applied to
	// projected ConfigMap files when DefaultMode is not set.
	ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// ConfigMapProjection adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
	// The ConfigMap to select from (embedded; contributes the "name" field).
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Specify whether the ConfigMap or its keys must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ProjectedVolumeSource represents a projected volume source.
type ProjectedVolumeSource struct {
	// List of volume projections.
	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
	// Mode bits to use on created files by default. Must be a value between
	// 0 and 0777 (defaults to ProjectedVolumeSourceDefaultMode, 0644).
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// VolumeProjection is a projection that may be projected along with other
// supported volume types. All types below are the supported types for
// projection into the same volume.
type VolumeProjection struct {
	// Information about the secret data to project.
	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
	// Information about the downwardAPI data to project.
	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
	// Information about the configMap data to project.
	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
}
const (
	// ProjectedVolumeSourceDefaultMode is the file mode (0644) applied to
	// projected files when ProjectedVolumeSource.DefaultMode is not set.
	ProjectedVolumeSourceDefaultMode int32 = 0644
)
// KeyToPath maps a string key to a path within a volume.
type KeyToPath struct {
	// The key to project.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// The relative path of the file to map the key to.
	// May not be an absolute path.
	// May not contain the path element '..'.
	// May not start with the string '..'.
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// LocalVolumeSource represents directly-attached storage with node affinity
// (Beta feature).
type LocalVolumeSource struct {
	// The full path to the volume on the node.
	// It can be either a directory or block device (disk, partition, ...).
	// Directories can be represented only by PersistentVolume with VolumeMode=Filesystem.
	// Block devices can be represented only by VolumeMode=Block, which also requires the
	// BlockVolume alpha feature gate to be enabled.
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// CSIPersistentVolumeSource represents storage that is managed by an external
// CSI volume driver (Beta feature).
type CSIPersistentVolumeSource struct {
	// Driver is the name of the driver to use for this volume.
	// Required.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// VolumeHandle is the unique volume name returned by the CSI volume
	// plugin’s CreateVolume to refer to the volume on all subsequent calls.
	// Required.
	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
	// Optional: The value to pass to ControllerPublishVolumeRequest.
	// Defaults to false (read/write).
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// Attributes of the volume to publish.
	// +optional
	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
	// ControllerPublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// ControllerPublishVolume and ControllerUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
	// NodeStageSecretRef is a reference to the secret object containing sensitive
	// information to pass to the CSI driver to complete the CSI NodeStageVolume
	// and NodeUnstageVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
	// NodePublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// NodePublishVolume and NodeUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Number of port to expose on the host.
	// If specified, this must be a valid port number, 0 < x < 65536.
	// If HostNetwork is specified, this must match ContainerPort.
	// Most containers do not need this.
	// +optional
	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
	// Protocol for port. Must be UDP or TCP.
	// Defaults to "TCP".
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
	// What host IP to bind the external port to.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// This must match the Name of a Volume.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to container and the other way around.
	// When not set, MountPropagationHostToContainer is used.
	// This field is beta in 1.10.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation.
type MountPropagationMode string

const (
	// MountPropagationNone means that the volume in a container will
	// not receive new mounts from the host or other containers, and filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode corresponds to "private" in Linux terminology.
	MountPropagationNone MountPropagationMode = "None"
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
	// name must match the name of a persistentVolumeClaim in the pod.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// devicePath is the path inside of the container that the device will be mapped to.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// Optional: no more than one of the following may be specified.

	// Variable references $(VAR_NAME) are expanded
	// using the previous defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. The $(VAR_NAME)
	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
	// references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// Only one of its fields may be set.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace.
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and
// their output format.
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars.
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select.
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1".
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// ConfigMapKeySelector selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of environment variables:
// either a ConfigMap or a Secret.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the referenced source.
	// Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from.
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from.
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
	// The Secret to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the Secret must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
	// The header field name.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// The header field value.
	Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
	// Path to access on the HTTP server.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
	// Name or number of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
	// Host name to connect to, defaults to the pod IP. You probably want to set
	// "Host" in httpHeaders instead.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
	// Scheme to use for connecting to the host.
	// Defaults to HTTP.
	// +optional
	Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
	// Custom headers to set in the request. HTTP allows repeated headers.
	// +optional
	HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
type URIScheme string

const (
	// URISchemeHTTP means that the scheme used will be http://
	URISchemeHTTP URIScheme = "HTTP"
	// URISchemeHTTPS means that the scheme used will be https://
	URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
	// Number or name of the port to access on the container.
	// Number must be in the range 1 to 65535.
	// Name must be an IANA_SVC_NAME.
	Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
	// Optional: Host name to connect to, defaults to the pod IP.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
	// Command is the command line to execute inside the container, the working directory for the
	// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
	// not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
	// a shell, you need to explicitly call out to that shell.
	// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
	// The action taken to determine the health of a container.
	Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
	// Number of seconds after the container has started before liveness probes are initiated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
	// Number of seconds after which the probe times out.
	// Defaults to 1 second. Minimum value is 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
	// How often (in seconds) to perform the probe.
	// Default to 10 seconds. Minimum value is 1.
	// +optional
	PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
	// Minimum consecutive successes for the probe to be considered successful after having failed.
	// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
	// +optional
	SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
	// Minimum consecutive failures for the probe to be considered failed after having succeeded.
	// Defaults to 3. Minimum value is 1.
	// +optional
	FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string

const (
	// PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.
	PullAlways PullPolicy = "Always"
	// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
	PullNever PullPolicy = "Never"
	// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
	PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string

const (
	// TerminationMessageReadFile is the default behavior and will set the container status message to
	// the contents of the container's terminationMessagePath when the container exits.
	TerminationMessageReadFile TerminationMessagePolicy = "File"
	// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
	// for the container status message when the container exits with an error and the
	// terminationMessagePath has no contents.
	TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capability (e.g. "NET_ADMIN").
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
	// Added capabilities.
	// +optional
	Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
	// Removed capabilities.
	// +optional
	Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
type ResourceRequirements struct {
	// Limits describes the maximum amount of compute resources allowed.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
	// Requests describes the minimum amount of compute resources required.
	// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
	// otherwise to an implementation-defined value.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	// +optional
	Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
	// TerminationMessagePathDefault means the default path to capture the application termination message running in a container.
	TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
type Container struct {
	// Name of the container specified as a DNS_LABEL.
	// Each container in a pod must have a unique name (DNS_LABEL).
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Docker image name.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// This field is optional to allow higher level config management to default or override
	// container images in workload controllers like Deployments and StatefulSets.
	// +optional
	Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
	// Entrypoint array. Not executed within a shell.
	// The docker image's ENTRYPOINT is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
	// Arguments to the entrypoint.
	// The docker image's CMD is used if this is not provided.
	// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
	// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
	// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
	// regardless of whether the variable exists or not.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
	// +optional
	Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
	// Container's working directory.
	// If not specified, the container runtime's default will be used, which
	// might be configured in the container image.
	// Cannot be updated.
	// +optional
	WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
	// List of ports to expose from the container. Exposing a port here gives
	// the system additional information about the network connections a
	// container uses, but is primarily informational. Not specifying a port here
	// DOES NOT prevent that port from being exposed. Any port which is
	// listening on the default "0.0.0.0" address inside a container will be
	// accessible from the network.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=containerPort
	// +patchStrategy=merge
	Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
	// List of sources to populate environment variables in the container.
	// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
	// will be reported as an event when the container is starting. When a key exists in multiple
	// sources, the value associated with the last source will take precedence.
	// Values defined by an Env with a duplicate key will take precedence.
	// Cannot be updated.
	// +optional
	EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
	// List of environment variables to set in the container.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
	// Compute Resources required by this container.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
	// +optional
	Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
	// Pod volumes to mount into the container's filesystem.
	// Cannot be updated.
	// +optional
	// +patchMergeKey=mountPath
	// +patchStrategy=merge
	VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
	// volumeDevices is the list of block devices to be used by the container.
	// This is an alpha feature and may change in the future.
	// +patchMergeKey=devicePath
	// +patchStrategy=merge
	// +optional
	VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
	// Periodic probe of container liveness.
	// Container will be restarted if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
	// Periodic probe of container service readiness.
	// Container will be removed from service endpoints if the probe fails.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
	// +optional
	ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
	// Actions that the management system should take in response to container lifecycle events.
	// Cannot be updated.
	// +optional
	Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
	// Optional: Path at which the file to which the container's termination message
	// will be written is mounted into the container's filesystem.
	// Message written is intended to be brief final status, such as an assertion failure message.
	// Will be truncated by the node if greater than 4096 bytes. The total message length across
	// all containers will be limited to 12kb.
	// Defaults to /dev/termination-log.
	// Cannot be updated.
	// +optional
	TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
	// Indicate how the termination message should be populated. File will use the contents of
	// terminationMessagePath to populate the container status message on both success and failure.
	// FallbackToLogsOnError will use the last chunk of container log output if the termination
	// message file is empty and the container exited with an error.
	// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
	// Defaults to File.
	// Cannot be updated.
	// +optional
	TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
	// Image pull policy.
	// One of Always, Never, IfNotPresent.
	// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
	// +optional
	ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
	// Security options the pod should run with.
	// More info: https://kubernetes.io/docs/concepts/policy/security-context/
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	// +optional
	SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`

	// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
	// and shouldn't be used for general purpose containers.

	// Whether this container should allocate a buffer for stdin in the container runtime. If this
	// is not set, reads from stdin in the container will always result in EOF.
	// Default is false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
	// Whether the container runtime should close the stdin channel after it has been opened by
	// a single attach. When stdin is true the stdin stream will remain open across multiple attach
	// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
	// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
	// at which time stdin is closed and remains closed until the container is restarted. If this
	// flag is false, a container process that reads from stdin will never receive an EOF.
	// Default is false.
	// +optional
	StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
	// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
	// Default is false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
	// One and only one of the following should be specified.
	// Exec specifies the action to take.
	// +optional
	Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
	// HTTPGet specifies the http request to perform.
	// +optional
	HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
	// TCPSocket specifies an action involving a TCP port.
	// TCP hooks not yet supported.
	// TODO: implement a realistic TCP lifecycle hook
	// +optional
	TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
type Lifecycle struct {
	// PostStart is called immediately after a container is created. If the handler fails,
	// the container is terminated and restarted according to its restart policy.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
	// PreStop is called immediately before a container is terminated.
	// The container is terminated after the handler completes.
	// The reason for termination is passed to the handler.
	// Regardless of the outcome of the handler, the container is eventually terminated.
	// Other management of the container blocks until the hook completes.
	// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
	// +optional
	PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition; one of True, False, or Unknown.
type ConditionStatus string

// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
	// (brief) reason the container is not yet running.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
	// Message regarding why the container is not yet running.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
	// Time at which the container was last (re-)started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
	// Exit status from the last termination of the container.
	ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
	// Signal from the last termination of the container.
	// +optional
	Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
	// (brief) reason from the last termination of the container.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Message regarding the last termination of the container.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// Time at which previous execution of the container started.
	// +optional
	StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
	// Time at which the container last terminated.
	// +optional
	FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
	// Details about a waiting container.
	// +optional
	Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
	// Details about a running container.
	// +optional
	Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
	// Details about a terminated container.
	// +optional
	Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
	// This must be a DNS_LABEL. Each container in a pod must have a unique name.
	// Cannot be updated.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Details about the container's current condition.
	// +optional
	State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
	// Details about the container's last termination condition.
	// +optional
	LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
	// Specifies whether the container has passed its readiness probe.
	Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
	// The number of times the container has been restarted, currently based on
	// the number of dead containers that have not yet been removed.
	// Note that this is calculated from dead containers. But those containers are subject to
	// garbage collection. This value will get capped at 5 by GC.
	RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
	// The image the container is running.
	// More info: https://kubernetes.io/docs/concepts/containers/images
	// TODO(dchen1107): Which image the container is running with?
	Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
	// ImageID of the container's image.
	ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
	// Container's ID in the format 'docker://<container_id>'.
	// +optional
	ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string

// These are the valid statuses of pods.
const (
	// PodPending means the pod has been accepted by the system, but one or more of the containers
	// has not been started. This includes time before being bound to a node, as well as time spent
	// pulling images onto the host.
	PodPending PodPhase = "Pending"
	// PodRunning means the pod has been bound to a node and all of the containers have been started.
	// At least one container is still running or is in the process of being restarted.
	PodRunning PodPhase = "Running"
	// PodSucceeded means that all containers in the pod have voluntarily terminated
	// with a container exit code of 0, and the system is not going to restart any of these containers.
	PodSucceeded PodPhase = "Succeeded"
	// PodFailed means that all containers in the pod have terminated, and at least one container has
	// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
	PodFailed PodPhase = "Failed"
	// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
	// to an error in communicating with the host of the pod.
	PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type.
type PodConditionType string

// These are valid conditions of pod.
const (
	// PodScheduled represents status of the scheduling process for this pod.
	PodScheduled PodConditionType = "PodScheduled"
	// PodReady means the pod is able to service requests and should be added to the
	// load balancing pools of all matching services.
	PodReady PodConditionType = "Ready"
	// PodInitialized means that all init containers in the pod have started successfully.
	PodInitialized PodConditionType = "Initialized"
	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
	PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
	// Type is the type of the condition.
	// Currently only Ready.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
	// Status is the status of the condition.
	// Can be True, False, Unknown.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we probed the condition.
	// +optional
	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// Unique, one-word, CamelCase reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human-readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string

const (
	// RestartPolicyAlways means the container is restarted regardless of its exit status.
	RestartPolicyAlways RestartPolicy = "Always"
	// RestartPolicyOnFailure means the container is restarted only if it exits with a failure.
	RestartPolicyOnFailure RestartPolicy = "OnFailure"
	// RestartPolicyNever means the container is never restarted.
	RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string

const (
	// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
	// first, if it is available, then fall back on the default
	// (as determined by kubelet) DNS settings.
	DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
	// DNSClusterFirst indicates that the pod should use cluster DNS
	// first unless hostNetwork is true, if it is available, then
	// fall back on the default (as determined by kubelet) DNS settings.
	DNSClusterFirst DNSPolicy = "ClusterFirst"
	// DNSDefault indicates that the pod should use the default (as
	// determined by kubelet) DNS settings.
	DNSDefault DNSPolicy = "Default"
	// DNSNone indicates that the pod should use empty DNS settings. DNS
	// parameters such as nameservers and search paths should be defined via
	// DNSConfig.
	DNSNone DNSPolicy = "None"
)
const (
	// DefaultTerminationGracePeriodSeconds indicates the default duration in
	// seconds a pod needs to terminate gracefully.
	DefaultTerminationGracePeriodSeconds = 30
)
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
	// Required. A list of node selector terms. The terms are ORed.
	NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// A null or empty node selector term matches no objects. The requirements of
// them are ANDed.
type NodeSelectorTerm struct {
	// A list of node selector requirements by node's labels.
	// +optional
	MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
	// A list of node selector requirements by node's fields.
	// +optional
	MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
}
// A node selector requirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
	// The label key that the selector applies to.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Represents a key's relationship to a set of values.
	// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
	Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
	// An array of string values. If the operator is In or NotIn,
	// the values array must be non-empty. If the operator is Exists or DoesNotExist,
	// the values array must be empty. If the operator is Gt or Lt, the values
	// array must have a single element, which will be interpreted as an integer.
	// This array is replaced during a strategic merge patch.
	// +optional
	Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// A node selector operator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string

const (
	NodeSelectorOpIn           NodeSelectorOperator = "In"
	NodeSelectorOpNotIn        NodeSelectorOperator = "NotIn"
	NodeSelectorOpExists       NodeSelectorOperator = "Exists"
	NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
	NodeSelectorOpGt           NodeSelectorOperator = "Gt"
	NodeSelectorOpLt           NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
	// Describes node affinity scheduling rules for the pod.
	// +optional
	NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
	// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
	// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
	// +optional
	PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// PodAffinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`

	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
type PodAntiAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system will try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the anti-affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the anti-affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to a pod label update), the
	// system may or may not try to eventually evict the pod from its node.
	// When there are multiple elements, the lists of nodes corresponding to each
	// podAffinityTerm are intersected, i.e. all terms must be satisfied.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the anti-affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling anti-affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node has pods that match the corresponding podAffinityTerm; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to
// find the most preferred node(s).
type WeightedPodAffinityTerm struct {
	// Weight associated with matching the corresponding podAffinityTerm,
	// in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// Required. A pod affinity term, associated with the corresponding weight.
	PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// Defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
	// A label query over a set of resources, in this case pods.
	// +optional
	LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
	// namespaces specifies which namespaces the labelSelector applies to (matches against);
	// null or empty list means "this pod's namespace"
	// +optional
	Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
	// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
	// the labelSelector in the specified namespaces, where co-located is defined as running on a node
	// whose value of the label with key topologyKey matches that of any node on which any of the
	// selected pods is running.
	// Empty topologyKey is not allowed.
	TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
}
// Node affinity is a group of node affinity scheduling rules.
type NodeAffinity struct {
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// will try to eventually evict the pod from its node.
	// +optional
	// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	// +optional
	RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
	// The scheduler will prefer to schedule pods to nodes that satisfy
	// the affinity expressions specified by this field, but it may choose
	// a node that violates one or more of the expressions. The node that is
	// most preferred is the one with the greatest sum of weights, i.e.
	// for each node that meets all of the scheduling requirements (resource
	// request, requiredDuringScheduling affinity expressions, etc.),
	// compute a sum by iterating through the elements of this field and adding
	// "weight" to the sum if the node matches the corresponding matchExpressions; the
	// node(s) with the highest sum are the most preferred.
	// +optional
	PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
	// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
	Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
	// A node selector term, associated with the corresponding weight.
	Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
	// Required. The taint key to be applied to a node.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// Required. The taint value corresponding to the taint key.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Required. The effect of the taint on pods
	// that do not tolerate the taint.
	// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
	Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
	// TimeAdded represents the time at which the taint was added.
	// It is only written for NoExecute taints.
	// +optional
	TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
// TaintEffect describes the effect a Taint has on pods that do not tolerate it.
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
type TaintEffect string

const (
	// Do not allow new pods to schedule onto the node unless they tolerate the taint,
	// but allow all pods submitted to Kubelet without going through the scheduler
	// to start, and allow all already-running pods to continue running.
	// Enforced by the scheduler.
	TaintEffectNoSchedule TaintEffect = "NoSchedule"
	// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
	// new pods onto the node, rather than prohibiting new pods from scheduling
	// onto the node entirely. Enforced by the scheduler.
	TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
	// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
	// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
	// Kubelet without going through the scheduler to start.
	// Enforced by Kubelet and the scheduler.
	// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
	// Evict any already-running pods that do not tolerate the taint.
	// Currently enforced by NodeController.
	TaintEffectNoExecute TaintEffect = "NoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
	// Key is the taint key that the toleration applies to. Empty means match all taint keys.
	// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
	// +optional
	Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
	// Operator represents a key's relationship to the value.
	// Valid operators are Exists and Equal. Defaults to Equal.
	// Exists is equivalent to wildcard for value, so that a pod can
	// tolerate all taints of a particular category.
	// +optional
	Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
	// Value is the taint value the toleration matches to.
	// If the operator is Exists, the value should be empty, otherwise just a regular string.
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
	// Effect indicates the taint effect to match. Empty means match all taint effects.
	// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
	// +optional
	Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
	// TolerationSeconds represents the period of time the toleration (which must be
	// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
	// it is not set, which means tolerate the taint forever (do not evict). Zero and
	// negative values will be treated as 0 (evict immediately) by the system.
	// +optional
	TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string

const (
	// TolerationOpExists matches any taint value for the toleration's key
	// (wildcard for value; see Toleration.Operator).
	TolerationOpExists TolerationOperator = "Exists"
	// TolerationOpEqual matches only taints whose value equals the toleration's value.
	TolerationOpEqual TolerationOperator = "Equal"
)
// PodSpec is a description of a pod.
type PodSpec struct {
	// List of volumes that can be mounted by containers belonging to the pod.
	// More info: https://kubernetes.io/docs/concepts/storage/volumes
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge,retainKeys
	Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
	// List of initialization containers belonging to the pod.
	// Init containers are executed in order prior to containers being started. If any
	// init container fails, the pod is considered to have failed and is handled according
	// to its restartPolicy. The name for an init container or normal container must be
	// unique among all containers.
	// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
	// The resourceRequirements of an init container are taken into account during scheduling
	// by finding the highest request/limit for each resource type, and then using the max of
	// that value or the sum of the normal containers. Limits are applied to init containers
	// in a similar fashion.
	// Init containers cannot currently be added or removed.
	// Cannot be updated.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
	// +patchMergeKey=name
	// +patchStrategy=merge
	InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
	// List of containers belonging to the pod.
	// Containers cannot currently be added or removed.
	// There must be at least one container in a Pod.
	// Cannot be updated.
	// +patchMergeKey=name
	// +patchStrategy=merge
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
	// Restart policy for all containers within the pod.
	// One of Always, OnFailure, Never.
	// Default to Always.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
	// +optional
	RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
	// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
	// Value must be non-negative integer. The value zero indicates delete immediately.
	// If this value is nil, the default grace period will be used instead.
	// The grace period is the duration in seconds after the processes running in the pod are sent
	// a termination signal and the time when the processes are forcibly halted with a kill signal.
	// Set this value longer than the expected cleanup time for your process.
	// Defaults to 30 seconds.
	// +optional
	TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
	// Optional duration in seconds the pod may be active on the node relative to
	// StartTime before the system will actively try to mark it failed and kill associated containers.
	// Value must be a positive integer.
	// +optional
	ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
	// Set DNS policy for the pod.
	// Defaults to "ClusterFirst".
	// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
	// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
	// To have DNS options set along with hostNetwork, you have to specify DNS policy
	// explicitly to 'ClusterFirstWithHostNet'.
	// +optional
	DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
	// NodeSelector is a selector which must be true for the pod to fit on a node.
	// Selector which must match a node's labels for the pod to be scheduled on that node.
	// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
	// +optional
	NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
	// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
	// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
	// Deprecated: Use serviceAccountName instead.
	// +k8s:conversion-gen=false
	// +optional
	DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
	// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
	// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
	// the scheduler simply schedules this pod onto that node, assuming that it fits resource
	// requirements.
	// +optional
	NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
	// Host networking requested for this pod. Use the host's network namespace.
	// If this option is set, the ports that will be used must be specified.
	// Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
	// Use the host's pid namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
	// Use the host's ipc namespace.
	// Optional: Default to false.
	// +k8s:conversion-gen=false
	// +optional
	HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
	// Share a single process namespace between all of the containers in a pod.
	// When this is set containers will be able to view and signal processes from other containers
	// in the same pod, and the first process in each container will not be assigned PID 1.
	// HostPID and ShareProcessNamespace cannot both be set.
	// Optional: Default to false.
	// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
	// +k8s:conversion-gen=false
	// +optional
	ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
	// SecurityContext holds pod-level security attributes and common container settings.
	// Optional: Defaults to empty. See type description for default values of each field.
	// +optional
	SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
	// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
	// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
	// in the case of docker, only DockerConfig type secrets are honored.
	// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
	// Specifies the hostname of the Pod.
	// If not specified, the pod's hostname will be set to a system-defined value.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	// If not specified, the pod will not have a domainname at all.
	// +optional
	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
	// If specified, the pod's scheduling constraints
	// +optional
	Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
	// If specified, the pod will be dispatched by specified scheduler.
	// If not specified, the pod will be dispatched by default scheduler.
	// +optional
	SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
	// If specified, the pod's tolerations.
	// +optional
	Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
	// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
	// file if specified. This is only valid for non-hostNetwork pods.
	// +optional
	// +patchMergeKey=ip
	// +patchStrategy=merge
	HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
	// If specified, indicates the pod's priority. "system-node-critical" and
	// "system-cluster-critical" are two special keywords which indicate the
	// highest priorities with the former being the highest priority. Any other
	// name must be defined by creating a PriorityClass object with that name.
	// If not specified, the pod priority will be default or zero if there is no
	// default.
	// +optional
	PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
	// The priority value. Various system components use this field to find the
	// priority of the pod. When Priority Admission Controller is enabled, it
	// prevents users from setting this field. The admission controller populates
	// this field from PriorityClassName.
	// The higher the value, the higher the priority.
	// +optional
	Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
	// Specifies the DNS parameters of a pod.
	// Parameters specified here will be merged to the generated DNS
	// configuration based on DNSPolicy.
	// +optional
	DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
}
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
// pod's hosts file.
type HostAlias struct {
	// IP address of the host file entry.
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostnames for the above IP address.
	Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
type PodSecurityContext struct {
	// The SELinux context to be applied to all containers.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in SecurityContext. If set in
	// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
	// takes precedence for that container.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence
	// for that container.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in SecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
	// A list of groups applied to the first process run in each container, in addition
	// to the container's primary GID. If unspecified, no groups will be added to
	// any container.
	// +optional
	SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
	// A special supplemental group that applies to all containers in a pod.
	// Some volume types allow the Kubelet to change the ownership of that volume
	// to be owned by the pod:
	//
	// 1. The owning GID will be the FSGroup
	// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
	// 3. The permission bits are OR'd with rw-rw----
	//
	// If unset, the Kubelet will not modify the ownership and permissions of any volume.
	// +optional
	FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported qos classes of Pods.
type PodQOSClass string

const (
	// PodQOSGuaranteed is the Guaranteed qos class.
	PodQOSGuaranteed PodQOSClass = "Guaranteed"
	// PodQOSBurstable is the Burstable qos class.
	PodQOSBurstable PodQOSClass = "Burstable"
	// PodQOSBestEffort is the BestEffort qos class.
	PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
type PodDNSConfig struct {
	// A list of DNS name server IP addresses.
	// This will be appended to the base nameservers generated from DNSPolicy.
	// Duplicated nameservers will be removed.
	// +optional
	Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
	// A list of DNS search domains for host-name lookup.
	// This will be appended to the base search paths generated from DNSPolicy.
	// Duplicated search paths will be removed.
	// +optional
	Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
	// A list of DNS resolver options.
	// This will be merged with the base options generated from DNSPolicy.
	// Duplicated entries will be removed. Resolution options given in Options
	// will override those that appear in the base DNSPolicy.
	// +optional
	Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
	// Required. Name of the DNS resolver option.
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Value of the DNS resolver option, if any.
	// +optional
	Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
	// Current condition of the pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
	// +optional
	Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
	// Current service state of pod.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
	// A human readable message indicating details about why the pod is in this condition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// A brief CamelCase message indicating details about why the pod is in this state.
	// e.g. 'Evicted'
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
	// scheduled right away as preemption victims receive their graceful termination periods.
	// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
	// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
	// give the resources on this node to a higher priority pod that is created after preemption.
	// As a result, this field may be different than PodSpec.nodeName when the pod is
	// scheduled.
	// +optional
	NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
	// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
	// IP address allocated to the pod. Routable at least within the cluster.
	// Empty if not yet allocated.
	// +optional
	PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
	// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
	// This is before the Kubelet pulled the container image(s) for the pod.
	// +optional
	StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
	// The list has one entry per init container in the manifest. The most recent successful
	// init container will have ready = true, the most recently started container will have
	// startTime set.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
	// The list has one entry per container in the manifest. Each entry is currently the output
	// of `docker inspect`.
	// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
	// +optional
	ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
	// The Quality of Service (QOS) classification assigned to the pod based on resource requirements.
	// See PodQOSClass type for available QOS classes.
	// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
	// +optional
	QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded.
type PodStatusResult struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the pod.
	// This data may not be up to date.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodList is a list of Pods.
type PodList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pods.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
	Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template.
type PodTemplateSpec struct {
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Specification of the desired behavior of the pod.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Template defines the pods that will be created from this pod template.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of pod templates.
	Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
	// Replicas is the number of desired replicas.
	// This is a pointer to distinguish between explicit zero and unspecified.
	// Defaults to 1.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	// +optional
	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
	// Minimum number of seconds for which a newly created pod should be ready
	// without any of its container crashing, for it to be considered available.
	// Defaults to 0 (pod will be considered available as soon as it is ready).
	// +optional
	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
	// Selector is a label query over pods that should match the Replicas count.
	// If Selector is empty, it is defaulted to the labels present on the Pod template.
	// Label keys and values that must match in order to be controlled by this replication
	// controller, if empty defaulted to labels on Pod template.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// TemplateRef is a reference to an object that describes the pod that will be created if
	// insufficient replicas are detected.
	// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
	// +optional
	// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
	// Template is the object that describes the pod that will be created if
	// insufficient replicas are detected. This takes precedence over a TemplateRef.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
	// +optional
	Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
	// Replicas is the most recently observed number of replicas.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
	// The number of pods that have labels matching the labels of the pod template of the replication controller.
	// +optional
	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
	// The number of ready replicas for this replication controller.
	// +optional
	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
	// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
	// +optional
	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
	// ObservedGeneration reflects the generation of the most recently observed replication controller.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
	// Represents the latest available observations of a replication controller's current state.
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a replication controller condition.
type ReplicationControllerConditionType string

// These are valid conditions of a replication controller.
const (
	// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
	// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
	// etc. or deleted due to kubelet being down or finalizers are failing.
	ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
	// Type of replication controller condition.
	Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// The last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
	// The reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
	// A human readable message indicating details about the transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
	metav1.TypeMeta `json:",inline"`
	// If the Labels of a ReplicationController are empty, they are defaulted to
	// be the same as the Pod(s) that the replication controller manages.
	// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the specification of the desired behavior of the replication controller.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status is the most recently observed status of the replication controller.
	// This data may be out of date by some window of time.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of replication controllers.
	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
	Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type used by a service.
type ServiceAffinity string

const (
	// ServiceAffinityClientIP is the Client IP based.
	ServiceAffinityClientIP ServiceAffinity = "ClientIP"
	// ServiceAffinityNone - no session affinity.
	ServiceAffinityNone ServiceAffinity = "None"
)

// DefaultClientIPServiceAffinitySeconds is the default session sticky time
// (10800 seconds, i.e. 3 hours) for ClientIP-based session affinity.
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
	// clientIP contains the configurations of Client IP based session affinity.
	// +optional
	ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
	// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
	// The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP".
	// Default value is 10800 (for 3 hours).
	// +optional
	TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType describes the ingress methods for a service.
type ServiceType string

const (
	// ServiceTypeClusterIP means a service will only be accessible inside the
	// cluster, via the cluster IP.
	ServiceTypeClusterIP ServiceType = "ClusterIP"
	// ServiceTypeNodePort means a service will be exposed on one port of
	// every node, in addition to 'ClusterIP' type.
	ServiceTypeNodePort ServiceType = "NodePort"
	// ServiceTypeLoadBalancer means a service will be exposed via an
	// external load balancer (if the cloud provider supports it), in addition
	// to 'NodePort' type.
	ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
	// ServiceTypeExternalName means a service consists of only a reference to
	// an external name that kubedns or equivalent will return as a CNAME
	// record, with no exposing or proxying of any pods involved.
	ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType describes how a service routes external traffic.
type ServiceExternalTrafficPolicyType string

const (
	// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
	ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
	// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
	ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
	// LoadBalancer contains the current status of the load-balancer,
	// if one is present.
	// +optional
	LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
	// Ingress is a list containing ingress points for the load-balancer.
	// Traffic intended for the service should be sent to these ingress points.
	// +optional
	Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
	// IP is set for load-balancer ingress points that are IP based
	// (typically GCE or OpenStack load-balancers).
	// +optional
	IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
	// Hostname is set for load-balancer ingress points that are DNS based
	// (typically AWS load-balancers).
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
	// The list of ports that are exposed by this service.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +patchMergeKey=port
	// +patchStrategy=merge
	Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
	// Route service traffic to pods with label keys and values matching this
	// selector. If empty or not present, the service is assumed to have an
	// external process managing its endpoints, which Kubernetes will not
	// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
	// Ignored if type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/
	// +optional
	Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
	// clusterIP is the IP address of the service and is usually assigned
	// randomly by the master. If an address is specified manually and is not in
	// use by others, it will be allocated to the service; otherwise, creation
	// of the service will fail. This field can not be changed through updates.
	// Valid values are "None", empty string (""), or a valid IP address. "None"
	// can be specified for headless services when proxying is not required.
	// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
	// type is ExternalName.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
	// type determines how the Service is exposed. Defaults to ClusterIP. Valid
	// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
	// "ExternalName" maps to the specified externalName.
	// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
	// endpoints. Endpoints are determined by the selector or if that is not
	// specified, by manual construction of an Endpoints object. If clusterIP is
	// "None", no virtual IP is allocated and the endpoints are published as a
	// set of endpoints rather than a stable IP.
	// "NodePort" builds on ClusterIP and allocates a port on every node which
	// routes to the clusterIP.
	// "LoadBalancer" builds on NodePort and creates an
	// external load-balancer (if supported in the current cloud) which routes
	// to the clusterIP.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
	// +optional
	Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
	// externalIPs is a list of IP addresses for which nodes in the cluster
	// will also accept traffic for this service. These IPs are not managed by
	// Kubernetes. The user is responsible for ensuring that traffic arrives
	// at a node with this IP. A common example is external load-balancers
	// that are not part of the Kubernetes system.
	// +optional
	ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
	// Supports "ClientIP" and "None". Used to maintain session affinity.
	// Enable client IP based session affinity.
	// Must be ClientIP or None.
	// Defaults to None.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
	// +optional
	SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
	// Only applies to Service Type: LoadBalancer
	// LoadBalancer will get created with the IP specified in this field.
	// This feature depends on whether the underlying cloud-provider supports specifying
	// the loadBalancerIP when a load balancer is created.
	// This field will be ignored if the cloud-provider does not support the feature.
	// +optional
	LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
	// If specified and supported by the platform, traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
	// cloud-provider does not support the feature.
	// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
	// +optional
	LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
	// externalName is the external reference that kubedns or equivalent will
	// return as a CNAME record for this service. No proxying will be involved.
	// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
	// and requires Type to be ExternalName.
	// +optional
	ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
	// externalTrafficPolicy denotes if this Service desires to route external
	// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and NodePort
	// type services, but risks potentially imbalanced traffic spreading.
	// "Cluster" obscures the client source IP and may cause a second hop to
	// another node, but should have good overall load-spreading.
	// +optional
	ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
	// healthCheckNodePort specifies the healthcheck nodePort for the service.
	// If not specified, HealthCheckNodePort is created by the service api
	// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only takes effect when Type is set to LoadBalancer
	// and ExternalTrafficPolicy is set to Local.
	// +optional
	HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
	// publishNotReadyAddresses, when set to true, indicates that DNS implementations
	// must publish the notReadyAddresses of subsets for the Endpoints associated with
	// the Service. The default value is false.
	// The primary use case for setting this field is to use a StatefulSet's Headless Service
	// to propagate SRV records for its Pods without respect to their readiness for purpose
	// of peer discovery.
	// +optional
	PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
	// sessionAffinityConfig contains the configurations of session affinity.
	// +optional
	SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
	// The name of this port within the service. This must be a DNS_LABEL.
	// All ports within a ServiceSpec must have unique names. This maps to
	// the 'Name' field in EndpointPort objects.
	// Optional if only one ServicePort is defined on this service.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The IP protocol for this port. Supports "TCP" and "UDP".
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
	// The port that will be exposed by this service.
	Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
	// Number or name of the port to access on the pods targeted by the service.
	// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
	// If this is a string, it will be looked up as a named port in the
	// target Pod's container ports. If this is not specified, the value
	// of the 'port' field is used (an identity map).
	// This field is ignored for services with clusterIP=None, and should be
	// omitted or set equal to the 'port' field.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
	// +optional
	TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
	// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
	// Usually assigned by the system. If specified, it will be allocated to the service
	// if unused or else creation of the service will fail.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
	// +optional
	NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a service.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the service.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
	// ClusterIPNone - do not assign a cluster IP:
	// no proxying required and no environment variables should be created for pods.
	ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of services.
	Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	// +optional
	// +patchMergeKey=name
	// +patchStrategy=merge
	Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
	// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
	// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
	// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
	// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
	// +optional
	ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
	// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
	// Can be overridden at the pod level.
	// +optional
	AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects.
type ServiceAccountList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ServiceAccounts.
	// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
	Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
//   Name: "mysvc",
//   Subsets: [
//     {
//       Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//       Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//     },
//     {
//       Addresses: [{"ip": "10.10.3.3"}],
//       Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
//     },
//   ]
type Endpoints struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// The set of all endpoints is the union of all subsets. Addresses are placed into
	// subsets according to the IPs they share. A single address with multiple ports,
	// some of which are ready and some of which are not (because they come from
	// different containers) will result in the address being displayed in different
	// subsets for the different ports. No address will appear in both Addresses and
	// NotReadyAddresses in the same subset.
	// Sets of addresses and ports that comprise a service.
	// +optional
	Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
//   {
//     Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
//     Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
//   }
// The resulting set of endpoints can be viewed as:
//   a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
//   b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
	// IP addresses which offer the related ports that are marked as ready. These endpoints
	// should be considered safe for load balancers and clients to utilize.
	// +optional
	Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
	// IP addresses which offer the related ports but are not currently marked as ready
	// because they have not yet finished starting, have recently failed a readiness check,
	// or have recently failed a liveness check.
	// +optional
	NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
	// Port numbers available on the related IP addresses.
	// +optional
	Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
	// The IP of this endpoint.
	// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
	// or link-local multicast (224.0.0.0/24).
	// IPv6 is also accepted but not fully supported on all platforms. Also, certain
	// kubernetes components, like kube-proxy, are not IPv6 ready.
	// TODO: This should allow hostname or IP, See #4447.
	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
	// The Hostname of this endpoint.
	// +optional
	Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
	// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
	// +optional
	NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
	// Reference to object providing the endpoint.
	// +optional
	TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
	// The name of this port (corresponds to ServicePort.Name).
	// Must be a DNS_LABEL.
	// Optional only if one port is defined.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// The port number of the endpoint.
	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
	// The IP protocol for this port.
	// Must be UDP or TCP.
	// Default is TCP.
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of endpoints.
	Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
//
// NOTE(review): the protobuf tags below are part of the persisted wire format
// and must not be "corrected" even where they look unusual.
type NodeSpec struct {
	// PodCIDR represents the pod IP range assigned to the node.
	// +optional
	PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
	// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
	// +optional
	ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
	// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
	// +optional
	Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
	// If specified, the node's taints.
	// +optional
	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
	// If specified, the source to get node configuration from.
	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
	// +optional
	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
	// Deprecated. Not all kubelets will set this field. Remove field after 1.13.
	// see: https://issues.k8s.io/61966
	// +optional
	DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
	// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
	// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
	// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
	//    but used a generic ObjectReference type that didn't really have the fields we needed
	// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
	// so there was no persisted data for these fields that needed to be migrated/handled.
	// +k8s:deprecated=kind
	// +k8s:deprecated=apiVersion
	// +k8s:deprecated=configMapRef,protobuf=1

	// ConfigMap is a reference to a Node's ConfigMap
	ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
type ConfigMapNodeConfigSource struct {
	// Namespace is the metadata.namespace of the referenced ConfigMap.
	// This field is required in all cases.
	Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
	// Name is the metadata.name of the referenced ConfigMap.
	// This field is required in all cases.
	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
	// UID is the metadata.UID of the referenced ConfigMap.
	// This field is forbidden in Node.Spec, and required in Node.Status.
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
	// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
	// This field is forbidden in Node.Spec, and required in Node.Status.
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
	// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure.
	// This field is required in all cases.
	KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
	/*
		The port tag was not properly in quotes in earlier releases, so it must be
		uppercased for backwards compat (since it was falling back to var name of
		'Port').
	*/

	// Port number of the given endpoint.
	Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
	// Endpoint on which Kubelet is listening.
	// +optional
	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
	// MachineID reported by the node. For unique machine identification
	// in the cluster this field is preferred. Learn more from man(5)
	// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
	MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
	// SystemUUID reported by the node. For unique machine identification
	// MachineID is preferred. This field is specific to Red Hat hosts:
	// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
	SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
	// Boot ID reported by the node.
	BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
	// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
	KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
	// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
	OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
	// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
	ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
	// Kubelet Version reported by the node.
	KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
	// KubeProxy Version reported by the node.
	KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
	// The Operating System reported by the node.
	OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
	// The Architecture reported by the node.
	Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
	// Assigned reports the checkpointed config the node will try to use.
	// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
	// config payload to local disk, along with a record indicating intended
	// config. The node refers to this record to choose its config checkpoint, and
	// reports this record in Assigned. Assigned only updates in the status after
	// the record has been checkpointed to disk. When the Kubelet is restarted,
	// it tries to make the Assigned config the Active config by loading and
	// validating the checkpointed payload identified by Assigned.
	// +optional
	Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
	// Active reports the checkpointed config the node is actively using.
	// Active will represent either the current version of the Assigned config,
	// or the current LastKnownGood config, depending on whether attempting to use the
	// Assigned config results in an error.
	// +optional
	Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
	// LastKnownGood reports the checkpointed config the node will fall back to
	// when it encounters an error attempting to use the Assigned config.
	// The Assigned config becomes the LastKnownGood config when the node determines
	// that the Assigned config is stable and correct.
	// This is currently implemented as a 10-minute soak period starting when the local
	// record of Assigned config is updated. If the Assigned config is Active at the end
	// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
	// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
	// because the local default config is always assumed good.
	// You should not make assumptions about the node's method of determining config stability
	// and correctness, as this may change or become configurable in the future.
	// +optional
	LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
	// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
	// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
	// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
	// to load or validate the Assigned config, etc.
	// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
	// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
	// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
	// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
	// by fixing the config assigned in Spec.ConfigSource.
	// You can find additional information for debugging by searching the error message in the Kubelet log.
	// Error is a human-readable description of the error state; machines can check whether or not Error
	// is empty, but should not rely on the stability of the Error text across Kubelet versions.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
	// Capacity represents the total resources of a node.
	// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
	// +optional
	Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
	// Allocatable represents the resources of a node that are available for scheduling.
	// Defaults to Capacity.
	// +optional
	Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
	// NodePhase is the recently observed lifecycle phase of the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
	// The field is never populated, and now is deprecated.
	// +optional
	Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
	// Conditions is an array of current observed node conditions.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
	// List of addresses reachable to the node.
	// Queried from cloud provider, if available.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
	// Endpoints of daemons running on the Node.
	// +optional
	DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
	// Set of ids/uuids to uniquely identify the node.
	// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
	// +optional
	NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
	// List of container images on this node.
	// +optional
	Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
	// List of attachable volumes in use (mounted) by the node.
	// +optional
	VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
	// List of volumes that are attached to the node.
	// +optional
	VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
	// Status of the config assigned to the node via the dynamic Kubelet config feature.
	// +optional
	Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
// UniqueVolumeName is the name of a volume attached to or in use by a node
// (see NodeStatus.VolumesInUse and AttachedVolume.Name).
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node.
type AttachedVolume struct {
	// Name of the attached volume.
	Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
	// DevicePath represents the device path where the volume should be available.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
	// Bounded-sized list of signatures of pods that should avoid this node, sorted
	// in timestamp order from oldest to newest. Size of the slice is unspecified.
	// +optional
	PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// PreferAvoidPodsEntry describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
	// The class of pods.
	PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
	// Time at which this entry was added to the list.
	// +optional
	EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
	// (brief) reason why this entry was added to the list.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// Human readable message indicating why this entry was added to the list.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// PodSignature describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
	// Reference to controller whose pods should avoid this node.
	// +optional
	PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// ContainerImage describes a container image.
type ContainerImage struct {
	// Names by which this image is known.
	// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
	Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
	// The size of the image in bytes.
	// +optional
	SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the recently observed lifecycle phase of a node (see NodeStatus.Phase).
type NodePhase string

// These are the valid phases of node.
const (
	// NodePending means the node has been created/added by the system, but not configured.
	NodePending NodePhase = "Pending"
	// NodeRunning means the node has been configured and has Kubernetes components running.
	NodeRunning NodePhase = "Running"
	// NodeTerminated means the node has been removed from the cluster.
	NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType is the type of a node condition (see NodeCondition.Type).
type NodeConditionType string

// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
	// NodeReady means kubelet is healthy and ready to accept pods.
	NodeReady NodeConditionType = "Ready"
	// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
	// space on the node.
	NodeOutOfDisk NodeConditionType = "OutOfDisk"
	// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
	NodeMemoryPressure NodeConditionType = "MemoryPressure"
	// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
	NodeDiskPressure NodeConditionType = "DiskPressure"
	// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
	NodePIDPressure NodeConditionType = "PIDPressure"
	// NodeNetworkUnavailable means that network for the node is not correctly configured.
	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
	// Type of node condition.
	Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
	// Status of the condition, one of True, False, Unknown.
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Last time we got an update on a given condition.
	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
	// Last time the condition transitioned from one status to another.
	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
	// (brief) reason for the condition's last transition.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
	// Human readable message indicating details about last transition.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType is the type of a node address (see NodeAddress.Type).
type NodeAddressType string

// These are valid address type of node.
const (
	NodeHostName    NodeAddressType = "Hostname"
	NodeExternalIP  NodeAddressType = "ExternalIP"
	NodeInternalIP  NodeAddressType = "InternalIP"
	NodeExternalDNS NodeAddressType = "ExternalDNS"
	NodeInternalDNS NodeAddressType = "InternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
	// Node address type, one of Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS.
	Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
	// The node address.
	Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string

// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
	// CPU, in cores. (500m = .5 cores)
	ResourceCPU ResourceName = "cpu"
	// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceMemory ResourceName = "memory"
	// Volume size, in bytes. (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
	ResourceStorage ResourceName = "storage"
	// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
	ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)

const (
	// Default namespace prefix.
	ResourceDefaultNamespacePrefix = "kubernetes.io/"
	// Name prefix for huge page resources (alpha).
	ResourceHugePagesPrefix = "hugepages-"
)

// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of a node.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Most recently observed status of the node.
	// Populated by the system.
	// Read-only.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of nodes.
	Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string

// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
	FinalizerKubernetes FinalizerName = "kubernetes"
)

// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
	// Phase is the current lifecycle phase of the namespace.
	// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
	// +optional
	Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}

// NamespacePhase is the current lifecycle phase of a namespace (see NamespaceStatus.Phase).
type NamespacePhase string

// These are the valid phases of a namespace.
const (
	// NamespaceActive means the namespace is available for use in the system
	NamespaceActive NamespacePhase = "Active"
	// NamespaceTerminating means the namespace is undergoing graceful termination
	NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the behavior of the Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status describes the current status of a Namespace.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NamespaceList is a list of Namespaces.
type NamespaceList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of Namespace objects in the list.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
//
// Deprecated: in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// The target object that you want to bind to the standard object.
	Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
	// Specifies the target UID.
	// +optional
	UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
	metav1.TypeMeta `json:",inline"`
	// The container for which to stream logs. Defaults to only container if there is one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
	// Follow the log stream of the pod. Defaults to false.
	// +optional
	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
	// Return previous terminated container logs. Defaults to false.
	// +optional
	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
	// A relative time in seconds before the current time from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
	// An RFC3339 timestamp from which to show logs. If this value
	// precedes the time a pod was started, only logs since the pod start will be returned.
	// If this value is in the future, no logs will be returned.
	// Only one of sinceSeconds or sinceTime may be specified.
	// +optional
	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
	// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
	// of log output. Defaults to false.
	// +optional
	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
	// If set, the number of lines from the end of the logs to show. If not specified,
	// logs are shown from the creation of the container or sinceSeconds or sinceTime.
	// +optional
	TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
	// If set, the number of bytes to read from the server before terminating the
	// log output. This may not display a complete final line of logging, and may return
	// slightly more or slightly less than the specified limit.
	// +optional
	LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Stdin if true, redirects the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Stdout if true indicates that stdout is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Stderr if true indicates that stderr is to be redirected for the attach call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the attach call.
	// This is passed through the container runtime so the tty
	// is allocated on the worker node by the container runtime.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// The container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Redirect the standard input stream of the pod for this call.
	// Defaults to false.
	// +optional
	Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
	// Redirect the standard output stream of the pod for this call.
	// Defaults to true.
	// +optional
	Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
	// Redirect the standard error stream of the pod for this call.
	// Defaults to true.
	// +optional
	Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
	// TTY if true indicates that a tty will be allocated for the exec call.
	// Defaults to false.
	// +optional
	TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
	// Container in which to execute the command.
	// Defaults to only container if there is only one container in the pod.
	// +optional
	Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
	// Command is the remote command to execute. argv array. Not executed within a shell.
	Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
	metav1.TypeMeta `json:",inline"`
	// List of ports to forward.
	// Required when using WebSockets.
	// +optional
	Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to pod.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the URL path to use for the current proxy request to node.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
	metav1.TypeMeta `json:",inline"`
	// Path is the part of URLs that include service endpoints, suffixes,
	// and parameters to use for the current proxy request to service.
	// For example, the whole request URL is
	// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
	// Path is _search?q=user:kimchy.
	// +optional
	Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// Namespace of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// API version of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
	// Specific resourceVersion to which this reference is made, if any.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// If referring to a piece of an object instead of an entire object, this string
	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
	// For example, if the object reference is to a container within a pod, this would take on a value like:
	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
	// the event) or if no container name is specified "spec.containers[2]" (container with
	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
// Unlike ObjectReference, only the name is recorded; namespace and type are
// implied by the context of the field holding this reference.
type LocalObjectReference struct {
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// TODO: Add other useful fields. apiVersion, kind, uid?
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to serialized object.
type SerializedReference struct {
	metav1.TypeMeta `json:",inline"`
	// The reference to an object in the system.
	// +optional
	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource contains information for an event.
// It identifies the reporting component and, when relevant, the node it ran on.
type EventSource struct {
	// Component from which the event is generated.
	// +optional
	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
	// Node name on which the event is generated.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for event types (new types could be added in future)
// These populate Event.Type.
const (
	// Information only and will not cause any problems
	EventTypeNormal string = "Normal"
	// These events are to warn that something might go wrong
	EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster.
type Event struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
	// The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
	// This should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status.
	// TODO: provide exact specification for format.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// The component reporting this event. Should be a short machine understandable string.
	// +optional
	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
	// +optional
	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
	// The time at which the most recent occurrence of this event was recorded.
	// +optional
	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
	// The number of times this event has occurred.
	// +optional
	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
	// Type of this event (Normal, Warning), new types could be added in the future
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
	// Time when this Event was first observed.
	// +optional
	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
	// Data about the Event series this event represents or nil if it's a singleton Event.
	// +optional
	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
	// What action was taken/failed regarding to the Regarding object.
	// +optional
	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
	// Optional secondary object for more complex actions.
	// +optional
	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
	// Note: serialized as `reportingComponent` (not `reportingController`) per the tags below.
	// +optional
	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
	// ID of the controller instance, e.g. `kubelet-xyzf`.
	// +optional
	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contain information on series of events, i.e. thing that was/is happening
// continuously for some time.
type EventSeries struct {
	// Number of occurrences in this series up to the last heartbeat time
	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
	// Time of the last occurrence observed
	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
	// State of this Series: Ongoing or Finished
	State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
}
// EventSeriesState describes whether an event series is still being observed.
type EventSeriesState string

// Valid values for EventSeries.State.
const (
	// EventSeriesStateOngoing means the series is still receiving occurrences.
	EventSeriesStateOngoing EventSeriesState = "Ongoing"
	// EventSeriesStateFinished means no further occurrences are expected.
	EventSeriesStateFinished EventSeriesState = "Finished"
	// EventSeriesStateUnknown means the state of the series cannot be determined.
	EventSeriesStateUnknown EventSeriesState = "Unknown"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of events
	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
// Defined as an alias of metav1.List so deepcopy can be generated for it here.
type List metav1.List
// LimitType is a type of object that is limited
type LimitType string

// Valid values for LimitRangeItem.Type.
const (
	// Limit that applies to all pods in a namespace
	LimitTypePod LimitType = "Pod"
	// Limit that applies to all containers in a namespace
	LimitTypeContainer LimitType = "Container"
	// Limit that applies to all persistent volume claims in a namespace
	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
type LimitRangeItem struct {
	// Type of resource that this limit applies to.
	// +optional
	Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
	// Max usage constraints on this kind by resource name.
	// +optional
	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
	// Min usage constraints on this kind by resource name.
	// +optional
	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
	// Default resource requirement limit value by resource name if resource limit is omitted.
	// +optional
	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
	// +optional
	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
	// +optional
	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
	// Limits is the list of LimitRangeItem objects that are enforced.
	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the limits enforced.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of LimitRange objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types
// that can be tracked by a ResourceQuota.
const (
	// Pods, number
	ResourcePods ResourceName = "pods"
	// Services, number
	ResourceServices ResourceName = "services"
	// ReplicationControllers, number
	ResourceReplicationControllers ResourceName = "replicationcontrollers"
	// ResourceQuotas, number
	ResourceQuotas ResourceName = "resourcequotas"
	// ResourceSecrets, number
	ResourceSecrets ResourceName = "secrets"
	// ResourceConfigMaps, number
	ResourceConfigMaps ResourceName = "configmaps"
	// ResourcePersistentVolumeClaims, number
	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
	// ResourceServicesNodePorts, number
	ResourceServicesNodePorts ResourceName = "services.nodeports"
	// ResourceServicesLoadBalancers, number
	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
	// CPU request, in cores. (500m = .5 cores)
	ResourceRequestsCPU ResourceName = "requests.cpu"
	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsMemory ResourceName = "requests.memory"
	// Storage request, in bytes
	ResourceRequestsStorage ResourceName = "requests.storage"
	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
	// CPU limit, in cores. (500m = .5 cores)
	ResourceLimitsCPU ResourceName = "limits.cpu"
	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)
// The following identify resource prefix for Kubernetes object types.
// A prefix is combined with a concrete resource suffix (e.g. a hugepages size).
const (
	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
	// Default resource requests prefix
	DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string

// Valid values for ResourceQuotaSpec.Scopes.
const (
	// Match all pod objects where spec.activeDeadlineSeconds
	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
	// Match all pod objects where !spec.activeDeadlineSeconds
	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
	// Match all pod objects that have best effort quality of service
	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
	// Match all pod objects that do not have best effort quality of service
	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
	// Hard is the set of desired hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// A collection of filters that must match each object tracked by a quota.
	// If not specified, the quota matches all objects.
	// +optional
	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
	// Hard is the set of enforced hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// Used is the current observed total usage of the resource in the namespace.
	// +optional
	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired quota.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status defines the actual enforced quota and its current usage.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of ResourceQuota objects.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the secret data. Each key must consist of alphanumeric
	// characters, '-', '_' or '.'. The serialized form of the secret data is a
	// base64 encoded string, representing the arbitrary (possibly non-string)
	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
	// +optional
	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// stringData allows specifying non-binary secret data in string form.
	// It is provided as a write-only convenience method.
	// All keys and values are merged into the data field on write, overwriting any existing values.
	// It is never output when reading from the API.
	// Note: carries protobuf field 4 while Type (below) is field 3 — the tag
	// numbers, not source order, define the wire format.
	// +k8s:conversion-gen=false
	// +optional
	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
	// Used to facilitate programmatic handling of secret data.
	// +optional
	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
const MaxSecretSize = 1 * 1024 * 1024
// SecretType classifies the contents of a Secret to facilitate programmatic handling.
type SecretType string

const (
	// SecretTypeOpaque is the default. Arbitrary user-defined data
	SecretTypeOpaque SecretType = "Opaque"
	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
	//
	// Required fields:
	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
	// - Secret.Data["token"] - a token that identifies the service account to the API
	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountNameKey = "kubernetes.io/service-account.name"
	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
	ServiceAccountTokenKey = "token"
	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
	ServiceAccountRootCAKey = "ca.crt"
	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
	ServiceAccountNamespaceKey = "namespace"
	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
	//
	// Required fields:
	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
	DockerConfigKey = ".dockercfg"
	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
	//
	// Required fields:
	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
	DockerConfigJsonKey = ".dockerconfigjson"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
	// - Secret.Data["username"] - username used for authentication
	// - Secret.Data["password"] - password or token needed for authentication
	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
	BasicAuthUsernameKey = "username"
	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
	BasicAuthPasswordKey = "password"
	// SecretTypeSSHAuth contains data needed for SSH authentication.
	//
	// Required field:
	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
	SSHAuthPrivateKey = "ssh-privatekey"
	// SecretTypeTLS contains information about a TLS client or server secret. It
	// is primarily used with TLS termination of the Ingress resource, but may be
	// used in other types.
	//
	// Required fields:
	// - Secret.Data["tls.key"] - TLS private key.
	//   Secret.Data["tls.crt"] - TLS certificate.
	// TODO: Consider supporting different formats, specifying CA/destinationCA.
	SecretTypeTLS SecretType = "kubernetes.io/tls"
	// TLSCertKey is the key for tls certificates in a TLS secret.
	TLSCertKey = "tls.crt"
	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
	TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of secret objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume.
type ConfigMap struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the configuration data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// Values with non-UTF-8 byte sequences must use the BinaryData field.
	// The keys stored in Data must not overlap with the keys in
	// the BinaryData field, this is enforced during validation process.
	// +optional
	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// BinaryData contains the binary data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// BinaryData can contain byte sequences that are not in the UTF-8 range.
	// The keys stored in BinaryData must not overlap with the ones in
	// the Data field, this is enforced during validation process.
	// Using this field will require 1.10+ apiserver and
	// kubelet.
	// +optional
	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ConfigMaps.
	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type of a component condition.
// Type and constants for component health validation.
type ComponentConditionType string

// These are the valid conditions for the component.
const (
	// ComponentHealthy indicates the component passed its health check.
	ComponentHealthy ComponentConditionType = "Healthy"
)
// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
	// Type of condition for a component.
	// Valid value: "Healthy"
	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
	// Status of the condition for a component.
	// Valid values for "Healthy": "True", "False", or "Unknown".
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Message about the condition for a component.
	// For example, information about a health check.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// Condition error code for a component.
	// For example, a health check error code.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of component conditions observed
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList reports the status of all the conditions for the component
// as a list of ComponentStatus objects.
type ComponentStatusList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ComponentStatus objects.
	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}

const (
	// DownwardAPIVolumeSourceDefaultMode is the default file mode (0644) used
	// when DownwardAPIVolumeSource.DefaultMode is unset.
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a projected volume.
// Note that this is identical to a downwardAPI volume source without the default
// mode.
type DownwardAPIProjection struct {
	// Items is a list of DownwardAPIVolume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note: carries protobuf field 8 (added after fields 5-7); tag numbers, not
	// source order, define the wire format.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
	// User is a SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is a SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is a SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
// Scheduler-related defaults.
const (
	// "default-scheduler" is the name of default scheduler.
	DefaultSchedulerName = "default-scheduler"
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set.
// NOTE(review): fields carry protobuf tags only (no json tags), unlike most
// types in this file — presumably intentional; confirm before adding json tags.
type Sysctl struct {
// Name of a property to set.
Name string `protobuf:"bytes,1,opt,name=name"`
// Value of a property to set.
Value string `protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// See http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
// Capacity represents the available resources of a node.
Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Parameter and header names used by the remote command execution and
// port-forwarding streaming subprotocols.
const (
// Enable stdin for remote command execution
ExecStdinParam = "input"
// Enable stdout for remote command execution
ExecStdoutParam = "output"
// Enable stderr for remote command execution
ExecStderrParam = "error"
// Enable TTY for remote command execution
ExecTTYParam = "tty"
// Command to run for remote command execution
ExecCommandParam = "command"
// Name of header that specifies stream type
StreamType = "streamType"
// Value for streamType header for stdin stream
StreamTypeStdin = "stdin"
// Value for streamType header for stdout stream
StreamTypeStdout = "stdout"
// Value for streamType header for stderr stream
StreamTypeStderr = "stderr"
// Value for streamType header for data stream
StreamTypeData = "data"
// Value for streamType header for error stream
StreamTypeError = "error"
// Value for streamType header for terminal resize stream
StreamTypeResize = "resize"
// Name of header that specifies the port being forwarded
PortHeader = "port"
// Name of header that specifies a request ID used to associate the error
// and data streams for a single forwarded connection
PortForwardRequestIDHeader = "requestID"
)
Add API for service account token volume projection.
Kubernetes-commit: fd39d8277c83c34b3c0a855dd532ddeafd3d4bf6
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Well-known namespace names.
const (
// NamespaceDefault means the object is in the default namespace which is applied when not specified by clients
NamespaceDefault string = "default"
// NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces
NamespaceAll string = ""
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
type Volume struct {
// Volume's name.
// Must be a DNS_LABEL and unique within the pod.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// VolumeSource represents the location and type of the mounted volume.
// If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
// (Embedded inline: its fields serialize at the top level of Volume.)
VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"`
}
// Represents the source of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
// HostPath represents a pre-existing file or directory on the host
// machine that is directly exposed to the container. This is generally
// used for system agents or other privileged things that are allowed
// to see the host machine. Most containers will NOT need this.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// ---
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"`
// GitRepo represents a git repository at a particular revision.
// +optional
GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"`
// Secret represents a secret that should populate this volume.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"`
// NFS represents an NFS mount on the host that shares a pod's lifetime
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md
// +optional
ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"`
// Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"`
// PersistentVolumeClaimVolumeSource represents a reference to a
// PersistentVolumeClaim in the same namespace.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
// +optional
PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"`
// Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"`
// DownwardAPI represents downward API about the pod that should populate this volume
// +optional
DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// ConfigMap represents a configMap that should populate this volume
// +optional
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"`
// Items for all in one resources secrets, configmaps, and downward API
// (Declared out of numeric order: protobuf field 26.)
// +optional
Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"`
// StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.
// +optional
StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
// This volume finds the bound PV and mounts that volume for the pod. A
// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
// type of volume that is owned by someone else (the system).
type PersistentVolumeClaimVolumeSource struct {
// ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"`
// ReadOnly, when true, will force the ReadOnly setting in VolumeMounts.
// Default false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}
// PersistentVolumeSource is similar to VolumeSource but meant for the
// administrator who creates PVs. Exactly one of its members must be set.
type PersistentVolumeSource struct {
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"`
// AWSElasticBlockStore represents an AWS Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"`
// HostPath represents a directory on the host.
// Provisioned by a developer or tester.
// This is useful for single-node development and testing only!
// On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"`
// Glusterfs represents a Glusterfs volume that is attached to a host and
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"`
// RBD represents a Rados Block Device mount on the host that shares a pod's lifetime.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
// +optional
RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"`
// ISCSI represents an ISCSI Disk resource that is attached to a
// kubelet's host machine and then exposed to the pod. Provisioned by an admin.
// +optional
ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"`
// Cinder represents a cinder volume attached and mounted on kubelets host machine
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"`
// CephFS represents a Ceph FS mount on the host that shares a pod's lifetime
// +optional
CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"`
// FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
// +optional
FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"`
// Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
// +optional
Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"`
// FlexVolume represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
// +optional
FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// +optional
AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
// +optional
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
// Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
// +optional
Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"`
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
// +optional
AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"`
// PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
// +optional
PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"`
// PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
// +optional
PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"`
// ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
// +optional
ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"`
// Local represents directly-attached storage with node affinity
// +optional
Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"`
// StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
// More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md
// +optional
StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"`
// CSI represents storage that handled by an external CSI driver (Beta feature).
// +optional
CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"`
}
const (
// BetaStorageClassAnnotation represents the beta/previous StorageClass annotation.
// It is currently still used and will be held for backwards compatibility.
BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class"
// MountOptionAnnotation defines the mount-option annotation used in PVs.
MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolume (PV) is a storage resource provisioned by an administrator.
// It is analogous to a node.
// PersistentVolumes are cluster-scoped (see the +genclient:nonNamespaced marker above).
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
type PersistentVolume struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines a specification of a persistent volume owned by the cluster.
// Provisioned by an administrator.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
// +optional
Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current information/status for the persistent volume.
// Populated by the system.
// Read-only.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes
// +optional
Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// PersistentVolumeSpec is the specification of a persistent volume.
type PersistentVolumeSpec struct {
// A description of the persistent volume's resources and capacity.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// The actual volume backing the persistent volume.
// (Embedded inline; exactly one of its members must be set.)
PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"`
// AccessModes contains all ways the volume can be mounted.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
// +optional
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
// ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
// Expected to be non-nil when bound.
// claim.VolumeName is the authoritative bind between PV and PVC.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
// +optional
ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"`
// What happens to a persistent volume when released from its claim.
// Valid options are Retain (default for manually created PersistentVolumes), Delete (default
// for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
// Recycle must be supported by the volume plugin underlying this PersistentVolume.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
// +optional
PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"`
// Name of StorageClass to which this persistent volume belongs. Empty value
// means that this volume does not belong to any StorageClass.
// +optional
StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"`
// A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
// simply fail if one is invalid.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
// +optional
MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
// volumeMode defines if a volume is intended to be used with a formatted filesystem
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
// This field influences the scheduling of pods that use this volume.
// +optional
NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"`
}
// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.
type VolumeNodeAffinity struct {
// Required specifies hard node constraints that must be met.
// NOTE(review): pointer with omitempty but no +optional marker, unlike most
// optional fields in this file — confirm whether one should be added.
Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"`
}
// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes.
type PersistentVolumeReclaimPolicy string
const (
// PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim.
// The volume plugin must support Recycling.
PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle"
// PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim.
// The volume plugin must support Deletion.
PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete"
// PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator.
// The default policy for manually created volumes is Retain.
PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain"
)
// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem.
type PersistentVolumeMode string
const (
// PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device.
PersistentVolumeBlock PersistentVolumeMode = "Block"
// PersistentVolumeFilesystem means the volume will be, or is, formatted with a filesystem.
PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem"
)
// PersistentVolumeStatus is the current status of a persistent volume.
type PersistentVolumeStatus struct {
// Phase indicates if a volume is available, bound to a claim, or released by a claim.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase
// +optional
Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"`
// A human-readable message indicating details about why the volume is in this state.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
// Reason is a brief CamelCase string that describes any failure and is meant
// for machine parsing and tidy display in the CLI.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeList is a list of PersistentVolume items.
type PersistentVolumeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of persistent volumes.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaim is a user's request for and claim to a persistent volume.
type PersistentVolumeClaim struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the desired characteristics of a volume requested by a pod author.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
// +optional
Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status represents the current information/status of a persistent volume claim.
// Read-only.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
// +optional
Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PersistentVolumeClaimList is a list of PersistentVolumeClaim items.
type PersistentVolumeClaimList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of persistent volume claims.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PersistentVolumeClaimSpec describes the common attributes of storage devices
// and allows a Source for provider-specific attributes.
type PersistentVolumeClaimSpec struct {
// AccessModes contains the desired access modes the volume should have.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
// +optional
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
// A label query over volumes to consider for binding.
// (Declared out of numeric order: protobuf field 4.)
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"`
// Resources represents the minimum resources the volume should have.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"`
// VolumeName is the binding reference to the PersistentVolume backing this claim.
// +optional
VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"`
// Name of the StorageClass required by the claim.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
// +optional
StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is an alpha feature and may change in the future.
// +optional
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type.
type PersistentVolumeClaimConditionType string
const (
// PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started.
PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
// PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on the node.
PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
)
// PersistentVolumeClaimCondition contains details about the state of a PVC.
type PersistentVolumeClaimCondition struct {
Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// Unique, this should be a short, machine understandable string that gives the reason
// for condition's last transition. If it reports "ResizeStarted" that means the underlying
// persistent volume is being resized.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
type PersistentVolumeClaimStatus struct {
// Phase represents the current phase of PersistentVolumeClaim.
// +optional
Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
// AccessModes contains the actual access modes the volume backing the PVC has.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
// +optional
AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
// Capacity represents the actual resources of the underlying volume.
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// Current Condition of persistent volume claim. If underlying persistent volume is being
// resized then the Condition will be set to 'ResizeStarted'.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
}
// PersistentVolumeAccessMode describes a way a persistent volume can be mounted.
type PersistentVolumeAccessMode string
const (
// ReadWriteOnce -- the volume can be mounted read/write mode to exactly 1 host
ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
// ReadOnlyMany -- the volume can be mounted in read-only mode to many hosts
ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
// ReadWriteMany -- the volume can be mounted in read/write mode to many hosts
ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
)
// PersistentVolumePhase describes the lifecycle phase of a PersistentVolume.
type PersistentVolumePhase string
const (
// VolumePending is used for PersistentVolumes that are not available.
VolumePending PersistentVolumePhase = "Pending"
// VolumeAvailable is used for PersistentVolumes that are not yet bound.
// Available volumes are held by the binder and matched to PersistentVolumeClaims.
VolumeAvailable PersistentVolumePhase = "Available"
// VolumeBound is used for PersistentVolumes that are bound.
VolumeBound PersistentVolumePhase = "Bound"
// VolumeReleased is used for PersistentVolumes where the bound PersistentVolumeClaim was deleted.
// Released volumes must be recycled before becoming available again.
// This phase is used by the persistent volume claim binder to signal to another process to reclaim the resource.
VolumeReleased PersistentVolumePhase = "Released"
// VolumeFailed is used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim.
VolumeFailed PersistentVolumePhase = "Failed"
)
// PersistentVolumeClaimPhase describes the lifecycle phase of a PersistentVolumeClaim.
type PersistentVolumeClaimPhase string
const (
// ClaimPending is used for PersistentVolumeClaims that are not yet bound
ClaimPending PersistentVolumeClaimPhase = "Pending"
// ClaimBound is used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// ClaimLost is used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// HostPathType constrains what must (or may) exist at the host path before a
// HostPath volume is mounted.
type HostPathType string
const (
// HostPathUnset is used for backwards compatibility; leave it empty if unset
HostPathUnset HostPathType = ""
// HostPathDirectoryOrCreate: if nothing exists at the given path, an empty directory will be created there
// as needed with file mode 0755, having the same group and ownership with Kubelet.
HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
// HostPathDirectory: a directory must exist at the given path
HostPathDirectory HostPathType = "Directory"
// HostPathFileOrCreate: if nothing exists at the given path, an empty file will be created there
// as needed with file mode 0644, having the same group and ownership with Kubelet.
HostPathFileOrCreate HostPathType = "FileOrCreate"
// HostPathFile: a file must exist at the given path
HostPathFile HostPathType = "File"
// HostPathSocket: a UNIX socket must exist at the given path
HostPathSocket HostPathType = "Socket"
// HostPathCharDev: a character device must exist at the given path
HostPathCharDev HostPathType = "CharDevice"
// HostPathBlockDev: a block device must exist at the given path
HostPathBlockDev HostPathType = "BlockDevice"
)
// HostPathVolumeSource represents a host path mapped into a pod.
// Host path volumes do not support ownership management or SELinux relabeling.
type HostPathVolumeSource struct {
// Path of the directory on the host.
// If the path is a symlink, it will follow the link to the real path.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
// Type for HostPath Volume
// Defaults to "" (HostPathUnset, i.e. no check is performed before mounting)
// More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
// +optional
Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"`
}
// EmptyDirVolumeSource represents an empty directory for a pod.
// Empty directory volumes support ownership management and SELinux relabeling.
type EmptyDirVolumeSource struct {
// Medium defines what type of storage medium should back this directory.
// The default is "" which means to use the node's default medium.
// Must be an empty string (default) or Memory.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
// +optional
Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"`
// SizeLimit is the total amount of local storage required for this EmptyDir volume.
// The size limit is also applicable for memory medium.
// The maximum usage on memory medium EmptyDir would be the minimum value between
// the SizeLimit specified here and the sum of memory limits of all containers in a pod.
// The default is nil which means that the limit is undefined.
// More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
// +optional
SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"`
}
// GlusterfsVolumeSource represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsVolumeSource struct {
// EndpointsName is the endpoint name that details Glusterfs topology.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`
// Path is the Glusterfs volume path.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// RBDVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// The in-pod variant references its secret by LocalObjectReference (same
// namespace as the pod); compare RBDPersistentVolumeSource, which uses a
// namespaced SecretReference.
type RBDVolumeSource struct {
// CephMonitors is a collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// RBDImage is the rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// RBDPool is the rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// RadosUser is the rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// RBDPersistentVolumeSource represents a Rados Block Device mount that lasts the lifetime of a pod.
// RBD volumes support ownership management and SELinux relabeling.
// Unlike RBDVolumeSource, the secret here is a namespaced SecretReference,
// because a PersistentVolume is cluster-scoped and has no implicit namespace.
type RBDPersistentVolumeSource struct {
// CephMonitors is a collection of Ceph monitors.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// RBDImage is the rados image name.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
// RBDPool is the rados pool name.
// Default is rbd.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"`
// RadosUser is the rados user name.
// Default is admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"`
// Keyring is the path to key ring for RBDUser.
// Default is /etc/ceph/keyring.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"`
// SecretRef is name of the authentication secret for RBDUser. If provided
// overrides keyring.
// Default is nil.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"`
}
// CinderVolumeSource represents a cinder volume resource in OpenStack.
// A Cinder volume must exist before mounting to a container.
// The volume must also be in the same region as the kubelet.
// Cinder volumes support ownership management and SELinux relabeling.
type CinderVolumeSource struct {
// VolumeID is the volume id used to identify the volume in cinder
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// FSType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// CephFSVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
type CephFSVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Path is used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace.
type SecretReference struct {
// Name is unique within a namespace to reference a secret resource.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// Namespace defines the space within which the secret name must be unique.
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
}
// CephFSPersistentVolumeSource represents a Ceph Filesystem mount that lasts the lifetime of a pod.
// Cephfs volumes do not support ownership management or SELinux relabeling.
// Unlike CephFSVolumeSource, the secret here is a namespaced SecretReference,
// because a PersistentVolume is cluster-scoped and has no implicit namespace.
type CephFSPersistentVolumeSource struct {
// Required: Monitors is a collection of Ceph monitors
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"`
// Optional: Path is used as the mounted root, rather than the full Ceph tree, default is /
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
// Optional: User is the rados user name, default is admin
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"`
// Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"`
// Optional: SecretRef is reference to the authentication secret for User, default is empty.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
}
// FlockerVolumeSource represents a Flocker volume mounted by the Flocker agent.
// One and only one of datasetName and datasetUUID should be set.
// Flocker volumes do not support ownership management or SELinux relabeling.
type FlockerVolumeSource struct {
// DatasetName is the name of the dataset stored as metadata -> name on the dataset for Flocker
// should be considered as deprecated
// +optional
DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"`
// DatasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
// +optional
DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"`
}
// StorageMedium defines ways that storage can be allocated to a volume.
type StorageMedium string
const (
StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this
StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux)
StorageMediumHugePages StorageMedium = "HugePages" // use hugepages
)
// Protocol defines network protocols supported for things like container ports.
type Protocol string
const (
// ProtocolTCP is the TCP protocol.
ProtocolTCP Protocol = "TCP"
// ProtocolUDP is the UDP protocol.
ProtocolUDP Protocol = "UDP"
)
// GCEPersistentDiskVolumeSource represents a Persistent Disk resource in Google Compute Engine.
//
// A GCE PD must exist before mounting to a container. The disk must
// also be in the same GCE project and zone as the kubelet. A GCE PD
// can only be mounted as read/write once or read-only many times. GCE
// PDs support ownership management and SELinux relabeling.
type GCEPersistentDiskVolumeSource struct {
// PDName is the unique name of the PD resource in GCE. Used to identify the disk in GCE.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Partition is the partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// QuobyteVolumeSource represents a Quobyte mount that lasts the lifetime of a pod.
// Quobyte volumes do not support ownership management or SELinux relabeling.
type QuobyteVolumeSource struct {
// Registry represents a single or multiple Quobyte Registry services
// specified as a string as host:port pair (multiple entries are separated with commas)
// which acts as the central registry for volumes
Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
// Volume is a string that references an already created Quobyte volume by name.
Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
// ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
// User to map volume access to
// Defaults to serviceaccount user
// +optional
User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
// Group to map volume access to
// Default is no group
// +optional
Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
}
// FlexPersistentVolumeSource represents a generic persistent volume resource that is
// provisioned/attached using an exec based plugin.
// Unlike FlexVolumeSource, the secret here is a namespaced SecretReference,
// because a PersistentVolume is cluster-scoped and has no implicit namespace.
type FlexPersistentVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// FSType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Options is extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// FlexVolumeSource represents a generic volume resource that is
// provisioned/attached using an exec based plugin.
type FlexVolumeSource struct {
// Driver is the name of the driver to use for this volume.
Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
// FSType is the filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Optional: SecretRef is reference to the secret object containing
// sensitive information to pass to the plugin scripts. This may be
// empty if no secret object is specified. If the secret object
// contains more than one secret, all secrets are passed to the plugin
// scripts.
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
// Optional: Defaults to false (read/write). ReadOnly here will force
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
// Optional: Options is extra command options if any.
// +optional
Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"`
}
// AWSElasticBlockStoreVolumeSource represents a Persistent Disk resource in AWS.
//
// An AWS EBS disk must exist before mounting to a container. The disk
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
// can only be mounted as read/write once. AWS EBS volumes support
// ownership management and SELinux relabeling.
type AWSElasticBlockStoreVolumeSource struct {
// VolumeID is the unique ID of the persistent disk resource in AWS (Amazon EBS volume).
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
// Partition is the partition in the volume that you want to mount.
// If omitted, the default is to mount by volume name.
// Examples: For volume /dev/sda1, you specify the partition as "1".
// Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
// +optional
Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
// ReadOnly: specify "true" to force and set the ReadOnly property in VolumeMounts to "true".
// If omitted, the default is "false".
// More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
}
// GitRepoVolumeSource represents a volume that is populated with the contents of a git repository.
// Git repo volumes do not support ownership management.
// Git repo volumes support SELinux relabeling.
type GitRepoVolumeSource struct {
// Repository is the repository URL
Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"`
// Revision is the commit hash for the specified revision.
// +optional
Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
// Directory is the target directory name.
// Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
// git repository. Otherwise, if specified, the volume will contain the git repository in
// the subdirectory with the given name.
// +optional
Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"`
}
// SecretVolumeSource adapts a Secret into a volume.
//
// The contents of the target Secret's Data field will be presented in a volume
// as files using the keys in the Data field as the file names.
// Secret volumes support ownership management and SELinux relabeling.
type SecretVolumeSource struct {
// SecretName is the name of the secret in the pod's namespace to use.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
// +optional
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// Items: if unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: mode bits to use on created files by default. Must be a
// value between 0 and 0777. Defaults to 0644.
// Directories within the path are not affected by this setting.
// This might be in conflict with other options that affect the file
// mode, like fsGroup, and the result can be other mode bits set.
// +optional
DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"`
// Optional: specify whether the Secret or its keys must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
// SecretVolumeSourceDefaultMode is the default file mode (0644) applied when DefaultMode is unset.
SecretVolumeSourceDefaultMode int32 = 0644
)
// SecretProjection adapts a secret into a projected volume.
//
// The contents of the target Secret's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names.
// Note that this is identical to a secret volume source without the default
// mode.
type SecretProjection struct {
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// Items: if unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error unless it is marked optional. Paths must be
// relative and may not contain the '..' path or start with '..'.
// +optional
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
// Optional: specify whether the Secret or its key must be defined
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// NFSVolumeSource represents an NFS mount that lasts the lifetime of a pod.
// NFS volumes do not support ownership management or SELinux relabeling.
type NFSVolumeSource struct {
// Server is the hostname or IP address of the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Server string `json:"server" protobuf:"bytes,1,opt,name=server"`
// Path that is exported by the NFS server.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
// ReadOnly here will force
// the NFS export to be mounted with read-only permissions.
// Defaults to false.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ISCSIVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// The in-pod variant references its CHAP secret by LocalObjectReference (same
// namespace as the pod); compare ISCSIPersistentVolumeSource.
type ISCSIVolumeSource struct {
// TargetPortal is the iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// IQN is the target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// Lun is the iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// ISCSIInterface is the iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// Portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// DiscoveryCHAPAuth defines whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// SessionCHAPAuth defines whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// SecretRef is the CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// InitiatorName is the custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// ISCSIPersistentVolumeSource represents an ISCSI disk.
// ISCSI volumes can only be mounted as read/write once.
// ISCSI volumes support ownership management and SELinux relabeling.
// Unlike ISCSIVolumeSource, the CHAP secret here is a namespaced
// SecretReference, because a PersistentVolume is cluster-scoped and has no
// implicit namespace.
type ISCSIPersistentVolumeSource struct {
// TargetPortal is the iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
// IQN is the target iSCSI Qualified Name.
IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
// Lun is the iSCSI Target Lun number.
Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
// ISCSIInterface is the iSCSI Interface Name that uses an iSCSI transport.
// Defaults to 'default' (tcp).
// +optional
ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
// FSType is the filesystem type of the volume that you want to mount.
// Tip: Ensure that the filesystem type is supported by the host operating system.
// Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
// TODO: how do we prevent errors in the filesystem from compromising the machine
// +optional
FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
// ReadOnly here will force the ReadOnly setting in VolumeMounts.
// Defaults to false.
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
// Portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
// is other than default (typically TCP ports 860 and 3260).
// +optional
Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
// DiscoveryCHAPAuth defines whether support iSCSI Discovery CHAP authentication
// +optional
DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
// SessionCHAPAuth defines whether support iSCSI Session CHAP authentication
// +optional
SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
// SecretRef is the CHAP Secret for iSCSI target and initiator authentication
// +optional
SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
// InitiatorName is the custom iSCSI Initiator Name.
// If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
// <target portal>:<volume name> will be created for the connection.
// +optional
InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
}
// Represents a Fibre Channel volume.
// Fibre Channel volumes can only be mounted as read/write once.
// Fibre Channel volumes support ownership management and SELinux relabeling.
type FCVolumeSource struct {
	// Optional: FC target worldwide names (WWNs).
	// +optional
	TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"`
	// Optional: FC target lun number.
	// +optional
	Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// TODO: how do we prevent errors in the filesystem from compromising the machine
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Optional: Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// Optional: FC volume world wide identifiers (wwids).
	// Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
	// +optional
	WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"`
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
type AzureFileVolumeSource struct {
	// The name of the secret that contains Azure Storage Account Name and Key.
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name.
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
// Unlike AzureFileVolumeSource, the secret holding the storage account credentials may
// live in a namespace other than the pod's (see SecretNamespace).
type AzureFilePersistentVolumeSource struct {
	// The name of the secret that contains Azure Storage Account Name and Key.
	SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"`
	// Share Name.
	ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// The namespace of the secret that contains Azure Storage Account Name and Key.
	// Default is the same as the Pod.
	// +optional
	SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
	// Path that identifies the vSphere volume vmdk.
	VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Storage Policy Based Management (SPBM) profile name.
	// +optional
	StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"`
	// Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.
	// +optional
	StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"`
}
// Represents a Photon Controller persistent disk resource.
type PhotonPersistentDiskVolumeSource struct {
	// ID that identifies the Photon Controller persistent disk.
	PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// AzureDataDiskCachingMode is the host caching mode for an Azure data disk.
type AzureDataDiskCachingMode string
// AzureDataDiskKind is the kind of an Azure data disk (shared, dedicated, or managed).
type AzureDataDiskKind string
// Supported Azure data disk caching modes and disk kinds.
const (
	AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
	AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
	AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
	AzureSharedBlobDisk AzureDataDiskKind = "Shared"
	AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
	AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
type AzureDiskVolumeSource struct {
	// The Name of the data disk in the blob storage.
	DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"`
	// The URI of the data disk in the blob storage.
	DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"`
	// Host Caching mode: None, Read Only, Read Write.
	// +optional
	CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
	// Kind of disk. Expected values:
	//   Shared: multiple blob disks per storage account
	//   Dedicated: single blob disk per storage account
	//   Managed: azure managed data disk (only in managed availability set)
	// Defaults to shared.
	Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.
type PortworxVolumeSource struct {
	// VolumeID uniquely identifies a Portworx volume.
	VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"`
	// FSType represents the filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
	FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume.
// Note: SecretRef here is a LocalObjectReference (pod-namespace scoped);
// compare ScaleIOPersistentVolumeSource, which uses a SecretReference.
type ScaleIOVolumeSource struct {
	// The host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// The name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// SecretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// Flag to enable/disable SSL communication with Gateway, default false.
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// The name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// The ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// The name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume.
type ScaleIOPersistentVolumeSource struct {
	// The host address of the ScaleIO API Gateway.
	Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"`
	// The name of the storage system as configured in ScaleIO.
	System string `json:"system" protobuf:"bytes,2,opt,name=system"`
	// SecretRef references to the secret for ScaleIO user and other
	// sensitive information. If this is not provided, Login operation will fail.
	SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"`
	// Flag to enable/disable SSL communication with Gateway, default false.
	// +optional
	SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"`
	// The name of the ScaleIO Protection Domain for the configured storage.
	// +optional
	ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"`
	// The ScaleIO Storage Pool associated with the protection domain.
	// +optional
	StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"`
	// Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
	// +optional
	StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"`
	// The name of a volume already created in the ScaleIO system
	// that is associated with this volume source.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"`
}
// Represents a StorageOS persistent volume resource.
// Note: SecretRef here is a LocalObjectReference (pod-namespace scoped);
// compare StorageOSPersistentVolumeSource, which uses an ObjectReference.
type StorageOSVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Represents a StorageOS persistent volume resource.
type StorageOSPersistentVolumeSource struct {
	// VolumeName is the human-readable name of the StorageOS volume. Volume
	// names are only unique within a namespace.
	VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"`
	// VolumeNamespace specifies the scope of the volume within StorageOS. If no
	// namespace is specified then the Pod's namespace will be used. This allows the
	// Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
	// Set VolumeName to any name to override the default behaviour.
	// Set to "default" if you are not using namespaces within StorageOS.
	// Namespaces that do not pre-exist within StorageOS will be created.
	// +optional
	VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"`
	// Defaults to false (read/write). ReadOnly here will force
	// the ReadOnly setting in VolumeMounts.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
	// SecretRef specifies the secret to use for obtaining the StorageOS API
	// credentials. If not specified, default values will be attempted.
	// +optional
	SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"`
}
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// volume as files using the keys in the Data field as the file names, unless
// the items element is populated with specific mappings of keys to paths.
// ConfigMap volumes support ownership management and SELinux relabeling.
type ConfigMapVolumeSource struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"`
	// Specify whether the ConfigMap or its keys must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
const (
	// ConfigMapVolumeSourceDefaultMode is the default file mode (0644) used for
	// ConfigMap volume files when DefaultMode is unset.
	ConfigMapVolumeSourceDefaultMode int32 = 0644
)
// Adapts a ConfigMap into a projected volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
// projected volume as files using the keys in the Data field as the file names,
// unless the items element is populated with specific mappings of keys to paths.
// Note that this is identical to a configmap volume source without the default
// mode.
type ConfigMapProjection struct {
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// If unspecified, each key-value pair in the Data field of the referenced
	// ConfigMap will be projected into the volume as a file whose name is the
	// key and content is the value. If specified, the listed keys will be
	// projected into the specified paths, and unlisted keys will not be
	// present. If a key is specified which is not present in the ConfigMap,
	// the volume setup will error unless it is marked optional. Paths must be
	// relative and may not contain the '..' path or start with '..'.
	// +optional
	Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
	// Specify whether the ConfigMap or its keys must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"`
}
// ServiceAccountTokenProjection represents a projected service account token
// volume. This projection can be used to insert a service account token into
// the pods runtime filesystem for use against APIs (Kubernetes API Server or
// otherwise).
type ServiceAccountTokenProjection struct {
	// Audience is the intended audience of the token. A recipient of a token
	// must identify itself with an identifier specified in the audience of the
	// token, and otherwise should reject the token. The audience defaults to the
	// identifier of the apiserver.
	//+optional
	Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"`
	// ExpirationSeconds is the requested duration of validity of the service
	// account token. As the token approaches expiration, the kubelet volume
	// plugin will proactively rotate the service account token. The kubelet will
	// start trying to rotate the token if the token is older than 80 percent of
	// its time to live or if the token is older than 24 hours. Defaults to 1 hour
	// and must be at least 10 minutes.
	//+optional
	ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"`
	// Path is the path relative to the mount point of the file to project the
	// token into.
	Path string `json:"path" protobuf:"bytes,3,opt,name=path"`
}
// Represents a projected volume source.
type ProjectedVolumeSource struct {
	// List of volume projections.
	Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"`
	// Mode bits to use on created files by default. Must be a value between
	// 0 and 0777.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}
// Projection that may be projected along with other supported volume types.
type VolumeProjection struct {
	// All types below are the supported types for projection into the same volume.

	// Information about the secret data to project.
	// +optional
	Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
	// Information about the downwardAPI data to project.
	// +optional
	DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"`
	// Information about the configMap data to project.
	// +optional
	ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
	// Information about the serviceAccountToken data to project.
	// +optional
	ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"`
}
const (
	// ProjectedVolumeSourceDefaultMode is the default file mode (0644) used for
	// projected volume files when DefaultMode is unset.
	ProjectedVolumeSourceDefaultMode int32 = 0644
)
// Maps a string key to a path within a volume.
type KeyToPath struct {
	// The key to project.
	Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
	// The relative path of the file to map the key to.
	// May not be an absolute path.
	// May not contain the path element '..'.
	// May not start with the string '..'.
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"`
}
// Local represents directly-attached storage with node affinity (Beta feature).
type LocalVolumeSource struct {
	// The full path to the volume on the node.
	// It can be either a directory or block device (disk, partition, ...).
	// Directories can be represented only by PersistentVolume with VolumeMode=Filesystem.
	// Block devices can be represented only by VolumeMode=Block, which also requires the
	// BlockVolume alpha feature gate to be enabled.
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
}
// Represents storage that is managed by an external CSI volume driver (Beta feature).
type CSIPersistentVolumeSource struct {
	// Driver is the name of the driver to use for this volume.
	// Required.
	Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
	// VolumeHandle is the unique volume name returned by the CSI volume
	// plugin’s CreateVolume to refer to the volume on all subsequent calls.
	// Required.
	VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"`
	// Optional: The value to pass to ControllerPublishVolumeRequest.
	// Defaults to false (read/write).
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
	// Filesystem type to mount.
	// Must be a filesystem type supported by the host operating system.
	// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
	// +optional
	FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"`
	// Attributes of the volume to publish.
	// +optional
	VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"`
	// ControllerPublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// ControllerPublishVolume and ControllerUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
	// NodeStageSecretRef is a reference to the secret object containing sensitive
	// information to pass to the CSI driver to complete the CSI NodeStageVolume
	// and NodeUnstageVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
	// NodePublishSecretRef is a reference to the secret object containing
	// sensitive information to pass to the CSI driver to complete the CSI
	// NodePublishVolume and NodeUnpublishVolume calls.
	// This field is optional, and may be empty if no secret is required. If the
	// secret object contains more than one secret, all secrets are passed.
	// +optional
	NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
}
// ContainerPort represents a network port in a single container.
type ContainerPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
	// Number of port to expose on the host.
	// If specified, this must be a valid port number, 0 < x < 65536.
	// If HostNetwork is specified, this must match ContainerPort.
	// Most containers do not need this.
	// +optional
	HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"`
	// Protocol for port. Must be UDP or TCP.
	// Defaults to "TCP".
	// +optional
	Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"`
	// What host IP to bind the external port to.
	// +optional
	HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
}
// VolumeMount describes a mounting of a Volume within a container.
type VolumeMount struct {
	// This must match the Name of a Volume.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Mounted read-only if true, read-write otherwise (false or unspecified).
	// Defaults to false.
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
	// Path within the container at which the volume should be mounted. Must
	// not contain ':'.
	MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"`
	// Path within the volume from which the container's volume should be mounted.
	// Defaults to "" (volume's root).
	// +optional
	SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"`
	// mountPropagation determines how mounts are propagated from the host
	// to container and the other way around.
	// When not set, MountPropagationHostToContainer is used.
	// This field is beta in 1.10.
	// +optional
	MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"`
}
// MountPropagationMode describes mount propagation.
// Valid values are declared in the const block below.
type MountPropagationMode string
const (
	// MountPropagationNone means that the volume in a container will
	// not receive new mounts from the host or other containers, and filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode corresponds to "private" in Linux terminology.
	MountPropagationNone MountPropagationMode = "None"
	// MountPropagationHostToContainer means that the volume in a container will
	// receive new mounts from the host or other containers, but filesystems
	// mounted inside the container won't be propagated to the host or other
	// containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rslave" in Linux terminology).
	MountPropagationHostToContainer MountPropagationMode = "HostToContainer"
	// MountPropagationBidirectional means that the volume in a container will
	// receive new mounts from the host or other containers, and its own mounts
	// will be propagated from the container to the host or other containers.
	// Note that this mode is recursively applied to all mounts in the volume
	// ("rshared" in Linux terminology).
	MountPropagationBidirectional MountPropagationMode = "Bidirectional"
)
// VolumeDevice describes a mapping of a raw block device within a container.
type VolumeDevice struct {
	// name must match the name of a persistentVolumeClaim in the pod.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// devicePath is the path inside of the container that the device will be mapped to.
	DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"`
}
// EnvVar represents an environment variable present in a Container.
type EnvVar struct {
	// Name of the environment variable. Must be a C_IDENTIFIER.
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
	// Optional: no more than one of the following may be specified.

	// Variable references $(VAR_NAME) are expanded
	// using the previous defined environment variables in the container and
	// any service environment variables. If a variable cannot be resolved,
	// the reference in the input string will be unchanged. The $(VAR_NAME)
	// syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
	// references will never be expanded, regardless of whether the variable
	// exists or not.
	// Defaults to "".
	// +optional
	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
	// Source for the environment variable's value. Cannot be used if value is not empty.
	// +optional
	ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"`
}
// EnvVarSource represents a source for the value of an EnvVar.
// Exactly one of its fields should be set.
type EnvVarSource struct {
	// Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations,
	// spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"`
	// Selects a key of a ConfigMap.
	// +optional
	ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"`
	// Selects a key of a secret in the pod's namespace.
	// +optional
	SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"`
}
// ObjectFieldSelector selects an APIVersioned field of an object.
type ObjectFieldSelector struct {
	// Version of the schema the FieldPath is written in terms of, defaults to "v1".
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"`
	// Path of the field to select in the specified API version.
	FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"`
}
// ResourceFieldSelector represents container resources (cpu, memory) and their output format.
type ResourceFieldSelector struct {
	// Container name: required for volumes, optional for env vars.
	// +optional
	ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"`
	// Required: resource to select.
	Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"`
	// Specifies the output format of the exposed resources, defaults to "1".
	// +optional
	Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"`
}
// Selects a key from a ConfigMap.
type ConfigMapKeySelector struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key to select.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the ConfigMap or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// SecretKeySelector selects a key of a Secret.
type SecretKeySelector struct {
	// The name of the secret in the pod's namespace to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// The key of the secret to select from. Must be a valid secret key.
	Key string `json:"key" protobuf:"bytes,2,opt,name=key"`
	// Specify whether the Secret or its key must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"`
}
// EnvFromSource represents the source of a set of ConfigMaps or Secrets
// to populate environment variables from.
type EnvFromSource struct {
	// An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.
	// +optional
	Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"`
	// The ConfigMap to select from.
	// +optional
	ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"`
	// The Secret to select from.
	// +optional
	SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"`
}
// ConfigMapEnvSource selects a ConfigMap to populate the environment
// variables with.
//
// The contents of the target ConfigMap's Data field will represent the
// key-value pairs as environment variables.
type ConfigMapEnvSource struct {
	// The ConfigMap to select from.
	LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
	// Specify whether the ConfigMap must be defined.
	// +optional
	Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// SecretEnvSource selects a Secret to populate the environment
// variables with.
//
// The contents of the target Secret's Data field will represent the
// key-value pairs as environment variables.
type SecretEnvSource struct {
// The Secret to select from; the embedded LocalObjectReference supplies its name.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
// Specify whether the Secret must be defined.
// +optional
Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"`
}
// HTTPHeader describes a custom header to be used in HTTP probes.
type HTTPHeader struct {
// The header field name.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// The header field value.
Value string `json:"value" protobuf:"bytes,2,opt,name=value"`
}
// HTTPGetAction describes an action based on HTTP Get requests.
type HTTPGetAction struct {
// Path to access on the HTTP server.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
// Name or number of the port to access on the container.
// Number must be in the range 1 to 65535.
// Name must be an IANA_SVC_NAME.
Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"`
// Host name to connect to, defaults to the pod IP. You probably want to set
// "Host" in httpHeaders instead.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"`
// Scheme to use for connecting to the host ("HTTP" or "HTTPS"; see the URIScheme constants).
// Defaults to HTTP.
// +optional
Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"`
// Custom headers to set in the request. HTTP allows repeated headers.
// +optional
HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"`
}
// URIScheme identifies the scheme used for connection to a host for Get actions.
type URIScheme string
const (
// URISchemeHTTP means that the scheme used will be http://
URISchemeHTTP URIScheme = "HTTP"
// URISchemeHTTPS means that the scheme used will be https://
URISchemeHTTPS URIScheme = "HTTPS"
)
// TCPSocketAction describes an action based on opening a socket.
type TCPSocketAction struct {
// Number or name of the port to access on the container.
// Number must be in the range 1 to 65535.
// Name must be an IANA_SVC_NAME.
Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"`
// Optional: Host name to connect to, defaults to the pod IP.
// +optional
Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// ExecAction describes a "run in container" action.
type ExecAction struct {
// Command is the command line to execute inside the container, the working directory for the
// command is root ('/') in the container's filesystem. The command is simply exec'd, it is
// not run inside a shell, so traditional shell instructions ('|', etc.) won't work. To use
// a shell, you need to explicitly call out to that shell.
// Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
}
// Probe describes a health check to be performed against a container to determine whether it is
// alive or ready to receive traffic.
type Probe struct {
// The action taken to determine the health of a container.
Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"`
// Number of seconds after the container has started before liveness probes are initiated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"`
// Number of seconds after which the probe times out.
// Defaults to 1 second. Minimum value is 1.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"`
// How often (in seconds) to perform the probe.
// Default to 10 seconds. Minimum value is 1.
// +optional
PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"`
// Minimum consecutive successes for the probe to be considered successful after having failed.
// Defaults to 1. Must be 1 for liveness. Minimum value is 1.
// +optional
SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"`
// Minimum consecutive failures for the probe to be considered failed after having succeeded.
// Defaults to 3. Minimum value is 1.
// +optional
FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"`
}
// PullPolicy describes a policy for if/when to pull a container image.
type PullPolicy string
const (
// PullAlways means that kubelet always attempts to pull the latest image. Container will fail if the pull fails.
PullAlways PullPolicy = "Always"
// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present.
PullNever PullPolicy = "Never"
// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
PullIfNotPresent PullPolicy = "IfNotPresent"
)
// TerminationMessagePolicy describes how termination messages are retrieved from a container.
type TerminationMessagePolicy string
// These are the valid termination message policies.
const (
// TerminationMessageReadFile is the default behavior and will set the container status message to
// the contents of the container's terminationMessagePath when the container exits.
TerminationMessageReadFile TerminationMessagePolicy = "File"
// TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs
// for the container status message when the container exits with an error and the
// terminationMessagePath has no contents.
TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError"
)
// Capability represents a POSIX capabilities type.
type Capability string
// Capabilities adds and removes POSIX capabilities from running containers.
type Capabilities struct {
// Added capabilities.
// +optional
Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"`
// Removed capabilities.
// +optional
Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"`
}
// ResourceRequirements describes the compute resource requirements.
// Both fields are ResourceList maps keyed by ResourceName (see the casttype/castkey tags).
type ResourceRequirements struct {
// Limits describes the maximum amount of compute resources allowed.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
// Requests describes the minimum amount of compute resources required.
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
// otherwise to an implementation-defined value.
// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
// +optional
Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
}
const (
// TerminationMessagePathDefault is the default path at which the application termination message is captured inside a container.
TerminationMessagePathDefault string = "/dev/termination-log"
)
// Container is a single application container that you want to run within a pod.
type Container struct {
// Name of the container specified as a DNS_LABEL.
// Each container in a pod must have a unique name (DNS_LABEL).
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Docker image name.
// More info: https://kubernetes.io/docs/concepts/containers/images
// This field is optional to allow higher level config management to default or override
// container images in workload controllers like Deployments and StatefulSets.
// +optional
Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"`
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
// cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
// can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
// regardless of whether the variable exists or not.
// Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
// Container's working directory.
// If not specified, the container runtime's default will be used, which
// might be configured in the container image.
// Cannot be updated.
// +optional
WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"`
// List of ports to expose from the container. Exposing a port here gives
// the system additional information about the network connections a
// container uses, but is primarily informational. Not specifying a port here
// DOES NOT prevent that port from being exposed. Any port which is
// listening on the default "0.0.0.0" address inside a container will be
// accessible from the network.
// Cannot be updated.
// +optional
// +patchMergeKey=containerPort
// +patchStrategy=merge
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
// List of sources to populate environment variables in the container.
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
// will be reported as an event when the container is starting. When a key exists in multiple
// sources, the value associated with the last source will take precedence.
// Values defined by an Env with a duplicate key will take precedence.
// Cannot be updated.
// +optional
EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"`
// List of environment variables to set in the container.
// Cannot be updated.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"`
// Compute Resources required by this container.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
// +optional
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
// +optional
// +patchMergeKey=mountPath
// +patchStrategy=merge
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
// volumeDevices is the list of block devices to be used by the container.
// This is an alpha feature and may change in the future.
// +patchMergeKey=devicePath
// +patchStrategy=merge
// +optional
VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"`
// Periodic probe of container liveness.
// Container will be restarted if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"`
// Periodic probe of container service readiness.
// Container will be removed from service endpoints if the probe fails.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"`
// Actions that the management system should take in response to container lifecycle events.
// Cannot be updated.
// +optional
Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"`
// Optional: Path at which the file to which the container's termination message
// will be written is mounted into the container's filesystem.
// Message written is intended to be brief final status, such as an assertion failure message.
// Will be truncated by the node if greater than 4096 bytes. The total message length across
// all containers will be limited to 12kb.
// Defaults to /dev/termination-log.
// Cannot be updated.
// +optional
TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"`
// Indicate how the termination message should be populated. File will use the contents of
// terminationMessagePath to populate the container status message on both success and failure.
// FallbackToLogsOnError will use the last chunk of container log output if the termination
// message file is empty and the container exited with an error.
// The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
// Defaults to File.
// Cannot be updated.
// +optional
TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"`
// Image pull policy.
// One of Always, Never, IfNotPresent.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
// Security options the pod should run with.
// More info: https://kubernetes.io/docs/concepts/policy/security-context/
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
// Variables for interactive containers, these have very specialized use-cases (e.g. debugging)
// and shouldn't be used for general purpose containers.
// Whether this container should allocate a buffer for stdin in the container runtime. If this
// is not set, reads from stdin in the container will always result in EOF.
// Default is false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"`
// Whether the container runtime should close the stdin channel after it has been opened by
// a single attach. When stdin is true the stdin stream will remain open across multiple attach
// sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
// first client attaches to stdin, and then remains open and accepts data until the client disconnects,
// at which time stdin is closed and remains closed until the container is restarted. If this
// flag is false, a container process that reads from stdin will never receive an EOF.
// Default is false.
// +optional
StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"`
// Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
// Default is false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"`
}
// Handler defines a specific action that should be taken.
// TODO: pass structured data to these actions, and document that data here.
type Handler struct {
// One and only one of the following should be specified.
// Exec specifies the action to take.
// +optional
Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"`
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"`
// TCPSocket specifies an action involving a TCP port.
// TCP hooks not yet supported.
// TODO: implement a realistic TCP lifecycle hook
// +optional
TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"`
}
// Lifecycle describes actions that the management system should take in response to container lifecycle
// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
// until the action is complete, unless the container process fails, in which case the handler is aborted.
// Both hooks are optional.
type Lifecycle struct {
// PostStart is called immediately after a container is created. If the handler fails,
// the container is terminated and restarted according to its restart policy.
// Other management of the container blocks until the hook completes.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
// PreStop is called immediately before a container is terminated.
// The container is terminated after the handler completes.
// The reason for termination is passed to the handler.
// Regardless of the outcome of the handler, the container is eventually terminated.
// Other management of the container blocks until the hook completes.
// More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
// +optional
PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
}
// ConditionStatus is the status of a condition; one of True, False, or Unknown.
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// ContainerStateWaiting is a waiting state of a container.
type ContainerStateWaiting struct {
// (Brief) reason the container is not yet running.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
// Message regarding why the container is not yet running.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
}
// ContainerStateRunning is a running state of a container.
type ContainerStateRunning struct {
// Time at which the container was last (re-)started.
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
}
// ContainerStateTerminated is a terminated state of a container.
type ContainerStateTerminated struct {
// Exit status from the last termination of the container.
ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"`
// Signal from the last termination of the container.
// +optional
Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"`
// (Brief) reason from the last termination of the container.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Message regarding the last termination of the container.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
// Time at which previous execution of the container started.
// +optional
StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"`
// Time at which the container last terminated.
// +optional
FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"`
// Container's ID in the format 'docker://<container_id>'.
// +optional
ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"`
}
// ContainerState holds a possible state of container.
// Only one of its members may be specified.
// If none of them is specified, the default one is ContainerStateWaiting.
type ContainerState struct {
// Details about a waiting container.
// +optional
Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"`
// Details about a running container.
// +optional
Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"`
// Details about a terminated container.
// +optional
Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"`
}
// ContainerStatus contains details for the current status of this container.
type ContainerStatus struct {
// Name of the container. This must be a DNS_LABEL. Each container in a pod must have a unique name.
// Cannot be updated.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Details about the container's current condition.
// +optional
State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"`
// Details about the container's last termination condition.
// +optional
LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"`
// Specifies whether the container has passed its readiness probe.
Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"`
// The number of times the container has been restarted, currently based on
// the number of dead containers that have not yet been removed.
// Note that this is calculated from dead containers. But those containers are subject to
// garbage collection. This value will get capped at 5 by GC.
RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"`
// The image the container is running.
// More info: https://kubernetes.io/docs/concepts/containers/images
// TODO(dchen1107): Which image the container is running with?
Image string `json:"image" protobuf:"bytes,6,opt,name=image"`
// ImageID of the container's image.
ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"`
// Container's ID in the format 'docker://<container_id>'.
// +optional
ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"`
}
// PodPhase is a label for the condition of a pod at the current time.
type PodPhase string
// These are the valid statuses of pods.
const (
// PodPending means the pod has been accepted by the system, but one or more of the containers
// has not been started. This includes time before being bound to a node, as well as time spent
// pulling images onto the host.
PodPending PodPhase = "Pending"
// PodRunning means the pod has been bound to a node and all of the containers have been started.
// At least one container is still running or is in the process of being restarted.
PodRunning PodPhase = "Running"
// PodSucceeded means that all containers in the pod have voluntarily terminated
// with a container exit code of 0, and the system is not going to restart any of these containers.
PodSucceeded PodPhase = "Succeeded"
// PodFailed means that all containers in the pod have terminated, and at least one container has
// terminated in a failure (exited with a non-zero exit code or was stopped by the system).
PodFailed PodPhase = "Failed"
// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
// to an error in communicating with the host of the pod.
PodUnknown PodPhase = "Unknown"
)
// PodConditionType is a valid value for PodCondition.Type.
type PodConditionType string
// These are valid conditions of pod.
const (
// PodScheduled represents status of the scheduling process for this pod.
PodScheduled PodConditionType = "PodScheduled"
// PodReady means the pod is able to service requests and should be added to the
// load balancing pools of all matching services.
PodReady PodConditionType = "Ready"
// PodInitialized means that all init containers in the pod have started successfully.
PodInitialized PodConditionType = "Initialized"
// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
// can't schedule the pod right now, for example due to insufficient resources in the cluster.
// Note: this is an untyped string constant (a condition Reason, not a PodConditionType).
PodReasonUnschedulable = "Unschedulable"
)
// PodCondition contains details for the current condition of this pod.
type PodCondition struct {
// Type is the type of the condition.
// Currently only Ready.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"`
// Status is the status of the condition.
// Can be True, False, Unknown.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// Unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// RestartPolicy describes how the container should be restarted.
// Only one of the following restart policies may be specified.
// If none of the following policies is specified, the default one
// is RestartPolicyAlways.
type RestartPolicy string
// These are the valid restart policies.
const (
RestartPolicyAlways RestartPolicy = "Always"
RestartPolicyOnFailure RestartPolicy = "OnFailure"
RestartPolicyNever RestartPolicy = "Never"
)
// DNSPolicy defines how a pod's DNS will be configured.
type DNSPolicy string
// These are the valid DNS policies.
const (
// DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
// first, if it is available, then fall back on the default
// (as determined by kubelet) DNS settings.
DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
// DNSClusterFirst indicates that the pod should use cluster DNS
// first unless hostNetwork is true, if it is available, then
// fall back on the default (as determined by kubelet) DNS settings.
DNSClusterFirst DNSPolicy = "ClusterFirst"
// DNSDefault indicates that the pod should use the default (as
// determined by kubelet) DNS settings.
DNSDefault DNSPolicy = "Default"
// DNSNone indicates that the pod should use empty DNS settings. DNS
// parameters such as nameservers and search paths should be defined via
// DNSConfig.
DNSNone DNSPolicy = "None"
)
const (
// DefaultTerminationGracePeriodSeconds indicates the default duration in
// seconds a pod needs to terminate gracefully.
DefaultTerminationGracePeriodSeconds = 30
)
// NodeSelector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
type NodeSelector struct {
// Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
}
// NodeSelectorTerm matches no objects when null or empty. Its requirements
// are ANDed.
type NodeSelectorTerm struct {
// A list of node selector requirements by node's labels.
// +optional
MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
// A list of node selector requirements by node's fields.
// +optional
MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
}
// NodeSelectorRequirement is a selector that contains values, a key, and an operator
// that relates the key and values.
type NodeSelectorRequirement struct {
// The label key that the selector applies to.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// Represents a key's relationship to a set of values.
// Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"`
// An array of string values. If the operator is In or NotIn,
// the values array must be non-empty. If the operator is Exists or DoesNotExist,
// the values array must be empty. If the operator is Gt or Lt, the values
// array must have a single element, which will be interpreted as an integer.
// This array is replaced during a strategic merge patch.
// +optional
Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"`
}
// NodeSelectorOperator is the set of operators that can be used in
// a node selector requirement.
type NodeSelectorOperator string
// These are the valid node selector operators.
const (
NodeSelectorOpIn NodeSelectorOperator = "In"
NodeSelectorOpNotIn NodeSelectorOperator = "NotIn"
NodeSelectorOpExists NodeSelectorOperator = "Exists"
NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist"
NodeSelectorOpGt NodeSelectorOperator = "Gt"
NodeSelectorOpLt NodeSelectorOperator = "Lt"
)
// Affinity is a group of affinity scheduling rules.
type Affinity struct {
// Describes node affinity scheduling rules for the pod.
// +optional
NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"`
// Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
// +optional
PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"`
// Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
// +optional
PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"`
}
// PodAffinity is a group of inter pod affinity scheduling rules.
type PodAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system will try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system may or may not try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// PodAntiAffinity is a group of inter-pod anti-affinity scheduling rules.
type PodAntiAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the anti-affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the anti-affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system will try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the anti-affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the anti-affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to a pod label update), the
// system may or may not try to eventually evict the pod from its node.
// When there are multiple elements, the lists of nodes corresponding to each
// podAffinityTerm are intersected, i.e. all terms must be satisfied.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the anti-affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling anti-affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node has pods that match the corresponding podAffinityTerm; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// WeightedPodAffinityTerm pairs a PodAffinityTerm with a weight.
// The weights of all of the matched WeightedPodAffinityTerm fields are added
// per-node to find the most preferred node(s).
type WeightedPodAffinityTerm struct {
// weight associated with matching the corresponding podAffinityTerm,
// in the range 1-100.
Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
// Required. A pod affinity term, associated with the corresponding weight.
PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
}
// PodAffinityTerm defines a set of pods (namely those matching the labelSelector
// relative to the given namespace(s)) that this pod should be
// co-located (affinity) or not co-located (anti-affinity) with,
// where co-located is defined as running on a node whose value of
// the label with key <topologyKey> matches that of any node on which
// a pod of the set of pods is running.
type PodAffinityTerm struct {
// A label query over a set of resources, in this case pods.
// +optional
LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
// namespaces specifies which namespaces the labelSelector applies to (matches against);
// null or empty list means "this pod's namespace"
// +optional
Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
// the labelSelector in the specified namespaces, where co-located is defined as running on a node
// whose value of the label with key topologyKey matches that of any node on which any of the
// selected pods is running.
// Empty topologyKey is not allowed.
TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
}
// NodeAffinity is a group of node affinity scheduling rules
// (i.e. rules constraining which nodes a pod may be scheduled onto).
type NodeAffinity struct {
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to an update), the system
// will try to eventually evict the pod from its node.
// +optional
// RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"`
// If the affinity requirements specified by this field are not met at
// scheduling time, the pod will not be scheduled onto the node.
// If the affinity requirements specified by this field cease to be met
// at some point during pod execution (e.g. due to an update), the system
// may or may not try to eventually evict the pod from its node.
// +optional
RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"`
// The scheduler will prefer to schedule pods to nodes that satisfy
// the affinity expressions specified by this field, but it may choose
// a node that violates one or more of the expressions. The node that is
// most preferred is the one with the greatest sum of weights, i.e.
// for each node that meets all of the scheduling requirements (resource
// request, requiredDuringScheduling affinity expressions, etc.),
// compute a sum by iterating through the elements of this field and adding
// "weight" to the sum if the node matches the corresponding matchExpressions; the
// node(s) with the highest sum are the most preferred.
// +optional
PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
}
// PreferredSchedulingTerm associates a weight with a NodeSelectorTerm.
// An empty preferred scheduling term matches all objects with implicit weight 0
// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
type PreferredSchedulingTerm struct {
// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
// A node selector term, associated with the corresponding weight.
Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// Taint is attached to a node. The node this Taint is attached to has the "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
// Required. The taint key to be applied to a node.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
// The taint value corresponding to the taint key.
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
// Required. The effect of the taint on pods
// that do not tolerate the taint.
// Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
// TimeAdded represents the time at which the taint was added.
// It is only written for NoExecute taints.
// +optional
TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"`
}
// TaintEffect describes the effect a Taint has on pods that do not tolerate it.
type TaintEffect string
const (
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
// Enforced by the scheduler.
TaintEffectNoSchedule TaintEffect = "NoSchedule"
// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
// new pods onto the node, rather than prohibiting new pods from scheduling
// onto the node entirely. Enforced by the scheduler.
TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
// Kubelet without going through the scheduler to start.
// Enforced by Kubelet and the scheduler.
// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
// Evict any already-running pods that do not tolerate the taint.
// Currently enforced by NodeController.
TaintEffectNoExecute TaintEffect = "NoExecute"
)
// Toleration is attached to a pod. The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
// Key is the taint key that the toleration applies to. Empty means match all taint keys.
// If the key is empty, operator must be Exists; this combination means to match all values and all keys.
// +optional
Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
// Operator represents a key's relationship to the value.
// Valid operators are Exists and Equal. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
// +optional
Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
// Value is the taint value the toleration matches to.
// If the operator is Exists, the value should be empty, otherwise just a regular string.
// +optional
Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
// Effect indicates the taint effect to match. Empty means match all taint effects.
// When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
// +optional
Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
// TolerationSeconds represents the period of time the toleration (which must be
// of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
// it is not set, which means tolerate the taint forever (do not evict). Zero and
// negative values will be treated as 0 (evict immediately) by the system.
// +optional
TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
}
// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string
const (
// TolerationOpExists matches any value of the taint key (wildcard for value).
TolerationOpExists TolerationOperator = "Exists"
// TolerationOpEqual matches only when the taint value equals the toleration's Value.
TolerationOpEqual TolerationOperator = "Equal"
)
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
// More info: https://kubernetes.io/docs/concepts/storage/volumes
// +optional
// +patchMergeKey=name
// +patchStrategy=merge,retainKeys
Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
// List of initialization containers belonging to the pod.
// Init containers are executed in order prior to containers being started. If any
// init container fails, the pod is considered to have failed and is handled according
// to its restartPolicy. The name for an init container or normal container must be
// unique among all containers.
// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max
// of that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Cannot be updated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
// +patchMergeKey=name
// +patchStrategy=merge
InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
// List of containers belonging to the pod.
// Containers cannot currently be added or removed.
// There must be at least one container in a Pod.
// Cannot be updated.
// +patchMergeKey=name
// +patchStrategy=merge
Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
// Restart policy for all containers within the pod.
// One of Always, OnFailure, Never.
// Default to Always.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
// +optional
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
// Value must be non-negative integer. The value zero indicates delete immediately.
// If this value is nil, the default grace period will be used instead.
// The grace period is the duration in seconds after the processes running in the pod are sent
// a termination signal and the time when the processes are forcibly halted with a kill signal.
// Set this value longer than the expected cleanup time for your process.
// Defaults to 30 seconds.
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
// Optional duration in seconds the pod may be active on the node relative to
// StartTime before the system will actively try to mark it failed and kill associated containers.
// Value must be a positive integer.
// +optional
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
// Set DNS policy for the pod.
// Defaults to "ClusterFirst".
// Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
// DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
// To have DNS options set along with hostNetwork, you have to specify DNS policy
// explicitly to 'ClusterFirstWithHostNet'.
// +optional
DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
// NodeSelector is a selector which must be true for the pod to fit on a node.
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
// DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
// Deprecated: Use serviceAccountName instead.
// +k8s:conversion-gen=false
// +optional
DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
// AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
// NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
// the scheduler simply schedules this pod onto that node, assuming that it fits resource
// requirements.
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
// Host networking requested for this pod. Use the host's network namespace.
// If this option is set, the ports that will be used must be specified.
// Default to false.
// +k8s:conversion-gen=false
// +optional
HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
// Use the host's pid namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
// Use the host's ipc namespace.
// Optional: Default to false.
// +k8s:conversion-gen=false
// +optional
HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
// Share a single process namespace between all of the containers in a pod.
// When this is set containers will be able to view and signal processes from other containers
// in the same pod, and the first process in each container will not be assigned PID 1.
// HostPID and ShareProcessNamespace cannot both be set.
// Optional: Default to false.
// This field is alpha-level and is honored only by servers that enable the PodShareProcessNamespace feature.
// +k8s:conversion-gen=false
// +optional
ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
// SecurityContext holds pod-level security attributes and common container settings.
// Optional: Defaults to empty. See type description for default values of each field.
// +optional
SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"`
// ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
// If specified, these secrets will be passed to individual puller implementations for them to use. For example,
// in the case of docker, only DockerConfig type secrets are honored.
// More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"`
// Specifies the hostname of the Pod
// If not specified, the pod's hostname will be set to a system-defined value.
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"`
// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
// If not specified, the pod will not have a domainname at all.
// +optional
Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"`
// If specified, the pod's scheduling constraints
// +optional
Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"`
// If specified, the pod will be dispatched by specified scheduler.
// If not specified, the pod will be dispatched by default scheduler.
// +optional
SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"`
// If specified, the pod's tolerations.
// +optional
Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"`
// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
// file if specified. This is only valid for non-hostNetwork pods.
// +optional
// +patchMergeKey=ip
// +patchStrategy=merge
HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"`
// If specified, indicates the pod's priority. "system-node-critical" and
// "system-cluster-critical" are two special keywords which indicate the
// highest priorities with the former being the highest priority. Any other
// name must be defined by creating a PriorityClass object with that name.
// If not specified, the pod priority will be default or zero if there is no
// default.
// +optional
PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"`
// The priority value. Various system components use this field to find the
// priority of the pod. When Priority Admission Controller is enabled, it
// prevents users from setting this field. The admission controller populates
// this field from PriorityClassName.
// The higher the value, the higher the priority.
// +optional
Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"`
// Specifies the DNS parameters of a pod.
// Parameters specified here will be merged to the generated DNS
// configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"`
}
// HostAlias holds the mapping between an IP address and hostnames that will be injected
// as an entry in the pod's hosts file.
type HostAlias struct {
// IP address of the host file entry.
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Hostnames for the above IP address.
Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
}
// PodSecurityContext holds pod-level security attributes and common container settings.
// Some fields are also present in container.securityContext. Field values of
// container.securityContext take precedence over field values of PodSecurityContext.
// All fields are optional.
type PodSecurityContext struct {
// The SELinux context to be applied to all containers.
// If unspecified, the container runtime will allocate a random SELinux context for each
// container. May also be set in SecurityContext. If set in
// both SecurityContext and PodSecurityContext, the value specified in SecurityContext
// takes precedence for that container.
// +optional
SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
// The UID to run the entrypoint of the container process.
// Defaults to user specified in image metadata if unspecified.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence
// for that container.
// +optional
RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
// The GID to run the entrypoint of the container process.
// Uses runtime default if unset.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence
// for that container.
// +optional
RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
// Indicates that the container must run as a non-root user.
// If true, the Kubelet will validate the image at runtime to ensure that it
// does not run as UID 0 (root) and fail to start the container if it does.
// If unset or false, no such validation will be performed.
// May also be set in SecurityContext. If set in both SecurityContext and
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
// A list of groups applied to the first process run in each container, in addition
// to the container's primary GID. If unspecified, no groups will be added to
// any container.
// +optional
SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"`
// A special supplemental group that applies to all containers in a pod.
// Some volume types allow the Kubelet to change the ownership of that volume
// to be owned by the pod:
//
// 1. The owning GID will be the FSGroup
// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
// 3. The permission bits are OR'd with rw-rw----
//
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
// +optional
FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"`
}
// PodQOSClass defines the supported QoS classes of Pods.
type PodQOSClass string
const (
// PodQOSGuaranteed is the Guaranteed QoS class.
PodQOSGuaranteed PodQOSClass = "Guaranteed"
// PodQOSBurstable is the Burstable QoS class.
PodQOSBurstable PodQOSClass = "Burstable"
// PodQOSBestEffort is the BestEffort QoS class.
PodQOSBestEffort PodQOSClass = "BestEffort"
)
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
// All fields are optional.
type PodDNSConfig struct {
// A list of DNS name server IP addresses.
// This will be appended to the base nameservers generated from DNSPolicy.
// Duplicated nameservers will be removed.
// +optional
Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"`
// A list of DNS search domains for host-name lookup.
// This will be appended to the base search paths generated from DNSPolicy.
// Duplicated search paths will be removed.
// +optional
Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"`
// A list of DNS resolver options.
// This will be merged with the base options generated from DNSPolicy.
// Duplicated entries will be removed. Resolution options given in Options
// will override those that appear in the base DNSPolicy.
// +optional
Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"`
}
// PodDNSConfigOption defines DNS resolver options of a pod.
type PodDNSConfigOption struct {
// Required. The name of the resolver option.
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The value of the resolver option, if any.
// +optional
Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
}
// PodStatus represents information about the status of a pod. Status may trail the actual
// state of a system.
type PodStatus struct {
// The current phase of the pod.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
// +optional
Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"`
// Current service state of pod.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
// A human readable message indicating details about why the pod is in this condition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the pod is in this state.
// e.g. 'Evicted'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be
// scheduled right away as preemption victims receive their graceful termination periods.
// This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide
// to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to
// give the resources on this node to a higher priority pod that is created after preemption.
// As a result, this field may be different than PodSpec.nodeName when the pod is
// scheduled.
// +optional
NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"`
// IP address of the host to which the pod is assigned. Empty if not yet scheduled.
// +optional
HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"`
// IP address allocated to the pod. Routable at least within the cluster.
// Empty if not yet allocated.
// +optional
PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"`
// RFC 3339 date and time at which the object was acknowledged by the Kubelet.
// This is before the Kubelet pulled the container image(s) for the pod.
// +optional
StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
// The list has one entry per init container in the manifest. The most recent successful
// init container will have ready = true, the most recently started container will have
// startTime set.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"`
// The list has one entry per container in the manifest. Each entry is currently the output
// of `docker inspect`.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status
// +optional
ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"`
// The Quality of Service (QOS) classification assigned to the pod based on resource requirements
// See PodQOSClass type for available QOS classes
// More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md
// +optional
QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded and decoded
type PodStatusResult struct {
// Standard type metadata.
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Most recently observed status of the pod.
// This data may not be up to date.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts.
type Pod struct {
// Standard type metadata.
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the pod.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the pod.
// This data may not be up to date.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodList is a list of Pods.
type PodList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of pods.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md
Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// PodTemplateSpec describes the data a pod should have when created from a template
type PodTemplateSpec struct {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the pod.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplate describes a template for creating copies of a predefined pod.
type PodTemplate struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Template defines the pods that will be created from this pod template.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodTemplateList is a list of PodTemplates.
type PodTemplateList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of pod templates.
Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ReplicationControllerSpec is the specification of a replication controller.
type ReplicationControllerSpec struct {
// Replicas is the number of desired replicas.
// This is a pointer to distinguish between explicit zero and unspecified.
// Defaults to 1.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
// +optional
Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
// Defaults to 0 (pod will be considered available as soon as it is ready)
// +optional
MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
// Selector is a label query over pods that should match the Replicas count.
// If Selector is empty, it is defaulted to the labels present on the Pod template.
// Label keys and values that must match in order to be controlled by this replication
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// TemplateRef is a reference to an object that describes the pod that will be created if
// insufficient replicas are detected.
// Reference to an object that describes the pod that will be created if insufficient replicas are detected.
// +optional
// TemplateRef *ObjectReference `json:"templateRef,omitempty"`
// Template is the object that describes the pod that will be created if
// insufficient replicas are detected. This takes precedence over a TemplateRef.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
// +optional
Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
}
// ReplicationControllerStatus represents the current status of a replication
// controller.
type ReplicationControllerStatus struct {
// Replicas is the most recently observed number of replicas.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
// The number of pods that have labels matching the labels of the pod template of the replication controller.
// +optional
FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
// The number of ready replicas for this replication controller.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
// The number of available replicas (ready for at least minReadySeconds) for this replication controller.
// +optional
AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
// ObservedGeneration reflects the generation of the most recently observed replication controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
// Represents the latest available observations of a replication controller's current state.
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
}
// ReplicationControllerConditionType is the type of a ReplicationControllerCondition.
type ReplicationControllerConditionType string
// These are valid conditions of a replication controller.
const (
// ReplicationControllerReplicaFailure is added in a replication controller when one of its pods
// fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors,
// etc. or deleted due to kubelet being down or finalizers are failing.
ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure"
)
// ReplicationControllerCondition describes the state of a replication controller at a certain point.
type ReplicationControllerCondition struct {
// Type of replication controller condition.
Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// The last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationController represents the configuration of a replication controller.
type ReplicationController struct {
metav1.TypeMeta `json:",inline"`
// If the Labels of a ReplicationController are empty, they are defaulted to
// be the same as the Pod(s) that the replication controller manages.
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the specification of the desired behavior of the replication controller.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status is the most recently observed status of the replication controller.
// This data may be out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicationControllerList is a collection of replication controllers.
type ReplicationControllerList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of replication controllers.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ServiceAffinity is the session affinity type string for a service.
// Valid values are "ClientIP" and "None".
type ServiceAffinity string
const (
// ServiceAffinityClientIP is the Client IP based.
ServiceAffinityClientIP ServiceAffinity = "ClientIP"
// ServiceAffinityNone - no session affinity.
ServiceAffinityNone ServiceAffinity = "None"
)
// DefaultClientIPServiceAffinitySeconds is the default ClientIP session sticky
// time: 10800 seconds (3 hours).
const DefaultClientIPServiceAffinitySeconds int32 = 10800
// SessionAffinityConfig represents the configurations of session affinity.
type SessionAffinityConfig struct {
// clientIP contains the configurations of Client IP based session affinity.
// +optional
ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"`
}
// ClientIPConfig represents the configurations of Client IP based session affinity.
type ClientIPConfig struct {
// timeoutSeconds specifies the seconds of ClientIP type session sticky time.
// The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP".
// Default value is 10800(for 3 hours).
// +optional
TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"`
}
// ServiceType is the service type string that describes ingress methods for a service.
type ServiceType string
const (
// ServiceTypeClusterIP means a service will only be accessible inside the
// cluster, via the cluster IP.
ServiceTypeClusterIP ServiceType = "ClusterIP"
// ServiceTypeNodePort means a service will be exposed on one port of
// every node, in addition to 'ClusterIP' type.
ServiceTypeNodePort ServiceType = "NodePort"
// ServiceTypeLoadBalancer means a service will be exposed via an
// external load balancer (if the cloud provider supports it), in addition
// to 'NodePort' type.
ServiceTypeLoadBalancer ServiceType = "LoadBalancer"
// ServiceTypeExternalName means a service consists of only a reference to
// an external name that kubedns or equivalent will return as a CNAME
// record, with no exposing or proxying of any pods involved.
ServiceTypeExternalName ServiceType = "ExternalName"
)
// ServiceExternalTrafficPolicyType is the external traffic policy type string
// for a service. Valid values are "Local" and "Cluster".
type ServiceExternalTrafficPolicyType string
const (
// ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior.
ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local"
// ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior.
ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster"
)
// ServiceStatus represents the current status of a service.
type ServiceStatus struct {
// LoadBalancer contains the current status of the load-balancer,
// if one is present.
// +optional
LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"`
}
// LoadBalancerStatus represents the status of a load-balancer.
type LoadBalancerStatus struct {
// Ingress is a list containing ingress points for the load-balancer.
// Traffic intended for the service should be sent to these ingress points.
// +optional
Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
}
// LoadBalancerIngress represents the status of a load-balancer ingress point:
// traffic intended for the service should be sent to an ingress point.
type LoadBalancerIngress struct {
// IP is set for load-balancer ingress points that are IP based
// (typically GCE or OpenStack load-balancers)
// +optional
IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"`
// Hostname is set for load-balancer ingress points that are DNS based
// (typically AWS load-balancers)
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"`
}
// ServiceSpec describes the attributes that a user creates on a service.
type ServiceSpec struct {
// The list of ports that are exposed by this service.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +patchMergeKey=port
// +patchStrategy=merge
Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"`
// Route service traffic to pods with label keys and values matching this
// selector. If empty or not present, the service is assumed to have an
// external process managing its endpoints, which Kubernetes will not
// modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// clusterIP is the IP address of the service and is usually assigned
// randomly by the master. If an address is specified manually and is not in
// use by others, it will be allocated to the service; otherwise, creation
// of the service will fail. This field can not be changed through updates.
// Valid values are "None", empty string (""), or a valid IP address. "None"
// can be specified for headless services when proxying is not required.
// Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
// type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional
ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"`
// type determines how the Service is exposed. Defaults to ClusterIP. Valid
// options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
// "ExternalName" maps to the specified externalName.
// "ClusterIP" allocates a cluster-internal IP address for load-balancing to
// endpoints. Endpoints are determined by the selector or if that is not
// specified, by manual construction of an Endpoints object. If clusterIP is
// "None", no virtual IP is allocated and the endpoints are published as a
// set of endpoints rather than a stable IP.
// "NodePort" builds on ClusterIP and allocates a port on every node which
// routes to the clusterIP.
// "LoadBalancer" builds on NodePort and creates an
// external load-balancer (if supported in the current cloud) which routes
// to the clusterIP.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
// +optional
Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"`
// externalIPs is a list of IP addresses for which nodes in the cluster
// will also accept traffic for this service. These IPs are not managed by
// Kubernetes. The user is responsible for ensuring that traffic arrives
// at a node with this IP. A common example is external load-balancers
// that are not part of the Kubernetes system.
// +optional
ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"`
// Supports "ClientIP" and "None". Used to maintain session affinity.
// Enable client IP based session affinity.
// Must be ClientIP or None.
// Defaults to None.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional
SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"`
// Only applies to Service Type: LoadBalancer
// LoadBalancer will get created with the IP specified in this field.
// This feature depends on whether the underlying cloud-provider supports specifying
// the loadBalancerIP when a load balancer is created.
// This field will be ignored if the cloud-provider does not support the feature.
// +optional
LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"`
// If specified and supported by the platform, traffic through the cloud-provider
// load-balancer will be restricted to the specified client IPs. This field will be
// ignored if the cloud-provider does not support the feature.
// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
// +optional
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
// externalName is the external reference that kubedns or equivalent will
// return as a CNAME record for this service. No proxying will be involved.
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
// and requires Type to be ExternalName.
// +optional
ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"`
// externalTrafficPolicy denotes if this Service desires to route external
// traffic to node-local or cluster-wide endpoints. "Local" preserves the
// client source IP and avoids a second hop for LoadBalancer and NodePort
// type services, but risks potentially imbalanced traffic spreading.
// "Cluster" obscures the client source IP and may cause a second hop to
// another node, but should have good overall load-spreading.
// +optional
ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"`
// healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api
// backend with the allocated nodePort. Will use user-specified nodePort value
// if specified by the client. Only takes effect when Type is set to LoadBalancer
// and ExternalTrafficPolicy is set to Local.
// +optional
HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"`
// publishNotReadyAddresses, when set to true, indicates that DNS implementations
// must publish the notReadyAddresses of subsets for the Endpoints associated with
// the Service. The default value is false.
// The primary use case for setting this field is to use a StatefulSet's Headless Service
// to propagate SRV records for its Pods without respect to their readiness for purpose
// of peer discovery.
// +optional
PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"`
// sessionAffinityConfig contains the configurations of session affinity.
// +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
}
// ServicePort contains information on service's port.
type ServicePort struct {
// The name of this port within the service. This must be a DNS_LABEL.
// All ports within a ServiceSpec must have unique names. This maps to
// the 'Name' field in EndpointPort objects.
// Optional if only one ServicePort is defined on this service.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The IP protocol for this port. Supports "TCP" and "UDP".
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"`
// The port that will be exposed by this service.
Port int32 `json:"port" protobuf:"varint,3,opt,name=port"`
// Number or name of the port to access on the pods targeted by the service.
// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
// If this is a string, it will be looked up as a named port in the
// target Pod's container ports. If this is not specified, the value
// of the 'port' field is used (an identity map).
// This field is ignored for services with clusterIP=None, and should be
// omitted or set equal to the 'port' field.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service
// +optional
TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"`
// The port on each node on which this service is exposed when type=NodePort or LoadBalancer.
// Usually assigned by the system. If specified, it will be allocated to the service
// if unused or else creation of the service will fail.
// Default is to auto-allocate a port if the ServiceType of this Service requires one.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
// +optional
NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"`
}
// +genclient
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Service is a named abstraction of software service (for example, mysql) consisting of local port
// (for example 3306) that the proxy listens on, and the selector that determines which pods
// will answer requests sent through the proxy.
type Service struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of a service.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the service.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
const (
// ClusterIPNone - do not assign a cluster IP
// no proxying required and no environment variables should be created for pods
ClusterIPNone = "None"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceList holds a list of services.
type ServiceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of services.
Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccount binds together:
// * a name, understood by users, and perhaps by peripheral systems, for an identity
// * a principal that can be authenticated and authorized
// * a set of secrets
type ServiceAccount struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount.
// More info: https://kubernetes.io/docs/concepts/configuration/secret
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"`
// ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images
// in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets
// can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet.
// More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
// +optional
ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"`
// AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.
// Can be overridden at the pod level.
// +optional
AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceAccountList is a list of ServiceAccount objects
type ServiceAccountList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ServiceAccounts.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Endpoints is a collection of endpoints that implement the actual service. Example:
// Name: "mysvc",
// Subsets: [
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// },
// {
// Addresses: [{"ip": "10.10.3.3"}],
// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
// },
// ]
type Endpoints struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The set of all endpoints is the union of all subsets. Addresses are placed into
// subsets according to the IPs they share. A single address with multiple ports,
// some of which are ready and some of which are not (because they come from
// different containers) will result in the address being displayed in different
// subsets for the different ports. No address will appear in both Addresses and
// NotReadyAddresses in the same subset.
// Sets of addresses and ports that comprise a service.
// +optional
Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"`
}
// EndpointSubset is a group of addresses with a common set of ports. The
// expanded set of endpoints is the Cartesian product of Addresses x Ports.
// For example, given:
// {
// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
// }
// The resulting set of endpoints can be viewed as:
// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
type EndpointSubset struct {
// IP addresses which offer the related ports that are marked as ready. These endpoints
// should be considered safe for load balancers and clients to utilize.
// +optional
Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"`
// IP addresses which offer the related ports but are not currently marked as ready
// because they have not yet finished starting, have recently failed a readiness check,
// or have recently failed a liveness check.
// +optional
NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"`
// Port numbers available on the related IP addresses.
// +optional
Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"`
}
// EndpointAddress is a tuple that describes single IP address.
type EndpointAddress struct {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
// or link-local multicast (224.0.0.0/24).
// IPv6 is also accepted but not fully supported on all platforms. Also, certain
// kubernetes components, like kube-proxy, are not IPv6 ready.
// TODO: This should allow hostname or IP, See #4447.
IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
// The Hostname of this endpoint
// +optional
Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"`
// Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.
// +optional
NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"`
// Reference to object providing the endpoint.
// +optional
TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"`
}
// EndpointPort is a tuple that describes a single port.
type EndpointPort struct {
// The name of this port (corresponds to ServicePort.Name).
// Must be a DNS_LABEL.
// Optional only if one port is defined.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
// The port number of the endpoint.
Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
// The IP protocol for this port.
// Must be UDP or TCP.
// Default is TCP.
// +optional
Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EndpointsList is a list of endpoints.
type EndpointsList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of endpoints.
Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// NodeSpec describes the attributes that a node is created with.
type NodeSpec struct {
// PodCIDR represents the pod IP range assigned to the node.
// +optional
PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"`
// ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
// +optional
ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"`
// Unschedulable controls node schedulability of new pods. By default, node is schedulable.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
// +optional
Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"`
// If specified, the node's taints.
// +optional
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
// If specified, the source to get node configuration from
// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
// Deprecated: not all kubelets will set this field. Remove field after 1.13.
// see: https://issues.k8s.io/61966
// +optional
DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"`
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
type NodeConfigSource struct {
// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
// 2. configMapRef and proto tag 1 were used by the API to refer to a configmap,
// but used a generic ObjectReference type that didn't really have the fields we needed
// All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags,
// so there was no persisted data for these fields that needed to be migrated/handled.
// +k8s:deprecated=kind
// +k8s:deprecated=apiVersion
// +k8s:deprecated=configMapRef,protobuf=1
// ConfigMap is a reference to a Node's ConfigMap
ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"`
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
type ConfigMapNodeConfigSource struct {
// Namespace is the metadata.namespace of the referenced ConfigMap.
// This field is required in all cases.
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
// Name is the metadata.name of the referenced ConfigMap.
// This field is required in all cases.
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
// UID is the metadata.UID of the referenced ConfigMap.
// This field is forbidden in Node.Spec, and required in Node.Status.
// +optional
UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"`
// ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap.
// This field is forbidden in Node.Spec, and required in Node.Status.
// +optional
ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"`
// KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure
// This field is required in all cases.
KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"`
}
// DaemonEndpoint contains information about a single Daemon endpoint.
type DaemonEndpoint struct {
/*
The port tag was not properly in quotes in earlier releases, so it must be
uppercased for backwards compat (since it was falling back to var name of
'Port').
*/
// Port number of the given endpoint.
// Note the intentionally uppercase json tag "Port" (see comment above).
Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"`
}
// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
type NodeDaemonEndpoints struct {
// Endpoint on which Kubelet is listening.
// +optional
KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"`
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
// All fields are required (none carry omitempty).
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"`
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"`
// Kubelet Version reported by the node.
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.
type NodeConfigStatus struct {
// Assigned reports the checkpointed config the node will try to use.
// When Node.Spec.ConfigSource is updated, the node checkpoints the associated
// config payload to local disk, along with a record indicating intended
// config. The node refers to this record to choose its config checkpoint, and
// reports this record in Assigned. Assigned only updates in the status after
// the record has been checkpointed to disk. When the Kubelet is restarted,
// it tries to make the Assigned config the Active config by loading and
// validating the checkpointed payload identified by Assigned.
// +optional
Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"`
// Active reports the checkpointed config the node is actively using.
// Active will represent either the current version of the Assigned config,
// or the current LastKnownGood config, depending on whether attempting to use the
// Assigned config results in an error.
// +optional
Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"`
// LastKnownGood reports the checkpointed config the node will fall back to
// when it encounters an error attempting to use the Assigned config.
// The Assigned config becomes the LastKnownGood config when the node determines
// that the Assigned config is stable and correct.
// This is currently implemented as a 10-minute soak period starting when the local
// record of Assigned config is updated. If the Assigned config is Active at the end
// of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is
// reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
// because the local default config is always assumed good.
// You should not make assumptions about the node's method of determining config stability
// and correctness, as this may change or become configurable in the future.
// +optional
LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"`
// Error describes any problems reconciling the Spec.ConfigSource to the Active config.
// Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
// record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
// to load or validate the Assigned config, etc.
// Errors may occur at different points while syncing config. Earlier errors (e.g. download or
// checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
// Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
// a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
// by fixing the config assigned in Spec.ConfigSource.
// You can find additional information for debugging by searching the error message in the Kubelet log.
// Error is a human-readable description of the error state; machines can check whether or not Error
// is empty, but should not rely on the stability of the Error text across Kubelet versions.
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// NodeStatus is information about the current status of a node.
type NodeStatus struct {
// Capacity represents the total resources of a node.
// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
// NOTE(review): the link above documents PersistentVolume capacity, not node
// capacity — confirm whether it is the intended reference.
// +optional
Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
// Allocatable represents the resources of a node that are available for scheduling.
// Defaults to Capacity.
// +optional
Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"`
// NodePhase is the recently observed lifecycle phase of the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
// The field is never populated, and now is deprecated.
// +optional
Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"`
// Conditions is an array of current observed node conditions.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
// Merged by strategic-merge-patch on the "type" key (see markers below).
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
// List of addresses reachable to the node.
// Queried from cloud provider, if available.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"`
// Endpoints of daemons running on the Node.
// +optional
DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"`
// Set of ids/uuids to uniquely identify the node.
// More info: https://kubernetes.io/docs/concepts/nodes/node/#info
// +optional
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
// List of container images on this node
// +optional
Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"`
// List of attachable volumes in use (mounted) by the node.
// +optional
VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"`
// List of volumes that are attached to the node.
// +optional
VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"`
// Status of the config assigned to the node via the dynamic Kubelet config feature.
// +optional
Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"`
}
// UniqueVolumeName uniquely identifies an attached volume; it is used in
// NodeStatus.VolumesInUse and AttachedVolume.Name.
type UniqueVolumeName string
// AttachedVolume describes a volume attached to a node
// NOTE(review): the proto tags below use "rep" for singular fields — presumably
// preserved for wire compatibility; confirm against the generated .proto before changing.
type AttachedVolume struct {
// Name of the attached volume
Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"`
// DevicePath represents the device path where the volume should be available
DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"`
}
// AvoidPods describes pods that should avoid this node. This is the value for a
// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and
// will eventually become a field of NodeStatus.
type AvoidPods struct {
// Bounded-sized list of signatures of pods that should avoid this node, sorted
// in timestamp order from oldest to newest. Size of the slice is unspecified.
// +optional
PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"`
}
// Describes a class of pods that should avoid this node.
type PreferAvoidPodsEntry struct {
// The class of pods.
PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"`
// Time at which this entry was added to the list.
// +optional
EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"`
// (brief) reason why this entry was added to the list.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// Human readable message indicating why this entry was added to the list.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
}
// Describes the class of pods that should avoid this node.
// Exactly one field should be set.
type PodSignature struct {
// Reference to controller whose pods should avoid this node.
// +optional
PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"`
}
// Describe a container image
type ContainerImage struct {
// Names by which this image is known.
// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
// The size of the image in bytes.
// +optional
SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"`
}
// NodePhase is the lifecycle phase of a node, reported in NodeStatus.Phase.
// Per NodeStatus, the Phase field is never populated and is deprecated.
type NodePhase string
// These are the valid phases of node.
const (
// NodePending means the node has been created/added by the system, but not configured.
NodePending NodePhase = "Pending"
// NodeRunning means the node has been configured and has Kubernetes components running.
NodeRunning NodePhase = "Running"
// NodeTerminated means the node has been removed from the cluster.
NodeTerminated NodePhase = "Terminated"
)
// NodeConditionType identifies a type of node condition (see NodeCondition.Type).
type NodeConditionType string
// These are valid conditions of node. Currently, we don't have enough information to decide
// node condition. In the future, we will add more. The proposed set of conditions are:
// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable.
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodePIDPressure means the kubelet is under pressure due to insufficient available PID.
NodePIDPressure NodeConditionType = "PIDPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"`
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"`
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
}
// NodeAddressType is the type of a node address (see the constants below).
type NodeAddressType string
// These are valid address type of node.
const (
NodeHostName NodeAddressType = "Hostname"
NodeExternalIP NodeAddressType = "ExternalIP"
NodeInternalIP NodeAddressType = "InternalIP"
NodeExternalDNS NodeAddressType = "ExternalDNS"
NodeInternalDNS NodeAddressType = "InternalDNS"
)
// NodeAddress contains information for the node's address.
type NodeAddress struct {
// Node address type, one of Hostname, ExternalIP, InternalIP, ExternalDNS or InternalDNS.
Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"`
// The node address.
Address string `json:"address" protobuf:"bytes,2,opt,name=address"`
}
// ResourceName is the name identifying various resources in a ResourceList.
type ResourceName string
// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters,
// with the -, _, and . characters allowed anywhere, except the first or last character.
// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than
// camel case, separating compound words.
// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name.
const (
// CPU, in cores. (500m = .5 cores)
ResourceCPU ResourceName = "cpu"
// Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
ResourceMemory ResourceName = "memory"
// Volume size, in bytes (e.g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024)
ResourceStorage ResourceName = "storage"
// Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
// The resource name for ResourceEphemeralStorage is alpha and it can change across releases.
ResourceEphemeralStorage ResourceName = "ephemeral-storage"
)
const (
// Default namespace prefix.
ResourceDefaultNamespacePrefix = "kubernetes.io/"
// Name prefix for huge page resources (alpha).
ResourceHugePagesPrefix = "hugepages-"
)
// ResourceList is a set of (resource name, quantity) pairs.
type ResourceList map[ResourceName]resource.Quantity
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Node is a worker node in Kubernetes.
// Each node will have a unique identifier in the cache (i.e. in etcd).
type Node struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of a node.
// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Most recently observed status of the node.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeList is the whole list of all Nodes which have been registered with master.
type NodeList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of nodes
Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// FinalizerName is the name identifying a finalizer during namespace lifecycle.
type FinalizerName string
// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or
// in metav1.
const (
FinalizerKubernetes FinalizerName = "kubernetes"
)
// NamespaceSpec describes the attributes on a Namespace.
type NamespaceSpec struct {
// Finalizers is an opaque list of values that must be empty to permanently remove object from storage.
// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional
Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"`
}
// NamespaceStatus is information about the current status of a Namespace.
type NamespaceStatus struct {
// Phase is the current lifecycle phase of the namespace.
// More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/
// +optional
Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"`
}
// NamespacePhase is the lifecycle phase of a Namespace, reported in NamespaceStatus.Phase.
type NamespacePhase string
// These are the valid phases of a namespace.
const (
// NamespaceActive means the namespace is available for use in the system
NamespaceActive NamespacePhase = "Active"
// NamespaceTerminating means the namespace is undergoing graceful termination
NamespaceTerminating NamespacePhase = "Terminating"
)
// +genclient
// +genclient:nonNamespaced
// +genclient:skipVerbs=deleteCollection
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Namespace provides a scope for Names.
// Use of multiple namespaces is optional.
type Namespace struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec defines the behavior of the Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
// Status describes the current status of a Namespace.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// +optional
Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NamespaceList is a list of Namespaces.
type NamespaceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of Namespace objects in the list.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Binding ties one object to another; for example, a pod is bound to a node by a scheduler.
// Deprecated in 1.7, please use the bindings subresource of pods instead.
type Binding struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// The target object that you want to bind to the standard object.
Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
// Specifies the target UID.
// +optional
UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodLogOptions is the query options for a Pod's logs REST call.
type PodLogOptions struct {
metav1.TypeMeta `json:",inline"`
// The container for which to stream logs. Defaults to only container if there is one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
// Follow the log stream of the pod. Defaults to false.
// +optional
Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
// Return previous terminated container logs. Defaults to false.
// +optional
Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
// A relative time in seconds before the current time from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
// An RFC3339 timestamp from which to show logs. If this value
// precedes the time a pod was started, only logs since the pod start will be returned.
// If this value is in the future, no logs will be returned.
// Only one of sinceSeconds or sinceTime may be specified.
// +optional
SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
// If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
// of log output. Defaults to false.
// +optional
Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
// If set, the number of lines from the end of the logs to show. If not specified,
// logs are shown from the creation of the container or sinceSeconds or sinceTime
// +optional
TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"`
// If set, the number of bytes to read from the server before terminating the
// log output. This may not display a complete final line of logging, and may return
// slightly more or slightly less than the specified limit.
// +optional
LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodAttachOptions is the query options to a Pod's remote attach call.
// ---
// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodAttachOptions struct {
metav1.TypeMeta `json:",inline"`
// Stdin if true, redirects the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Stdout if true indicates that stdout is to be redirected for the attach call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Stderr if true indicates that stderr is to be redirected for the attach call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the attach call.
// This is passed through the container runtime so the tty
// is allocated on the worker node by the container runtime.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// The container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodExecOptions is the query options to a Pod's remote exec call.
// ---
// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging
// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stderr and TTY
type PodExecOptions struct {
metav1.TypeMeta `json:",inline"`
// Redirect the standard input stream of the pod for this call.
// Defaults to false.
// +optional
Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"`
// Redirect the standard output stream of the pod for this call.
// Defaults to true.
// +optional
Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"`
// Redirect the standard error stream of the pod for this call.
// Defaults to true.
// +optional
Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"`
// TTY if true indicates that a tty will be allocated for the exec call.
// Defaults to false.
// +optional
TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"`
// Container in which to execute the command.
// Defaults to only container if there is only one container in the pod.
// +optional
Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"`
// Command is the remote command to execute. argv array. Not executed within a shell.
Command []string `json:"command" protobuf:"bytes,6,rep,name=command"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodPortForwardOptions is the query options to a Pod's port forward call
// when using WebSockets.
// The `port` query parameter must specify the port or
// ports (comma separated) to forward over.
// Port forwarding over SPDY does not use these options. It requires the port
// to be passed in the `port` header as part of request.
type PodPortForwardOptions struct {
metav1.TypeMeta `json:",inline"`
// List of ports to forward
// Required when using WebSockets
// +optional
Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodProxyOptions is the query options to a Pod's proxy call.
type PodProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to pod.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeProxyOptions is the query options to a Node's proxy call.
type NodeProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the URL path to use for the current proxy request to node.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ServiceProxyOptions is the query options to a Service's proxy call.
type ServiceProxyOptions struct {
metav1.TypeMeta `json:",inline"`
// Path is the part of URLs that include service endpoints, suffixes,
// and parameters to use for the current proxy request to service.
// For example, the whole request URL is
// http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
// Path is _search?q=user:kimchy.
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
}
// ObjectReference contains enough information to let you inspect or modify the referred object.
// Every field is optional (all are tagged omitempty), so callers populate only the subset
// needed to identify the target object.
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type ObjectReference struct {
	// Kind of the referent.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
	// Namespace of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"`
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"`
	// UID of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
	// +optional
	UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"`
	// API version of the referent.
	// +optional
	APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"`
	// Specific resourceVersion to which this reference is made, if any.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
	// +optional
	ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"`
	// If referring to a piece of an object instead of an entire object, this string
	// should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].
	// For example, if the object reference is to a container within a pod, this would take on a value like:
	// "spec.containers{name}" (where "name" refers to the name of the container that triggered
	// the event) or if no container name is specified "spec.containers[2]" (container with
	// index 2 in this pod). This syntax is chosen only to have some well-defined way of
	// referencing a part of an object.
	// TODO: this design is not final and this field is subject to change in the future.
	// +optional
	FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"`
}
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace. Only the object's name is
// recorded; the namespace is implied by the context in which the reference
// appears.
type LocalObjectReference struct {
	// Name of the referent.
	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
	// TODO: Add other useful fields. apiVersion, kind, uid?
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SerializedReference is a reference to a serialized object.
type SerializedReference struct {
	metav1.TypeMeta `json:",inline"`
	// The reference to an object in the system.
	// +optional
	Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"`
}
// EventSource identifies the component and node from which an event was generated.
type EventSource struct {
	// Component from which the event is generated.
	// +optional
	Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"`
	// Node name on which the event is generated.
	// +optional
	Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"`
}
// Valid values for Event.Type (new types could be added in future)
const (
	// EventTypeNormal is information only and will not cause any problems
	EventTypeNormal string = "Normal"
	// EventTypeWarning events are to warn that something might go wrong
	EventTypeWarning string = "Warning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Event is a report of an event somewhere in the cluster.
type Event struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"`
	// The object that this event is about.
	InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"`
	// This should be a short, machine understandable string that gives the reason
	// for the transition into the object's current status.
	// TODO: provide exact specification for format.
	// +optional
	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
	// A human-readable description of the status of this operation.
	// TODO: decide on maximum length.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
	// The component reporting this event. Should be a short machine understandable string.
	// +optional
	Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"`
	// The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)
	// +optional
	FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"`
	// The time at which the most recent occurrence of this event was recorded.
	// +optional
	LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"`
	// The number of times this event has occurred.
	// +optional
	Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"`
	// Type of this event (Normal, Warning), new types could be added in the future
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"`
	// Time when this Event was first observed.
	// +optional
	EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"`
	// Data about the Event series this event represents or nil if it's a singleton Event.
	// +optional
	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"`
	// What action was taken/failed regarding the Regarding object.
	// +optional
	Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"`
	// Optional secondary object for more complex actions.
	// +optional
	Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"`
	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
	// Note: serialized as `reportingComponent` on the wire (see the field tags).
	// +optional
	ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"`
	// ID of the controller instance, e.g. `kubelet-xyzf`.
	// +optional
	ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"`
}
// EventSeries contain information on series of events, i.e. thing that was/is happening
// continuously for some time.
type EventSeries struct {
	// Number of occurrences in this series up to the last heartbeat time
	Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"`
	// Time of the last occurrence observed
	LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"`
	// State of this Series: Ongoing or Finished
	State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"`
}

// EventSeriesState describes the state of an event series.
type EventSeriesState string

// Valid values for EventSeries.State.
const (
	// EventSeriesStateOngoing means the series is still receiving events.
	EventSeriesStateOngoing EventSeriesState = "Ongoing"
	// EventSeriesStateFinished means the series has ended.
	EventSeriesStateFinished EventSeriesState = "Finished"
	// EventSeriesStateUnknown means the state of the series cannot be determined.
	EventSeriesStateUnknown EventSeriesState = "Unknown"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// EventList is a list of events.
type EventList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of events
	Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// List holds a list of objects, which may not be known by the server.
// It is a defined type (not an alias) so the generated deepcopy/runtime.Object
// methods can attach to it.
type List metav1.List
// LimitType is a type of object that is limited. Valid values are
// Pod, Container and PersistentVolumeClaim (see the constants below).
type LimitType string

const (
	// LimitTypePod is a limit that applies to all pods in a namespace
	LimitTypePod LimitType = "Pod"
	// LimitTypeContainer is a limit that applies to all containers in a namespace
	LimitTypeContainer LimitType = "Container"
	// LimitTypePersistentVolumeClaim is a limit that applies to all persistent volume claims in a namespace
	LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim"
)
// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
// The constraints apply to objects matching the Type field (Pod, Container or
// PersistentVolumeClaim).
type LimitRangeItem struct {
	// Type of resource that this limit applies to.
	// +optional
	Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"`
	// Max usage constraints on this kind by resource name.
	// +optional
	Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"`
	// Min usage constraints on this kind by resource name.
	// +optional
	Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"`
	// Default resource requirement limit value by resource name if resource limit is omitted.
	// +optional
	Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"`
	// DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
	// +optional
	DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"`
	// MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
	// +optional
	MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"`
}
// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
type LimitRangeSpec struct {
	// Limits is the list of LimitRangeItem objects that are enforced.
	// Required (no omitempty on the json tag).
	Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRange sets resource usage limits for each kind of resource in a Namespace.
type LimitRange struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the limits enforced.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// LimitRangeList is a list of LimitRange items.
type LimitRangeList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of LimitRange objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
	Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// The following identify resource constants for Kubernetes object types
const (
	// Pods, number
	ResourcePods ResourceName = "pods"
	// Services, number
	ResourceServices ResourceName = "services"
	// ReplicationControllers, number
	ResourceReplicationControllers ResourceName = "replicationcontrollers"
	// ResourceQuotas, number
	ResourceQuotas ResourceName = "resourcequotas"
	// ResourceSecrets, number
	ResourceSecrets ResourceName = "secrets"
	// ResourceConfigMaps, number
	ResourceConfigMaps ResourceName = "configmaps"
	// ResourcePersistentVolumeClaims, number
	ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims"
	// ResourceServicesNodePorts, number
	ResourceServicesNodePorts ResourceName = "services.nodeports"
	// ResourceServicesLoadBalancers, number
	ResourceServicesLoadBalancers ResourceName = "services.loadbalancers"
	// CPU request, in cores. (500m = .5 cores)
	ResourceRequestsCPU ResourceName = "requests.cpu"
	// Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsMemory ResourceName = "requests.memory"
	// Storage request, in bytes
	ResourceRequestsStorage ResourceName = "requests.storage"
	// Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage"
	// CPU limit, in cores. (500m = .5 cores)
	ResourceLimitsCPU ResourceName = "limits.cpu"
	// Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsMemory ResourceName = "limits.memory"
	// Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage"
)

// The following identify resource prefixes for Kubernetes object types
const (
	// HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024)
	// As burst is not supported for HugePages, we would only quota its request, and ignore the limit.
	ResourceRequestsHugePagesPrefix = "requests.hugepages-"
	// Default resource requests prefix
	DefaultResourceRequestsPrefix = "requests."
)
// A ResourceQuotaScope defines a filter that must match each object tracked by a quota
type ResourceQuotaScope string

const (
	// ResourceQuotaScopeTerminating matches all pod objects where spec.activeDeadlineSeconds is set
	ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating"
	// ResourceQuotaScopeNotTerminating matches all pod objects where spec.activeDeadlineSeconds is unset
	ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating"
	// ResourceQuotaScopeBestEffort matches all pod objects that have best effort quality of service
	ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort"
	// ResourceQuotaScopeNotBestEffort matches all pod objects that do not have best effort quality of service
	ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort"
)
// ResourceQuotaSpec defines the desired hard limits to enforce for Quota.
type ResourceQuotaSpec struct {
	// Hard is the set of desired hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// A collection of filters that must match each object tracked by a quota.
	// If not specified, the quota matches all objects.
	// +optional
	Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"`
}
// ResourceQuotaStatus defines the enforced hard limits and observed use.
type ResourceQuotaStatus struct {
	// Hard is the set of enforced hard limits for each named resource.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	// +optional
	Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
	// Used is the current observed total usage of the resource in the namespace.
	// +optional
	Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuota sets aggregate quota restrictions enforced per namespace
type ResourceQuota struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Spec defines the desired quota.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
	// Status defines the actual enforced quota and its current usage.
	// https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
	// +optional
	Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceQuotaList is a list of ResourceQuota items.
type ResourceQuotaList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of ResourceQuota objects.
	// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
	Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Secret holds secret data of a certain type. The total bytes of the values in
// the Data field must be less than MaxSecretSize bytes.
type Secret struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the secret data. Each key must consist of alphanumeric
	// characters, '-', '_' or '.'. The serialized form of the secret data is a
	// base64 encoded string, representing the arbitrary (possibly non-string)
	// data value here. Described in https://tools.ietf.org/html/rfc4648#section-4
	// +optional
	Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// stringData allows specifying non-binary secret data in string form.
	// It is provided as a write-only convenience method.
	// All keys and values are merged into the data field on write, overwriting any existing values.
	// It is never output when reading from the API.
	// Note: protobuf field 4, while Type below uses 3 — tag numbers are stable
	// across releases, not ordered by declaration.
	// +k8s:conversion-gen=false
	// +optional
	StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"`
	// Used to facilitate programmatic handling of secret data.
	// +optional
	Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"`
}
// MaxSecretSize is the maximum allowed total size, in bytes, of the values
// in a Secret's Data field (1 MiB).
const MaxSecretSize = 1 * 1024 * 1024

// SecretType facilitates programmatic handling of secret data; it is the
// type of Secret.Type.
type SecretType string

const (
	// SecretTypeOpaque is the default. Arbitrary user-defined data
	SecretTypeOpaque SecretType = "Opaque"
	// SecretTypeServiceAccountToken contains a token that identifies a service account to the API
	//
	// Required fields:
	// - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies
	// - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies
	// - Secret.Data["token"] - a token that identifies the service account to the API
	SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token"
	// ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountNameKey = "kubernetes.io/service-account.name"
	// ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets
	ServiceAccountUIDKey = "kubernetes.io/service-account.uid"
	// ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets
	ServiceAccountTokenKey = "token"
	// ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets
	ServiceAccountKubeconfigKey = "kubernetes.kubeconfig"
	// ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets
	ServiceAccountRootCAKey = "ca.crt"
	// ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls
	ServiceAccountNamespaceKey = "namespace"
	// SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg
	//
	// Required fields:
	// - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file
	SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg"
	// DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets
	DockerConfigKey = ".dockercfg"
	// SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json
	//
	// Required fields:
	// - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file
	SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson"
	// DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets
	DockerConfigJsonKey = ".dockerconfigjson"
	// SecretTypeBasicAuth contains data needed for basic authentication.
	//
	// Required at least one of fields:
	// - Secret.Data["username"] - username used for authentication
	// - Secret.Data["password"] - password or token needed for authentication
	SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth"
	// BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets
	BasicAuthUsernameKey = "username"
	// BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets
	BasicAuthPasswordKey = "password"
	// SecretTypeSSHAuth contains data needed for SSH authentication.
	//
	// Required field:
	// - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication
	SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth"
	// SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets
	SSHAuthPrivateKey = "ssh-privatekey"
	// SecretTypeTLS contains information about a TLS client or server secret. It
	// is primarily used with TLS termination of the Ingress resource, but may be
	// used in other types.
	//
	// Required fields:
	// - Secret.Data["tls.key"] - TLS private key.
	//   Secret.Data["tls.crt"] - TLS certificate.
	// TODO: Consider supporting different formats, specifying CA/destinationCA.
	SecretTypeTLS SecretType = "kubernetes.io/tls"
	// TLSCertKey is the key for tls certificates in a TLS secret.
	TLSCertKey = "tls.crt"
	// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
	TLSPrivateKeyKey = "tls.key"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SecretList is a list of Secret.
type SecretList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is a list of secret objects.
	// More info: https://kubernetes.io/docs/concepts/configuration/secret
	Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMap holds configuration data for pods to consume. Data holds UTF-8
// string values, BinaryData holds arbitrary bytes; their key sets must not
// overlap.
type ConfigMap struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Data contains the configuration data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// Values with non-UTF-8 byte sequences must use the BinaryData field.
	// The keys stored in Data must not overlap with the keys in
	// the BinaryData field, this is enforced during validation process.
	// +optional
	Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"`
	// BinaryData contains the binary data.
	// Each key must consist of alphanumeric characters, '-', '_' or '.'.
	// BinaryData can contain byte sequences that are not in the UTF-8 range.
	// The keys stored in BinaryData must not overlap with the ones in
	// the Data field, this is enforced during validation process.
	// Using this field will require 1.10+ apiserver and
	// kubelet.
	// +optional
	BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ConfigMapList is a resource containing a list of ConfigMap objects.
type ConfigMapList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Items is the list of ConfigMaps.
	Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// ComponentConditionType is the type of a component health condition.
type ComponentConditionType string

// These are the valid conditions for the component.
const (
	// ComponentHealthy reports the health of the component.
	ComponentHealthy ComponentConditionType = "Healthy"
)

// ComponentCondition holds information about the condition of a component.
type ComponentCondition struct {
	// Type of condition for a component.
	// Valid value: "Healthy"
	Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"`
	// Status of the condition for a component.
	// Valid values for "Healthy": "True", "False", or "Unknown".
	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
	// Message about the condition for a component.
	// For example, information about a health check.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
	// Condition error code for a component.
	// For example, a health check error code.
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatus (and ComponentStatusList) holds the cluster validation info.
type ComponentStatus struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of component conditions observed
	// +optional
	// +patchMergeKey=type
	// +patchStrategy=merge
	Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ComponentStatusList is a list of ComponentStatus objects, each carrying the
// conditions observed for one component.
type ComponentStatusList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// List of ComponentStatus objects.
	Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// DownwardAPIVolumeSource represents a volume containing downward API info.
// Downward API volumes support ownership management and SELinux relabeling.
type DownwardAPIVolumeSource struct {
	// Items is a list of downward API volume file
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
	// Optional: mode bits to use on created files by default. Must be a
	// value between 0 and 0777. Defaults to 0644.
	// Directories within the path are not affected by this setting.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"`
}

const (
	// DownwardAPIVolumeSourceDefaultMode is the default mode bits (0644) used
	// for files created by a downward API volume when DefaultMode is unset.
	DownwardAPIVolumeSourceDefaultMode int32 = 0644
)
// DownwardAPIVolumeFile represents information to create the file containing the pod field
type DownwardAPIVolumeFile struct {
	// Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'
	Path string `json:"path" protobuf:"bytes,1,opt,name=path"`
	// Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.
	// NOTE(review): described as required but tagged +optional/omitempty — presumably
	// exactly one of FieldRef or ResourceFieldRef must be set; confirm against validation.
	// +optional
	FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"`
	// Selects a resource of the container: only resources limits and requests
	// (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
	// +optional
	ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"`
	// Optional: mode bits to use on this file, must be a value between 0
	// and 0777. If not specified, the volume defaultMode will be used.
	// This might be in conflict with other options that affect the file
	// mode, like fsGroup, and the result can be other mode bits set.
	// +optional
	Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"`
}
// DownwardAPIProjection represents downward API info for projecting into a
// projected volume. Note that this is identical to a downwardAPI volume
// source without the default mode.
type DownwardAPIProjection struct {
	// Items is a list of downward API volume files
	// +optional
	Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"`
}
// SecurityContext holds security configuration that will be applied to a container.
// Some fields are present in both SecurityContext and PodSecurityContext. When both
// are set, the values in SecurityContext take precedence.
type SecurityContext struct {
	// The capabilities to add/drop when running containers.
	// Defaults to the default set of capabilities granted by the container runtime.
	// +optional
	Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"`
	// Run container in privileged mode.
	// Processes in privileged containers are essentially equivalent to root on the host.
	// Defaults to false.
	// +optional
	Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"`
	// The SELinux context to be applied to the container.
	// If unspecified, the container runtime will allocate a random SELinux context for each
	// container. May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"`
	// The UID to run the entrypoint of the container process.
	// Defaults to user specified in image metadata if unspecified.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"`
	// The GID to run the entrypoint of the container process.
	// Uses runtime default if unset.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// Note: protobuf tag 8 is out of declaration order — presumably the field was
	// added after its neighbors; tag numbers are append-only.
	// +optional
	RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"`
	// Indicates that the container must run as a non-root user.
	// If true, the Kubelet will validate the image at runtime to ensure that it
	// does not run as UID 0 (root) and fail to start the container if it does.
	// If unset or false, no such validation will be performed.
	// May also be set in PodSecurityContext. If set in both SecurityContext and
	// PodSecurityContext, the value specified in SecurityContext takes precedence.
	// +optional
	RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"`
	// Whether this container has a read-only root filesystem.
	// Default is false.
	// +optional
	ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"`
	// AllowPrivilegeEscalation controls whether a process can gain more
	// privileges than its parent process. This bool directly controls if
	// the no_new_privs flag will be set on the container process.
	// AllowPrivilegeEscalation is true always when the container is:
	// 1) run as Privileged
	// 2) has CAP_SYS_ADMIN
	// +optional
	AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"`
}
// SELinuxOptions are the labels to be applied to the container
type SELinuxOptions struct {
	// User is the SELinux user label that applies to the container.
	// +optional
	User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"`
	// Role is the SELinux role label that applies to the container.
	// +optional
	Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"`
	// Type is the SELinux type label that applies to the container.
	// +optional
	Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"`
	// Level is the SELinux level label that applies to the container.
	// +optional
	Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// RangeAllocation is not a public type.
type RangeAllocation struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
	// Range is a string that identifies the range represented by 'data'.
	Range string `json:"range" protobuf:"bytes,2,opt,name=range"`
	// Data is a bit array containing all allocated addresses in the previous segment.
	Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"`
}
const (
	// DefaultSchedulerName is the name of the default scheduler.
	DefaultSchedulerName = "default-scheduler"
	// RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule
	// corresponding to every RequiredDuringScheduling affinity rule.
	// When the --hard-pod-affinity-weight scheduler flag is not specified,
	// DefaultHardPodAffinitySymmetricWeight defines the weight of the implicit PreferredDuringScheduling affinity rule.
	DefaultHardPodAffinitySymmetricWeight int32 = 1
)
// Sysctl defines a kernel parameter to be set.
// It is a simple name/value pair; no validation is visible at this level.
type Sysctl struct {
	// Name of a property to set
	Name string `protobuf:"bytes,1,opt,name=name"`
	// Value of a property to set
	Value string `protobuf:"bytes,2,opt,name=value"`
}
// NodeResources is an object for conveying resource information about a node.
// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
type NodeResources struct {
	// Capacity represents the available resources of a node
	Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
}
// Constants for the remote command execution and port-forwarding streaming
// protocols (query parameters and stream-type header values).
const (
	// Enable stdin for remote command execution
	ExecStdinParam = "input"
	// Enable stdout for remote command execution
	ExecStdoutParam = "output"
	// Enable stderr for remote command execution
	ExecStderrParam = "error"
	// Enable TTY for remote command execution
	ExecTTYParam = "tty"
	// Command to run for remote command execution
	ExecCommandParam = "command"
	// Name of header that specifies stream type
	StreamType = "streamType"
	// Value for streamType header for stdin stream
	StreamTypeStdin = "stdin"
	// Value for streamType header for stdout stream
	StreamTypeStdout = "stdout"
	// Value for streamType header for stderr stream
	StreamTypeStderr = "stderr"
	// Value for streamType header for data stream
	StreamTypeData = "data"
	// Value for streamType header for error stream
	StreamTypeError = "error"
	// Value for streamType header for terminal resize stream
	StreamTypeResize = "resize"
	// Name of header that specifies the port being forwarded
	PortHeader = "port"
	// Name of header that specifies a request ID used to associate the error
	// and data streams for a single forwarded connection
	PortForwardRequestIDHeader = "requestID"
)
|
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"time"
version "github.com/hashicorp/go-version"
homedir "github.com/mitchellh/go-homedir"
)
const (
	// bazelReal is the env var through which a wrapper script is told where
	// the real (downloaded) Bazel binary lives.
	bazelReal = "BAZEL_REAL"
	// skipWrapperEnv, when set, prevents delegation to the workspace wrapper.
	skipWrapperEnv = "BAZELISK_SKIP_WRAPPER"
	// wrapperPath is the workspace-relative path of the optional wrapper script.
	wrapperPath = "./tools/bazel"
	// bazelUpstream is the GitHub organization of the canonical Bazel repo.
	bazelUpstream = "bazelbuild"
)
var (
	// BazeliskVersion is reported by `bazelisk version`.
	// NOTE(review): defaults to "development"; presumably stamped at release
	// time (e.g. via -ldflags) — confirm with the build configuration.
	BazeliskVersion = "development"
)
// findWorkspaceRoot walks up from root until it finds a directory containing a
// Bazel workspace marker file, returning "" if none exists up to the
// filesystem root.
// Generalized to also accept WORKSPACE.bazel, which Bazel supports since 1.2
// (https://github.com/bazelbuild/bazel/pull/10175); the plain WORKSPACE name
// is still checked first, so existing behavior is unchanged.
func findWorkspaceRoot(root string) string {
	if _, err := os.Stat(filepath.Join(root, "WORKSPACE")); err == nil {
		return root
	}
	if _, err := os.Stat(filepath.Join(root, "WORKSPACE.bazel")); err == nil {
		return root
	}
	parentDirectory := filepath.Dir(root)
	if parentDirectory == root {
		// Reached the filesystem root without finding a workspace.
		return ""
	}
	return findWorkspaceRoot(parentDirectory)
}
// getBazelVersion decides which Bazel version label to use.
// Check in this order:
// - env var "USE_BAZEL_VERSION" is set to a specific version.
// - env var "USE_NIGHTLY_BAZEL" or "USE_BAZEL_NIGHTLY" is set -> latest
//   nightly. (TODO)
// - env var "USE_CANARY_BAZEL" or "USE_BAZEL_CANARY" is set -> latest
//   rc. (TODO)
// - the file workspace_root/tools/bazel exists -> that version. (TODO)
// - workspace_root/.bazelversion exists -> read contents, that version.
// - workspace_root/WORKSPACE contains a version -> that version. (TODO)
// - fallback: latest release
func getBazelVersion() (string, error) {
	bazelVersion := os.Getenv("USE_BAZEL_VERSION")
	if len(bazelVersion) != 0 {
		return bazelVersion, nil
	}
	workingDirectory, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("could not get working directory: %v", err)
	}
	workspaceRoot := findWorkspaceRoot(workingDirectory)
	if len(workspaceRoot) != 0 {
		bazelVersionPath := filepath.Join(workspaceRoot, ".bazelversion")
		if _, err := os.Stat(bazelVersionPath); err == nil {
			f, err := os.Open(bazelVersionPath)
			if err != nil {
				return "", fmt.Errorf("could not read %s: %v", bazelVersionPath, err)
			}
			defer f.Close()
			// Only the first line of .bazelversion is considered.
			scanner := bufio.NewScanner(f)
			scanner.Scan()
			bazelVersion := scanner.Text()
			if err := scanner.Err(); err != nil {
				// Bug fix: report the file path here, not the (possibly empty)
				// version value that was read.
				return "", fmt.Errorf("could not read version from file %s: %v", bazelVersionPath, err)
			}
			if len(bazelVersion) != 0 {
				return bazelVersion, nil
			}
		}
	}
	return "latest", nil
}
// parseBazelForkAndVersion splits a "fork/version" specifier into its two
// parts. A bare version without a slash is attributed to the upstream Bazel
// project; more than one slash is an error.
func parseBazelForkAndVersion(bazelForkAndVersion string) (string, string, error) {
	parts := strings.Split(bazelForkAndVersion, "/")
	switch len(parts) {
	case 1:
		return bazelUpstream, parts[0], nil
	case 2:
		return parts[0], parts[1], nil
	}
	return "", "", fmt.Errorf("invalid version \"%s\", could not parse version with more than one slash", bazelForkAndVersion)
}
// release is the subset of a GitHub release JSON object that bazelisk needs:
// the tag name and whether the release is a prerelease (which is skipped when
// resolving "latest").
type release struct {
	TagName string `json:"tag_name"`
	Prerelease bool `json:"prerelease"`
}
// readRemoteFile performs a GET on url — authenticated with a GitHub token
// when token is non-empty — and returns the full response body. Any non-200
// status code is treated as an error.
func readRemoteFile(url string, token string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("could not create request: %v", err)
	}
	if token != "" {
		req.Header.Set("Authorization", "token "+token)
	}
	res, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, fmt.Errorf("could not fetch %s: %v", url, err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("unexpected status code while reading %s: %v", url, res.StatusCode)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read content at %s: %v", url, err)
	}
	return body, nil
}
// maybeDownload will download a file from the given url and cache the result under bazeliskHome.
// It skips the download if the file already exists and is not outdated.
// description is used only to provide better error messages.
func maybeDownload(bazeliskHome, url, filename, description string) ([]byte, error) {
	cachePath := filepath.Join(bazeliskHome, filename)
	// Serve from the cache when the file is less than one hour old.
	if cacheStat, err := os.Stat(cachePath); err == nil && time.Since(cacheStat.ModTime()).Hours() < 1 {
		res, err := ioutil.ReadFile(cachePath)
		if err != nil {
			return nil, fmt.Errorf("could not read %s: %v", cachePath, err)
		}
		return res, nil
	}
	// We could also use go-github here, but I can't get it to build with Bazel's rules_go and it pulls in a lot of dependencies.
	body, err := readRemoteFile(url, os.Getenv("BAZELISK_GITHUB_TOKEN"))
	if err != nil {
		return nil, fmt.Errorf("could not download %s: %v", description, err)
	}
	if err := ioutil.WriteFile(cachePath, body, 0666); err != nil {
		return nil, fmt.Errorf("could not create %s: %v", cachePath, err)
	}
	return body, nil
}
// resolveLatestVersion fetches (with caching) the GitHub release list of the
// given fork and resolves "latest-<offset>" against its non-prerelease tags.
func resolveLatestVersion(bazeliskHome, bazelFork string, offset int) (string, error) {
	url := fmt.Sprintf("https://api.github.com/repos/%s/bazel/releases", bazelFork)
	releasesJSON, err := maybeDownload(bazeliskHome, url, bazelFork+"-releases.json", "list of Bazel releases from github.com/"+bazelFork)
	if err != nil {
		return "", fmt.Errorf("could not get releases from github.com/%s/bazel: %v", bazelFork, err)
	}
	var releases []release
	if err := json.Unmarshal(releasesJSON, &releases); err != nil {
		return "", fmt.Errorf("could not parse JSON into list of releases: %v", err)
	}
	// Prereleases are never candidates for "latest".
	tags := make([]string, 0, len(releases))
	for _, r := range releases {
		if !r.Prerelease {
			tags = append(tags, r.TagName)
		}
	}
	return getNthMostRecentVersion(tags, offset)
}
// getNthMostRecentVersion returns the version that is `offset` positions
// behind the most recent entry of `versions` (offset 0 == newest).
// Bug fix: entries that cannot be parsed are now skipped with a warning.
// Previously a nil *version.Version was stored in the slice, which panicked
// later inside sort or when calling Original() on it.
func getNthMostRecentVersion(versions []string, offset int) (string, error) {
	var wrappers []*version.Version
	for _, v := range versions {
		wrapper, err := version.NewVersion(v)
		if err != nil {
			log.Printf("WARN: Could not parse version: %s", v)
			continue
		}
		wrappers = append(wrappers, wrapper)
	}
	// Check the offset against the number of *parseable* versions.
	if offset >= len(wrappers) {
		return "", fmt.Errorf("cannot resolve version \"latest-%d\": There are only %d Bazel versions", offset, len(wrappers))
	}
	sort.Sort(version.Collection(wrappers))
	return wrappers[len(wrappers)-1-offset].Original(), nil
}
// gcsListResponse models the subset of a GCS object-listing response needed
// here: the "directory" prefixes returned when listing with a delimiter.
type gcsListResponse struct {
	Prefixes []string `json:"prefixes"`
}
// resolveLatestRcVersion determines the most recent release candidate of the
// newest Bazel version found in the public GCS release bucket.
func resolveLatestRcVersion() (string, error) {
	dirs, err := listDirectoriesInReleaseBucket("")
	if err != nil {
		return "", fmt.Errorf("could not list Bazel versions in GCS bucket: %v", err)
	}
	newest, err := getHighestBazelVersion(dirs)
	if err != nil {
		return "", fmt.Errorf("got invalid version number: %v", err)
	}
	// Append slash to match directories
	rcDirs, err := listDirectoriesInReleaseBucket(newest + "/")
	if err != nil {
		return "", fmt.Errorf("could not list release candidates for latest release: %v", err)
	}
	return getHighestRcVersion(rcDirs)
}
// listDirectoriesInReleaseBucket returns the "directory" prefixes found under
// the given prefix in the public bazel GCS bucket (delimiter "/").
func listDirectoriesInReleaseBucket(prefix string) ([]string, error) {
	url := "https://www.googleapis.com/storage/v1/b/bazel/o?delimiter=/"
	if prefix != "" {
		url += "&prefix=" + prefix
	}
	content, err := readRemoteFile(url, "")
	if err != nil {
		return nil, fmt.Errorf("could not list GCS objects at %s: %v", url, err)
	}
	var response gcsListResponse
	if err := json.Unmarshal(content, &response); err != nil {
		return nil, fmt.Errorf("could not parse GCS index JSON: %v", err)
	}
	return response.Prefixes, nil
}
// getHighestBazelVersion strips the trailing slash from each GCS directory
// name (in place, matching the original's argument mutation) and returns the
// newest of the resulting versions.
func getHighestBazelVersion(versions []string) (string, error) {
	for i := range versions {
		versions[i] = strings.TrimSuffix(versions[i], "/")
	}
	return getNthMostRecentVersion(versions, 0)
}
// getHighestRcVersion picks the highest release candidate from the GCS
// directory listing of the latest version (entries look like "0.20.0/rc3/").
// If a final release directory exists (e.g. "0.20.0/release/"), that version
// is returned directly because there is no active RC anymore.
func getHighestRcVersion(versions []string) (string, error) {
	var version string
	var lastRc int
	// Bug fixes: the dots are now escaped (`\d+.\d+.\d+` let "." match any
	// character), and entries that don't match no longer cause a nil-slice
	// panic on m[1] — they are skipped instead.
	re := regexp.MustCompile(`(\d+\.\d+\.\d+)/rc(\d+)/`)
	for _, v := range versions {
		// Fallback: use latest release if there is no active RC.
		if strings.Index(v, "release") > -1 {
			return strings.Split(v, "/")[0], nil
		}
		m := re.FindStringSubmatch(v)
		if m == nil {
			// Not an RC directory; ignore.
			continue
		}
		version = m[1]
		rc, err := strconv.Atoi(m[2])
		if err != nil {
			return "", fmt.Errorf("Invalid version number %s: %v", strings.TrimSuffix(v, "/"), err)
		}
		if rc > lastRc {
			lastRc = rc
		}
	}
	return fmt.Sprintf("%src%d", version, lastRc), nil
}
// resolveVersionLabel turns a user-supplied version label into a concrete
// release version or commit hash. It returns:
// 1. The label of a Bazel release (if the label resolves to a release) or a commit (for unreleased binaries),
// 2. Whether the first value refers to a commit,
// 3. An error.
func resolveVersionLabel(bazeliskHome, bazelFork, bazelVersion string) (string, bool, error) {
	// "last_green", "last_downstream_green" and "last_rc" only exist for the
	// upstream project, not for forks.
	if bazelFork == bazelUpstream {
		lastGreenCommitPathSuffixes := map[string]string{
			"last_green":            "github.com/bazelbuild/bazel.git/bazel-bazel",
			"last_downstream_green": "downstream_pipeline",
		}
		if pathSuffix, ok := lastGreenCommitPathSuffixes[bazelVersion]; ok {
			commit, err := getLastGreenCommit(pathSuffix)
			if err != nil {
				return "", false, fmt.Errorf("cannot resolve last green commit: %v", err)
			}
			return commit, true, nil
		}
		if bazelVersion == "last_rc" {
			version, err := resolveLatestRcVersion()
			return version, false, err
		}
	}
	// "latest" or "latest-<offset>" resolve against the GitHub release list.
	latestRe := regexp.MustCompile(`^latest(?:-(?P<offset>\d+))?$`)
	if match := latestRe.FindStringSubmatch(bazelVersion); match != nil {
		offset := 0
		if match[1] != "" {
			var err error
			offset, err = strconv.Atoi(match[1])
			if err != nil {
				return "", false, fmt.Errorf("invalid version \"%s\", could not parse offset: %v", bazelVersion, err)
			}
		}
		version, err := resolveLatestVersion(bazeliskHome, bazelFork, offset)
		return version, false, err
	}
	// Anything else is assumed to already be a concrete version number.
	return bazelVersion, false, nil
}
// lastGreenBasePath is the GCS location holding the commit hashes of the most
// recent Bazel builds that passed CI.
const lastGreenBasePath = "https://storage.googleapis.com/bazel-untrusted-builds/last_green_commit/"
// getLastGreenCommit downloads the commit hash stored under the given path
// suffix and returns it with surrounding whitespace stripped.
func getLastGreenCommit(pathSuffix string) (string, error) {
	content, err := readRemoteFile(lastGreenBasePath+pathSuffix, "")
	if err != nil {
		return "", fmt.Errorf("could not determine last green commit: %v", err)
	}
	return strings.TrimSpace(string(content)), nil
}
// determineBazelFilename constructs the platform-specific release asset name,
// e.g. "bazel-0.28.1-linux-x86_64" (with ".exe" appended on Windows).
// Only amd64 and darwin/linux/windows are supported.
func determineBazelFilename(version string) (string, error) {
	if runtime.GOARCH != "amd64" {
		return "", fmt.Errorf("unsupported machine architecture \"%s\", must be x86_64", runtime.GOARCH)
	}
	machineName := "x86_64"
	var osName string
	switch runtime.GOOS {
	case "darwin", "linux", "windows":
		osName = runtime.GOOS
	default:
		return "", fmt.Errorf("unsupported operating system \"%s\", must be Linux, macOS or Windows", runtime.GOOS)
	}
	filenameSuffix := ""
	if osName == "windows" {
		filenameSuffix = ".exe"
	}
	return fmt.Sprintf("bazel-%s-%s-%s%s", version, osName, machineName, filenameSuffix), nil
}
// determineURL builds the download URL for a Bazel binary. Commit builds come
// from the bazel-builds GCS bucket, upstream releases/RCs from
// releases.bazel.build, and fork releases from GitHub.
func determineURL(fork string, version string, isCommit bool, filename string) string {
	if isCommit {
		// No need to check the OS thanks to determineBazelFilename().
		platforms := map[string]string{"darwin": "macos", "linux": "ubuntu1404", "windows": "windows"}
		log.Printf("Using unreleased version at commit %s", version)
		return fmt.Sprintf("https://storage.googleapis.com/bazel-builds/artifacts/%s/%s/bazel", platforms[runtime.GOOS], version)
	}
	kind := "release"
	if strings.Contains(version, "rc") {
		// e.g. "0.28.1rc5" lives in the ".../0.28.1/rc5/" directory.
		parts := strings.Split(version, "rc")
		version, kind = parts[0], "rc"+parts[1]
	}
	if fork == bazelUpstream {
		return fmt.Sprintf("https://releases.bazel.build/%s/%s/%s", version, kind, filename)
	}
	return fmt.Sprintf("https://github.com/%s/bazel/releases/download/%s/%s", fork, version, filename)
}
// downloadBazel downloads the requested Bazel release (or commit build) into
// directory unless it is already cached there, and returns the path of the
// resulting binary.
func downloadBazel(fork string, version string, isCommit bool, directory string) (string, error) {
	filename, err := determineBazelFilename(version)
	if err != nil {
		return "", fmt.Errorf("could not determine filename to use for Bazel binary: %v", err)
	}
	url := determineURL(fork, version, isCommit, filename)
	destinationPath := filepath.Join(directory, filename)
	// Only download when the binary is not already on disk.
	if _, err := os.Stat(destinationPath); err != nil {
		tmpfile, err := ioutil.TempFile(directory, "download")
		if err != nil {
			return "", fmt.Errorf("could not create temporary file: %v", err)
		}
		// Cleanup trick: the temp file is removed only when this deferred
		// Close succeeds, i.e. when the explicit Close below was never reached
		// (early error paths). On the success path the file was already closed
		// and renamed, so Close fails here and the renamed file is kept.
		defer func() {
			err := tmpfile.Close()
			if err == nil {
				os.Remove(tmpfile.Name())
			}
		}()
		log.Printf("Downloading %s...", url)
		resp, err := http.Get(url)
		if err != nil {
			return "", fmt.Errorf("HTTP GET %s failed: %v", url, err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != 200 {
			return "", fmt.Errorf("HTTP GET %s failed with error %v", url, resp.StatusCode)
		}
		// Stream the body straight to disk.
		_, err = io.Copy(tmpfile, resp.Body)
		if err != nil {
			return "", fmt.Errorf("could not copy from %s to %s: %v", url, tmpfile.Name(), err)
		}
		// Make the downloaded binary executable.
		err = os.Chmod(tmpfile.Name(), 0755)
		if err != nil {
			return "", fmt.Errorf("could not chmod file %s: %v", tmpfile.Name(), err)
		}
		tmpfile.Close()
		// Atomically move the fully-downloaded file into place.
		err = os.Rename(tmpfile.Name(), destinationPath)
		if err != nil {
			return "", fmt.Errorf("could not move %s to %s: %v", tmpfile.Name(), destinationPath, err)
		}
	}
	return destinationPath, nil
}
// maybeDelegateToWrapper returns the path of the workspace's ./tools/bazel
// wrapper script when one exists and is executable, unless wrapping is
// disabled via BAZELISK_SKIP_WRAPPER. Otherwise it returns bazel unchanged.
func maybeDelegateToWrapper(bazel string) string {
	if os.Getenv(skipWrapperEnv) != "" {
		return bazel
	}
	wd, err := os.Getwd()
	if err != nil {
		return bazel
	}
	wrapper := filepath.Join(findWorkspaceRoot(wd), wrapperPath)
	stat, err := os.Stat(wrapper)
	// Require the "executable by others" bit, matching the original check.
	if err != nil || stat.Mode().Perm()&0001 == 0 {
		return bazel
	}
	return wrapper
}
// runBazel executes Bazel (or the workspace wrapper) with the given args,
// forwarding stdio and SIGINT/SIGTERM, and returns Bazel's exit code.
func runBazel(bazel string, args []string) (int, error) {
	execPath := maybeDelegateToWrapper(bazel)
	if execPath != bazel {
		// Tell the wrapper where the real binary lives.
		os.Setenv(bazelReal, bazel)
	}
	cmd := exec.Command(execPath, args...)
	cmd.Env = append(os.Environ(), skipWrapperEnv+"=true")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Start()
	if err != nil {
		return 1, fmt.Errorf("could not start Bazel: %v", err)
	}
	// Bug fix: signal.Notify requires a buffered channel — it does not block
	// when delivering, so an unbuffered channel can silently drop signals.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		s := <-c
		if runtime.GOOS != "windows" {
			// Forward the signal so Bazel can shut down gracefully.
			cmd.Process.Signal(s)
		} else {
			// Windows has no equivalent of forwarding SIGTERM.
			cmd.Process.Kill()
		}
	}()
	err = cmd.Wait()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			return waitStatus.ExitStatus(), nil
		}
		return 1, fmt.Errorf("could not launch Bazel: %v", err)
	}
	return 0, nil
}
// issue is the subset of a GitHub issue used here: only the title, from which
// the incompatible-flag name is extracted.
type issue struct {
	Title string `json:"title"`
}
// issueList mirrors the shape of a GitHub issue-search response.
type issueList struct {
	Items []issue `json:"items"`
}
// getIncompatibleFlags queries GitHub for issues labeled
// "migration-<major>.<minor>" and extracts the corresponding
// --incompatible_* flags from the issue titles, sorted alphabetically.
func getIncompatibleFlags(bazeliskHome, resolvedBazelVersion string) ([]string, error) {
	// GitHub labels use only major and minor version, we ignore the patch number (and any other suffix).
	version := regexp.MustCompile(`^\d+\.\d+`).FindString(resolvedBazelVersion)
	if version == "" {
		return nil, fmt.Errorf("invalid version %v", resolvedBazelVersion)
	}
	url := "https://api.github.com/search/issues?per_page=100&q=repo:bazelbuild/bazel+label:migration-" + version
	issuesJSON, err := maybeDownload(bazeliskHome, url, "flags-"+version, "list of flags from GitHub")
	if err != nil {
		return nil, fmt.Errorf("could not get issues from GitHub: %v", err)
	}
	var issues issueList
	if err := json.Unmarshal(issuesJSON, &issues); err != nil {
		return nil, fmt.Errorf("could not parse JSON into list of issues: %v", err)
	}
	flagRe := regexp.MustCompile(`^incompatible_\w+`)
	var result []string
	for _, issue := range issues.Items {
		if flag := flagRe.FindString(issue.Title); flag != "" {
			result = append(result, "--"+flag)
		}
	}
	sort.Strings(result)
	return result, nil
}
// insertArgs will insert newArgs in baseArgs. If baseArgs contains the
// "--" argument, newArgs will be inserted before that. Otherwise, newArgs
// is appended. Only the first "--" acts as the insertion point.
func insertArgs(baseArgs []string, newArgs []string) []string {
	for i, arg := range baseArgs {
		if arg == "--" {
			merged := make([]string, 0, len(baseArgs)+len(newArgs))
			merged = append(merged, baseArgs[:i]...)
			merged = append(merged, newArgs...)
			merged = append(merged, baseArgs[i:]...)
			return merged
		}
	}
	merged := make([]string, 0, len(baseArgs)+len(newArgs))
	merged = append(merged, baseArgs...)
	return append(merged, newArgs...)
}
// shutdownIfNeeded runs "bazel shutdown" when the BAZELISK_SHUTDOWN env var is
// set to any non-empty value; it exits the process if the command fails.
func shutdownIfNeeded(bazelPath string) {
	if os.Getenv("BAZELISK_SHUTDOWN") == "" {
		return
	}
	fmt.Printf("bazel shutdown\n")
	exitCode, err := runBazel(bazelPath, []string{"shutdown"})
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run bazel shutdown: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: shutdown command failed.\n")
		os.Exit(exitCode)
	}
}
// cleanIfNeeded runs "bazel clean --expunge" when the BAZELISK_CLEAN env var
// is set to any non-empty value; it exits the process if the command fails.
func cleanIfNeeded(bazelPath string) {
	if os.Getenv("BAZELISK_CLEAN") == "" {
		return
	}
	fmt.Printf("bazel clean --expunge\n")
	exitCode, err := runBazel(bazelPath, []string{"clean", "--expunge"})
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run clean: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: clean command failed.\n")
		os.Exit(exitCode)
	}
}
// migrate will run Bazel with each newArgs separately and report which ones are failing.
// This function never returns: it always terminates via os.Exit (0 when no
// migration is needed, non-zero otherwise).
func migrate(bazelPath string, baseArgs []string, newArgs []string) {
	// 1. Try with all the flags.
	args := insertArgs(baseArgs, newArgs)
	fmt.Printf("\n\n--- Running Bazel with all incompatible flags\n\n")
	shutdownIfNeeded(bazelPath)
	cleanIfNeeded(bazelPath)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err := runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode == 0 {
		fmt.Printf("Success: No migration needed.\n")
		os.Exit(0)
	}
	// 2. Try with no flags, as a sanity check.
	args = baseArgs
	fmt.Printf("\n\n--- Running Bazel with no incompatible flags\n\n")
	shutdownIfNeeded(bazelPath)
	cleanIfNeeded(bazelPath)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err = runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode != 0 {
		// The build is broken regardless of the incompatible flags; there is
		// nothing meaningful to migrate.
		fmt.Printf("Failure: Command failed, even without incompatible flags.\n")
		os.Exit(exitCode)
	}
	// 3. Try with each flag separately.
	var passList []string
	var failList []string
	for _, arg := range newArgs {
		args = insertArgs(baseArgs, []string{arg})
		fmt.Printf("\n\n--- Running Bazel with %s\n\n", arg)
		shutdownIfNeeded(bazelPath)
		cleanIfNeeded(bazelPath)
		fmt.Printf("bazel %s\n", strings.Join(args, " "))
		exitCode, err = runBazel(bazelPath, args)
		if err != nil {
			log.Fatalf("could not run Bazel: %v", err)
		}
		if exitCode == 0 {
			passList = append(passList, arg)
		} else {
			failList = append(failList, arg)
		}
	}
	// 4. Print report
	fmt.Printf("\n\n+++ Result\n\n")
	fmt.Printf("Command was successful with the following flags:\n")
	for _, arg := range passList {
		fmt.Printf(" %s\n", arg)
	}
	fmt.Printf("\n")
	fmt.Printf("Migration is needed for the following flags:\n")
	for _, arg := range failList {
		fmt.Printf(" %s\n", arg)
	}
	os.Exit(1)
}
// main is bazelisk's entry point: it determines which Bazel version to use,
// downloads it if necessary, optionally applies incompatible-flag handling
// (--strict / --migrate), prints its own version for "version", and finally
// runs Bazel with the remaining arguments.
func main() {
	bazeliskHome := os.Getenv("BAZELISK_HOME")
	if len(bazeliskHome) == 0 {
		userCacheDir, err := os.UserCacheDir()
		if err != nil {
			log.Fatalf("could not get the user's cache directory: %v", err)
		}
		bazeliskHome = filepath.Join(userCacheDir, "bazelisk")
	}
	err := os.MkdirAll(bazeliskHome, 0755)
	if err != nil {
		log.Fatalf("could not create directory %s: %v", bazeliskHome, err)
	}
	bazelVersionString, err := getBazelVersion()
	if err != nil {
		log.Fatalf("could not get Bazel version: %v", err)
	}
	// Expand a leading "~" so the version string may point at a binary inside
	// the user's home directory.
	bazelPath, err := homedir.Expand(bazelVersionString)
	if err != nil {
		log.Fatalf("could not expand home directory in path: %v", err)
	}
	// If the Bazel version is an absolute path to a Bazel binary in the filesystem, we can
	// use it directly. In that case, we don't know which exact version it is, though.
	resolvedBazelVersion := "unknown"
	isCommit := false
	// If we aren't using a local Bazel binary, we'll have to parse the version string and
	// download the version that the user wants.
	if !filepath.IsAbs(bazelPath) {
		bazelFork, bazelVersion, err := parseBazelForkAndVersion(bazelVersionString)
		if err != nil {
			log.Fatalf("could not parse Bazel fork and version: %v", err)
		}
		resolvedBazelVersion, isCommit, err = resolveVersionLabel(bazeliskHome, bazelFork, bazelVersion)
		if err != nil {
			log.Fatalf("could not resolve the version '%s' to an actual version number: %v", bazelVersion, err)
		}
		// Binaries are cached per fork under <home>/bin/<fork>/.
		bazelDirectory := filepath.Join(bazeliskHome, "bin", bazelFork)
		err = os.MkdirAll(bazelDirectory, 0755)
		if err != nil {
			log.Fatalf("could not create directory %s: %v", bazelDirectory, err)
		}
		bazelPath, err = downloadBazel(bazelFork, resolvedBazelVersion, isCommit, bazelDirectory)
		if err != nil {
			log.Fatalf("could not download Bazel: %v", err)
		}
	}
	args := os.Args[1:]
	// --strict and --migrate must be the first argument.
	if len(args) > 0 && (args[0] == "--strict" || args[0] == "--migrate") {
		cmd := args[0]
		newFlags, err := getIncompatibleFlags(bazeliskHome, resolvedBazelVersion)
		if err != nil {
			log.Fatalf("could not get the list of incompatible flags: %v", err)
		}
		if cmd == "--migrate" {
			// migrate never returns; it exits with its own status code.
			migrate(bazelPath, args[1:], newFlags)
		} else {
			// When --strict is present, it expands to the list of --incompatible_ flags
			// that should be enabled for the given Bazel version.
			args = insertArgs(args[1:], newFlags)
		}
	}
	// print bazelisk version information if "version" is the first argument
	// bazel version is executed after this command
	if len(args) > 0 && args[0] == "version" {
		// Check if the --gnu_format flag is set, if that is the case,
		// the version is printed differently
		var gnuFormat bool
		for _, arg := range args {
			if arg == "--gnu_format" {
				gnuFormat = true
				break
			}
		}
		if gnuFormat {
			fmt.Printf("Bazelisk %s\n", BazeliskVersion)
		} else {
			fmt.Printf("Bazelisk version: %s\n", BazeliskVersion)
		}
	}
	exitCode, err := runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	os.Exit(exitCode)
}
Support WORKSPACE.bazel file (#100)
When finding workspace root, also check the existence of WORKSPACE.bazel
file. Bazel supports the WORKSPACE.bazel file since version 1.2.
Related https://github.com/bazelbuild/bazel/pull/10175
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"time"
version "github.com/hashicorp/go-version"
homedir "github.com/mitchellh/go-homedir"
)
const (
	// bazelReal is the env var through which a wrapper script is told where
	// the real (downloaded) Bazel binary lives.
	bazelReal = "BAZEL_REAL"
	// skipWrapperEnv, when set, prevents delegation to the workspace wrapper.
	skipWrapperEnv = "BAZELISK_SKIP_WRAPPER"
	// wrapperPath is the workspace-relative path of the optional wrapper script.
	wrapperPath = "./tools/bazel"
	// bazelUpstream is the GitHub organization of the canonical Bazel repo.
	bazelUpstream = "bazelbuild"
)
var (
	// BazeliskVersion is reported by `bazelisk version`.
	// NOTE(review): defaults to "development"; presumably stamped at release
	// time (e.g. via -ldflags) — confirm with the build configuration.
	BazeliskVersion = "development"
)
// findWorkspaceRoot walks up from root until it finds a directory containing
// a WORKSPACE or WORKSPACE.bazel file (the latter is supported since Bazel
// 1.2), returning "" if none exists up to the filesystem root.
func findWorkspaceRoot(root string) string {
	for _, marker := range []string{"WORKSPACE", "WORKSPACE.bazel"} {
		if _, err := os.Stat(filepath.Join(root, marker)); err == nil {
			return root
		}
	}
	parent := filepath.Dir(root)
	if parent == root {
		return ""
	}
	return findWorkspaceRoot(parent)
}
// getBazelVersion decides which Bazel version label to use.
// Check in this order:
// - env var "USE_BAZEL_VERSION" is set to a specific version.
// - env var "USE_NIGHTLY_BAZEL" or "USE_BAZEL_NIGHTLY" is set -> latest
//   nightly. (TODO)
// - env var "USE_CANARY_BAZEL" or "USE_BAZEL_CANARY" is set -> latest
//   rc. (TODO)
// - the file workspace_root/tools/bazel exists -> that version. (TODO)
// - workspace_root/.bazelversion exists -> read contents, that version.
// - workspace_root/WORKSPACE contains a version -> that version. (TODO)
// - fallback: latest release
func getBazelVersion() (string, error) {
	bazelVersion := os.Getenv("USE_BAZEL_VERSION")
	if len(bazelVersion) != 0 {
		return bazelVersion, nil
	}
	workingDirectory, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("could not get working directory: %v", err)
	}
	workspaceRoot := findWorkspaceRoot(workingDirectory)
	if len(workspaceRoot) != 0 {
		bazelVersionPath := filepath.Join(workspaceRoot, ".bazelversion")
		if _, err := os.Stat(bazelVersionPath); err == nil {
			f, err := os.Open(bazelVersionPath)
			if err != nil {
				return "", fmt.Errorf("could not read %s: %v", bazelVersionPath, err)
			}
			defer f.Close()
			// Only the first line of .bazelversion is considered.
			scanner := bufio.NewScanner(f)
			scanner.Scan()
			bazelVersion := scanner.Text()
			if err := scanner.Err(); err != nil {
				// Bug fix: report the file path here, not the (possibly empty)
				// version value that was read.
				return "", fmt.Errorf("could not read version from file %s: %v", bazelVersionPath, err)
			}
			if len(bazelVersion) != 0 {
				return bazelVersion, nil
			}
		}
	}
	return "latest", nil
}
// parseBazelForkAndVersion splits a "fork/version" specifier into its two
// parts. A bare version without a slash is attributed to the upstream Bazel
// project; more than one slash is an error.
func parseBazelForkAndVersion(bazelForkAndVersion string) (string, string, error) {
	parts := strings.Split(bazelForkAndVersion, "/")
	switch len(parts) {
	case 1:
		return bazelUpstream, parts[0], nil
	case 2:
		return parts[0], parts[1], nil
	}
	return "", "", fmt.Errorf("invalid version \"%s\", could not parse version with more than one slash", bazelForkAndVersion)
}
// release is the subset of a GitHub release JSON object that bazelisk needs:
// the tag name and whether the release is a prerelease (which is skipped when
// resolving "latest").
type release struct {
	TagName string `json:"tag_name"`
	Prerelease bool `json:"prerelease"`
}
// readRemoteFile performs a GET on url — authenticated with a GitHub token
// when token is non-empty — and returns the full response body. Any non-200
// status code is treated as an error.
func readRemoteFile(url string, token string) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, fmt.Errorf("could not create request: %v", err)
	}
	if token != "" {
		req.Header.Set("Authorization", "token "+token)
	}
	res, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, fmt.Errorf("could not fetch %s: %v", url, err)
	}
	defer res.Body.Close()
	if res.StatusCode != 200 {
		return nil, fmt.Errorf("unexpected status code while reading %s: %v", url, res.StatusCode)
	}
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read content at %s: %v", url, err)
	}
	return body, nil
}
// maybeDownload will download a file from the given url and cache the result under bazeliskHome.
// It skips the download if the file already exists and is not outdated.
// description is used only to provide better error messages.
func maybeDownload(bazeliskHome, url, filename, description string) ([]byte, error) {
	cachePath := filepath.Join(bazeliskHome, filename)
	// Serve from the cache when the file is less than one hour old.
	if cacheStat, err := os.Stat(cachePath); err == nil && time.Since(cacheStat.ModTime()).Hours() < 1 {
		res, err := ioutil.ReadFile(cachePath)
		if err != nil {
			return nil, fmt.Errorf("could not read %s: %v", cachePath, err)
		}
		return res, nil
	}
	// We could also use go-github here, but I can't get it to build with Bazel's rules_go and it pulls in a lot of dependencies.
	body, err := readRemoteFile(url, os.Getenv("BAZELISK_GITHUB_TOKEN"))
	if err != nil {
		return nil, fmt.Errorf("could not download %s: %v", description, err)
	}
	if err := ioutil.WriteFile(cachePath, body, 0666); err != nil {
		return nil, fmt.Errorf("could not create %s: %v", cachePath, err)
	}
	return body, nil
}
// resolveLatestVersion fetches (with caching) the GitHub release list of the
// given fork and resolves "latest-<offset>" against its non-prerelease tags.
func resolveLatestVersion(bazeliskHome, bazelFork string, offset int) (string, error) {
	url := fmt.Sprintf("https://api.github.com/repos/%s/bazel/releases", bazelFork)
	releasesJSON, err := maybeDownload(bazeliskHome, url, bazelFork+"-releases.json", "list of Bazel releases from github.com/"+bazelFork)
	if err != nil {
		return "", fmt.Errorf("could not get releases from github.com/%s/bazel: %v", bazelFork, err)
	}
	var releases []release
	if err := json.Unmarshal(releasesJSON, &releases); err != nil {
		return "", fmt.Errorf("could not parse JSON into list of releases: %v", err)
	}
	// Prereleases are never candidates for "latest".
	tags := make([]string, 0, len(releases))
	for _, r := range releases {
		if !r.Prerelease {
			tags = append(tags, r.TagName)
		}
	}
	return getNthMostRecentVersion(tags, offset)
}
// getNthMostRecentVersion returns the version that is `offset` positions
// behind the most recent entry of `versions` (offset 0 == newest).
// Bug fix: entries that cannot be parsed are now skipped with a warning.
// Previously a nil *version.Version was stored in the slice, which panicked
// later inside sort or when calling Original() on it.
func getNthMostRecentVersion(versions []string, offset int) (string, error) {
	var wrappers []*version.Version
	for _, v := range versions {
		wrapper, err := version.NewVersion(v)
		if err != nil {
			log.Printf("WARN: Could not parse version: %s", v)
			continue
		}
		wrappers = append(wrappers, wrapper)
	}
	// Check the offset against the number of *parseable* versions.
	if offset >= len(wrappers) {
		return "", fmt.Errorf("cannot resolve version \"latest-%d\": There are only %d Bazel versions", offset, len(wrappers))
	}
	sort.Sort(version.Collection(wrappers))
	return wrappers[len(wrappers)-1-offset].Original(), nil
}
// gcsListResponse models the subset of a GCS object-listing response needed
// here: the "directory" prefixes returned when listing with a delimiter.
type gcsListResponse struct {
	Prefixes []string `json:"prefixes"`
}
// resolveLatestRcVersion determines the most recent release candidate of the
// newest Bazel version found in the public GCS release bucket.
func resolveLatestRcVersion() (string, error) {
	dirs, err := listDirectoriesInReleaseBucket("")
	if err != nil {
		return "", fmt.Errorf("could not list Bazel versions in GCS bucket: %v", err)
	}
	newest, err := getHighestBazelVersion(dirs)
	if err != nil {
		return "", fmt.Errorf("got invalid version number: %v", err)
	}
	// Append slash to match directories
	rcDirs, err := listDirectoriesInReleaseBucket(newest + "/")
	if err != nil {
		return "", fmt.Errorf("could not list release candidates for latest release: %v", err)
	}
	return getHighestRcVersion(rcDirs)
}
// listDirectoriesInReleaseBucket returns the "directory" prefixes found under
// the given prefix in the public bazel GCS bucket (delimiter "/").
func listDirectoriesInReleaseBucket(prefix string) ([]string, error) {
	url := "https://www.googleapis.com/storage/v1/b/bazel/o?delimiter=/"
	if prefix != "" {
		url += "&prefix=" + prefix
	}
	content, err := readRemoteFile(url, "")
	if err != nil {
		return nil, fmt.Errorf("could not list GCS objects at %s: %v", url, err)
	}
	var response gcsListResponse
	if err := json.Unmarshal(content, &response); err != nil {
		return nil, fmt.Errorf("could not parse GCS index JSON: %v", err)
	}
	return response.Prefixes, nil
}
// getHighestBazelVersion strips the trailing slash from each GCS directory
// name (in place, matching the original's argument mutation) and returns the
// newest of the resulting versions.
func getHighestBazelVersion(versions []string) (string, error) {
	for i := range versions {
		versions[i] = strings.TrimSuffix(versions[i], "/")
	}
	return getNthMostRecentVersion(versions, 0)
}
// getHighestRcVersion picks the highest release candidate from the GCS
// directory listing of the latest version (entries look like "0.20.0/rc3/").
// If a final release directory exists (e.g. "0.20.0/release/"), that version
// is returned directly because there is no active RC anymore.
func getHighestRcVersion(versions []string) (string, error) {
	var version string
	var lastRc int
	// Bug fixes: the dots are now escaped (`\d+.\d+.\d+` let "." match any
	// character), and entries that don't match no longer cause a nil-slice
	// panic on m[1] — they are skipped instead.
	re := regexp.MustCompile(`(\d+\.\d+\.\d+)/rc(\d+)/`)
	for _, v := range versions {
		// Fallback: use latest release if there is no active RC.
		if strings.Index(v, "release") > -1 {
			return strings.Split(v, "/")[0], nil
		}
		m := re.FindStringSubmatch(v)
		if m == nil {
			// Not an RC directory; ignore.
			continue
		}
		version = m[1]
		rc, err := strconv.Atoi(m[2])
		if err != nil {
			return "", fmt.Errorf("Invalid version number %s: %v", strings.TrimSuffix(v, "/"), err)
		}
		if rc > lastRc {
			lastRc = rc
		}
	}
	return fmt.Sprintf("%src%d", version, lastRc), nil
}
// resolveVersionLabel resolves a possibly-symbolic version label such as
// "latest", "latest-N", "last_green", "last_downstream_green" or "last_rc".
// Returns three values:
// 1. The label of a Bazel release (if the label resolves to a release) or a commit (for unreleased binaries),
// 2. Whether the first value refers to a commit,
// 3. An error.
func resolveVersionLabel(bazeliskHome, bazelFork, bazelVersion string) (string, bool, error) {
	// The CI-based labels below are only available for the upstream project,
	// not for forks.
	if bazelFork == bazelUpstream {
		lastGreenCommitPathSuffixes := map[string]string{
			"last_green": "github.com/bazelbuild/bazel.git/bazel-bazel",
			"last_downstream_green": "downstream_pipeline",
		}
		if pathSuffix, ok := lastGreenCommitPathSuffixes[bazelVersion]; ok {
			commit, err := getLastGreenCommit(pathSuffix)
			if err != nil {
				return "", false, fmt.Errorf("cannot resolve last green commit: %v", err)
			}
			return commit, true, nil
		}
		if bazelVersion == "last_rc" {
			version, err := resolveLatestRcVersion()
			return version, false, err
		}
	}
	// "latest" optionally takes a numeric offset: "latest-1" is the release
	// before the most recent one, and so on.
	r := regexp.MustCompile(`^latest(?:-(?P<offset>\d+))?$`)
	match := r.FindStringSubmatch(bazelVersion)
	if match != nil {
		offset := 0
		if match[1] != "" {
			var err error
			offset, err = strconv.Atoi(match[1])
			if err != nil {
				return "", false, fmt.Errorf("invalid version \"%s\", could not parse offset: %v", bazelVersion, err)
			}
		}
		version, err := resolveLatestVersion(bazeliskHome, bazelFork, offset)
		return version, false, err
	}
	// Anything else is passed through as a concrete version label.
	return bazelVersion, false, nil
}
// lastGreenBasePath is the base URL under which Bazel CI publishes the last
// commit that passed its pipelines.
const lastGreenBasePath = "https://storage.googleapis.com/bazel-untrusted-builds/last_green_commit/"

// getLastGreenCommit fetches the commit hash stored at
// lastGreenBasePath + pathSuffix and returns it with whitespace trimmed.
func getLastGreenCommit(pathSuffix string) (string, error) {
	body, err := readRemoteFile(lastGreenBasePath+pathSuffix, "")
	if err != nil {
		return "", fmt.Errorf("could not determine last green commit: %v", err)
	}
	return strings.TrimSpace(string(body)), nil
}
// determineBazelFilename returns the platform-specific file name of the
// official Bazel binary for the given version, e.g.
// "bazel-0.21.0-linux-x86_64" or "bazel-0.21.0-windows-x86_64.exe".
// It fails for architectures or operating systems without official binaries.
func determineBazelFilename(version string) (string, error) {
	if runtime.GOARCH != "amd64" {
		return "", fmt.Errorf("unsupported machine architecture \"%s\", must be x86_64", runtime.GOARCH)
	}
	machineName := "x86_64"

	osName := runtime.GOOS
	switch osName {
	case "darwin", "linux", "windows":
		// Official release binaries exist for exactly these systems.
	default:
		return "", fmt.Errorf("unsupported operating system \"%s\", must be Linux, macOS or Windows", osName)
	}

	suffix := ""
	if osName == "windows" {
		suffix = ".exe"
	}
	return fmt.Sprintf("bazel-%s-%s-%s%s", version, osName, machineName, suffix), nil
}
}
// determineURL computes the download URL for the requested Bazel binary:
// a CI artifact for commit builds, releases.bazel.build for upstream
// releases/RCs, or GitHub release assets for forks.
func determineURL(fork string, version string, isCommit bool, filename string) string {
	if isCommit {
		// No need to check the OS thanks to determineBazelFilename().
		platforms := map[string]string{"darwin": "macos", "linux": "ubuntu1404", "windows": "windows"}
		log.Printf("Using unreleased version at commit %s", version)
		return fmt.Sprintf("https://storage.googleapis.com/bazel-builds/artifacts/%s/%s/bazel", platforms[runtime.GOOS], version)
	}

	// A version like "0.20.0rc2" lives under ".../0.20.0/rc2/...".
	kind := "release"
	if strings.Contains(version, "rc") {
		pieces := strings.Split(version, "rc")
		version, kind = pieces[0], "rc"+pieces[1]
	}

	if fork == bazelUpstream {
		return fmt.Sprintf("https://releases.bazel.build/%s/%s/%s", version, kind, filename)
	}
	return fmt.Sprintf("https://github.com/%s/bazel/releases/download/%s/%s", fork, version, filename)
}
// downloadBazel downloads the requested Bazel binary for the current
// platform into directory (unless already cached there) and returns the
// path of the executable.
func downloadBazel(fork string, version string, isCommit bool, directory string) (string, error) {
	filename, err := determineBazelFilename(version)
	if err != nil {
		return "", fmt.Errorf("could not determine filename to use for Bazel binary: %v", err)
	}
	url := determineURL(fork, version, isCommit, filename)
	destinationPath := filepath.Join(directory, filename)
	// Only download when the binary is not already present at destinationPath.
	if _, err := os.Stat(destinationPath); err != nil {
		// Download into a temp file in the same directory so the final
		// os.Rename is atomic (same filesystem).
		tmpfile, err := ioutil.TempFile(directory, "download")
		if err != nil {
			return "", fmt.Errorf("could not create temporary file: %v", err)
		}
		// Cleanup trick: on the success path the file is explicitly closed
		// and renamed below, so this deferred Close returns an error and the
		// (renamed-away) temp file is NOT removed. On error paths the Close
		// succeeds and the half-written temp file is deleted.
		defer func() {
			err := tmpfile.Close()
			if err == nil {
				os.Remove(tmpfile.Name())
			}
		}()

		log.Printf("Downloading %s...", url)
		resp, err := http.Get(url)
		if err != nil {
			return "", fmt.Errorf("HTTP GET %s failed: %v", url, err)
		}
		defer resp.Body.Close()

		if resp.StatusCode != 200 {
			return "", fmt.Errorf("HTTP GET %s failed with error %v", url, resp.StatusCode)
		}

		_, err = io.Copy(tmpfile, resp.Body)
		if err != nil {
			return "", fmt.Errorf("could not copy from %s to %s: %v", url, tmpfile.Name(), err)
		}

		// Bazel binaries must be executable.
		err = os.Chmod(tmpfile.Name(), 0755)
		if err != nil {
			return "", fmt.Errorf("could not chmod file %s: %v", tmpfile.Name(), err)
		}

		// Close before renaming (required on Windows), and so that the
		// deferred Close above fails and skips the os.Remove.
		tmpfile.Close()
		err = os.Rename(tmpfile.Name(), destinationPath)
		if err != nil {
			return "", fmt.Errorf("could not move %s to %s: %v", tmpfile.Name(), destinationPath, err)
		}
	}
	return destinationPath, nil
}
// maybeDelegateToWrapper returns the path of the workspace's wrapper script
// (wrapperPath relative to the workspace root) if it exists and is
// executable; otherwise it returns the given bazel binary unchanged.
// Setting the skipWrapperEnv environment variable disables delegation.
func maybeDelegateToWrapper(bazel string) string {
	if os.Getenv(skipWrapperEnv) != "" {
		return bazel
	}
	cwd, err := os.Getwd()
	if err != nil {
		return bazel
	}
	wrapper := filepath.Join(findWorkspaceRoot(cwd), wrapperPath)
	stat, statErr := os.Stat(wrapper)
	// Fall back to the real binary when the wrapper is missing or not
	// executable by "other".
	if statErr != nil || stat.Mode().Perm()&0001 == 0 {
		return bazel
	}
	return wrapper
}
// runBazel executes the given Bazel binary (or the workspace wrapper, if one
// applies) with args, wiring up stdin/stdout/stderr, forwarding SIGINT/SIGTERM
// to the child, and returning the child's exit code.
func runBazel(bazel string, args []string) (int, error) {
	execPath := maybeDelegateToWrapper(bazel)
	if execPath != bazel {
		// Let the wrapper know where the real binary lives.
		os.Setenv(bazelReal, bazel)
	}

	cmd := exec.Command(execPath, args...)
	// Prevent the wrapper from re-delegating to itself.
	cmd.Env = append(os.Environ(), skipWrapperEnv+"=true")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Start()
	if err != nil {
		return 1, fmt.Errorf("could not start Bazel: %v", err)
	}
	// signal.Notify requires a buffered channel; with an unbuffered one a
	// signal delivered before the goroutine below is receiving would be
	// dropped (flagged by go vet).
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		s := <-c
		if runtime.GOOS != "windows" {
			// Forward the signal so Bazel can shut down gracefully.
			cmd.Process.Signal(s)
		} else {
			// Windows has no POSIX signal delivery; kill instead.
			cmd.Process.Kill()
		}
	}()
	err = cmd.Wait()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			// Bazel ran but exited non-zero: report its code, not an error.
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			return waitStatus.ExitStatus(), nil
		}
		return 1, fmt.Errorf("could not launch Bazel: %v", err)
	}
	return 0, nil
}
// issue models a single GitHub issue; only the title is needed, since the
// incompatible-flag name is extracted from it.
type issue struct {
	Title string `json:"title"`
}

// issueList models the "items" array of a GitHub issue-search response.
type issueList struct {
	Items []issue `json:"items"`
}
// getIncompatibleFlags returns the sorted list of "--incompatible_*" flags
// whose migration is tracked on GitHub under the "migration-<major.minor>"
// label for the given Bazel version.
func getIncompatibleFlags(bazeliskHome, resolvedBazelVersion string) ([]string, error) {
	// GitHub labels use only major and minor version, we ignore the patch number (and any other suffix).
	majorMinor := regexp.MustCompile(`^\d+\.\d+`).FindString(resolvedBazelVersion)
	if majorMinor == "" {
		return nil, fmt.Errorf("invalid version %v", resolvedBazelVersion)
	}
	url := "https://api.github.com/search/issues?per_page=100&q=repo:bazelbuild/bazel+label:migration-" + majorMinor
	issuesJSON, err := maybeDownload(bazeliskHome, url, "flags-"+majorMinor, "list of flags from GitHub")
	if err != nil {
		return nil, fmt.Errorf("could not get issues from GitHub: %v", err)
	}
	var issues issueList
	if err := json.Unmarshal(issuesJSON, &issues); err != nil {
		return nil, fmt.Errorf("could not parse JSON into list of issues: %v", err)
	}
	// Issue titles start with the flag name, e.g. "incompatible_foo: ...".
	flagRe := regexp.MustCompile(`^incompatible_\w+`)
	var flags []string
	for _, item := range issues.Items {
		if name := flagRe.FindString(item.Title); name != "" {
			flags = append(flags, "--"+name)
		}
	}
	sort.Strings(flags)
	return flags, nil
}
// insertArgs will insert newArgs in baseArgs. If baseArgs contains the
// "--" argument, newArgs will be inserted before that. Otherwise, newArgs
// is appended.
func insertArgs(baseArgs []string, newArgs []string) []string {
	result := make([]string, 0, len(baseArgs)+len(newArgs))
	done := false
	for _, a := range baseArgs {
		// Insert before the first "--" only; later ones are kept as-is.
		if a == "--" && !done {
			result = append(result, newArgs...)
			done = true
		}
		result = append(result, a)
	}
	if !done {
		result = append(result, newArgs...)
	}
	return result
}
// shutdownIfNeeded runs "bazel shutdown" when the BAZELISK_SHUTDOWN
// environment variable is set, terminating the whole process if the
// shutdown command fails.
func shutdownIfNeeded(bazelPath string) {
	if os.Getenv("BAZELISK_SHUTDOWN") == "" {
		return
	}

	fmt.Printf("bazel shutdown\n")
	exitCode, err := runBazel(bazelPath, []string{"shutdown"})
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run bazel shutdown: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: shutdown command failed.\n")
		os.Exit(exitCode)
	}
}
// cleanIfNeeded runs "bazel clean --expunge" when the BAZELISK_CLEAN
// environment variable is set, terminating the whole process if the
// clean command fails.
func cleanIfNeeded(bazelPath string) {
	if os.Getenv("BAZELISK_CLEAN") == "" {
		return
	}

	fmt.Printf("bazel clean --expunge\n")
	exitCode, err := runBazel(bazelPath, []string{"clean", "--expunge"})
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run clean: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: clean command failed.\n")
		os.Exit(exitCode)
	}
}
// migrate will run Bazel with each newArgs separately and report which ones are failing.
// It never returns: every path ends in os.Exit. Exit code 0 means no
// migration is needed; otherwise the failing flags are listed and the
// process exits non-zero.
func migrate(bazelPath string, baseArgs []string, newArgs []string) {
	// 1. Try with all the flags.
	args := insertArgs(baseArgs, newArgs)
	fmt.Printf("\n\n--- Running Bazel with all incompatible flags\n\n")
	shutdownIfNeeded(bazelPath)
	cleanIfNeeded(bazelPath)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err := runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode == 0 {
		fmt.Printf("Success: No migration needed.\n")
		os.Exit(0)
	}

	// 2. Try with no flags, as a sanity check.
	args = baseArgs
	fmt.Printf("\n\n--- Running Bazel with no incompatible flags\n\n")
	shutdownIfNeeded(bazelPath)
	cleanIfNeeded(bazelPath)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err = runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode != 0 {
		// The build is broken regardless of the flags; migration results
		// would be meaningless.
		fmt.Printf("Failure: Command failed, even without incompatible flags.\n")
		os.Exit(exitCode)
	}

	// 3. Try with each flag separately.
	var passList []string
	var failList []string
	for _, arg := range newArgs {
		args = insertArgs(baseArgs, []string{arg})
		fmt.Printf("\n\n--- Running Bazel with %s\n\n", arg)
		shutdownIfNeeded(bazelPath)
		cleanIfNeeded(bazelPath)
		fmt.Printf("bazel %s\n", strings.Join(args, " "))
		exitCode, err = runBazel(bazelPath, args)
		if err != nil {
			log.Fatalf("could not run Bazel: %v", err)
		}
		if exitCode == 0 {
			passList = append(passList, arg)
		} else {
			failList = append(failList, arg)
		}
	}

	// 4. Print report
	fmt.Printf("\n\n+++ Result\n\n")
	fmt.Printf("Command was successful with the following flags:\n")
	for _, arg := range passList {
		fmt.Printf("  %s\n", arg)
	}
	fmt.Printf("\n")
	fmt.Printf("Migration is needed for the following flags:\n")
	for _, arg := range failList {
		fmt.Printf("  %s\n", arg)
	}
	os.Exit(1)
}
// main is Bazelisk's entry point: it resolves which Bazel version to use,
// downloads it into the Bazelisk cache if necessary, handles the special
// first arguments --strict/--migrate and "version", then execs Bazel with
// the remaining arguments and exits with Bazel's exit code.
func main() {
	// Cache directory: $BAZELISK_HOME, or "<user cache dir>/bazelisk".
	bazeliskHome := os.Getenv("BAZELISK_HOME")
	if len(bazeliskHome) == 0 {
		userCacheDir, err := os.UserCacheDir()
		if err != nil {
			log.Fatalf("could not get the user's cache directory: %v", err)
		}

		bazeliskHome = filepath.Join(userCacheDir, "bazelisk")
	}

	err := os.MkdirAll(bazeliskHome, 0755)
	if err != nil {
		log.Fatalf("could not create directory %s: %v", bazeliskHome, err)
	}

	bazelVersionString, err := getBazelVersion()
	if err != nil {
		log.Fatalf("could not get Bazel version: %v", err)
	}

	bazelPath, err := homedir.Expand(bazelVersionString)
	if err != nil {
		log.Fatalf("could not expand home directory in path: %v", err)
	}

	// If the Bazel version is an absolute path to a Bazel binary in the filesystem, we can
	// use it directly. In that case, we don't know which exact version it is, though.
	resolvedBazelVersion := "unknown"
	isCommit := false

	// If we aren't using a local Bazel binary, we'll have to parse the version string and
	// download the version that the user wants.
	if !filepath.IsAbs(bazelPath) {
		bazelFork, bazelVersion, err := parseBazelForkAndVersion(bazelVersionString)
		if err != nil {
			log.Fatalf("could not parse Bazel fork and version: %v", err)
		}

		resolvedBazelVersion, isCommit, err = resolveVersionLabel(bazeliskHome, bazelFork, bazelVersion)
		if err != nil {
			log.Fatalf("could not resolve the version '%s' to an actual version number: %v", bazelVersion, err)
		}

		// Binaries are cached per fork under <home>/bin/<fork>/.
		bazelDirectory := filepath.Join(bazeliskHome, "bin", bazelFork)
		err = os.MkdirAll(bazelDirectory, 0755)
		if err != nil {
			log.Fatalf("could not create directory %s: %v", bazelDirectory, err)
		}

		bazelPath, err = downloadBazel(bazelFork, resolvedBazelVersion, isCommit, bazelDirectory)
		if err != nil {
			log.Fatalf("could not download Bazel: %v", err)
		}
	}

	args := os.Args[1:]

	// --strict and --migrate must be the first argument.
	if len(args) > 0 && (args[0] == "--strict" || args[0] == "--migrate") {
		cmd := args[0]
		newFlags, err := getIncompatibleFlags(bazeliskHome, resolvedBazelVersion)
		if err != nil {
			log.Fatalf("could not get the list of incompatible flags: %v", err)
		}

		if cmd == "--migrate" {
			// migrate never returns; it exits with its own status.
			migrate(bazelPath, args[1:], newFlags)
		} else {
			// When --strict is present, it expands to the list of --incompatible_ flags
			// that should be enabled for the given Bazel version.
			args = insertArgs(args[1:], newFlags)
		}
	}

	// print bazelisk version information if "version" is the first argument
	// bazel version is executed after this command
	if len(args) > 0 && args[0] == "version" {
		// Check if the --gnu_format flag is set, if that is the case,
		// the version is printed differently
		var gnuFormat bool
		for _, arg := range args {
			if arg == "--gnu_format" {
				gnuFormat = true
				break
			}
		}

		if gnuFormat {
			fmt.Printf("Bazelisk %s\n", BazeliskVersion)
		} else {
			fmt.Printf("Bazelisk version: %s\n", BazeliskVersion)
		}
	}

	exitCode, err := runBazel(bazelPath, args)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	os.Exit(exitCode)
}
|
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"code.google.com/p/go.crypto/pbkdf2"
"crypto/sha512"
"encoding/json"
"fmt"
"github.com/globocom/config"
"github.com/globocom/tsuru/app"
"github.com/globocom/tsuru/auth"
"github.com/globocom/tsuru/db"
"github.com/globocom/tsuru/errors"
"io"
"io/ioutil"
"labix.org/v2/mgo/bson"
. "launchpad.net/gocheck"
"net/http"
"net/http/httptest"
"os"
"path"
"strconv"
"strings"
)
// AuthSuite is the gocheck suite for the auth-related HTTP handlers.
// It holds the shared database connection and a pre-created user/team
// (set up once in SetUpSuite) that most tests reuse.
type AuthSuite struct {
	conn *db.Storage
	team *auth.Team
	user *auth.User
}

// Register the suite with gocheck.
var _ = Suite(&AuthSuite{})
// SetUpSuite loads the test configuration, points the suite at a dedicated
// test database and creates the baseline user and team.
func (s *AuthSuite) SetUpSuite(c *C) {
	err := config.ReadConfigFile("../etc/tsuru.conf")
	c.Assert(err, IsNil)
	// Use a dedicated database so tests cannot clobber real data.
	config.Set("database:name", "tsuru_api_auth_test")
	s.conn, err = db.Conn()
	c.Assert(err, IsNil)
	s.createUserAndTeam(c)
}
// TearDownSuite drops the whole test database after the suite finishes.
func (s *AuthSuite) TearDownSuite(c *C) {
	s.conn.Apps().Database.DropDatabase()
}
// TearDownTest removes every user and team created during a test (keeping
// the suite's baseline user/team) and restores the baseline user's password.
func (s *AuthSuite) TearDownTest(c *C) {
	_, err := s.conn.Users().RemoveAll(bson.M{"email": bson.M{"$ne": s.user.Email}})
	c.Assert(err, IsNil)
	_, err = s.conn.Teams().RemoveAll(bson.M{"_id": bson.M{"$ne": s.team.Name}})
	c.Assert(err, IsNil)
	// Reset the baseline user's password in case a test changed it.
	s.user.Password = "123"
	s.user.HashPassword()
	err = s.user.Update()
	c.Assert(err, IsNil)
}
// createUserAndTeam creates the baseline user and a team containing that
// user, storing both on the suite for the tests to use.
func (s *AuthSuite) createUserAndTeam(c *C) {
	s.user = &auth.User{Email: "whydidifall@thewho.com", Password: "123"}
	err := s.user.Create()
	c.Assert(err, IsNil)
	s.team = &auth.Team{Name: "tsuruteam", Users: []string{s.user.Email}}
	err = s.conn.Teams().Insert(s.team)
	c.Assert(err, IsNil)
}
// startGandalfTestServer starts a new httptest.Server wrapping h and
// returns it. It also changes git:host, git:port and git:protocol in the
// configuration to match the server's URL, so code under test talks to
// this fake Gandalf instance.
func (s *AuthSuite) startGandalfTestServer(h http.Handler) *httptest.Server {
	ts := httptest.NewServer(h)
	// ts.URL looks like "http://127.0.0.1:PORT".
	parts := strings.SplitN(ts.URL, "://", 2)
	protocol := parts[0]
	hostAndPort := strings.SplitN(parts[1], ":", 2)
	config.Set("git:host", hostAndPort[0])
	portNumber, _ := strconv.ParseInt(hostAndPort[1], 10, 0)
	config.Set("git:port", portNumber)
	config.Set("git:protocol", protocol)
	return ts
}
// getTestData opens a fixture file under ./testdata, joining any given path
// components (e.g. getTestData("bodyToBeClosed.txt") opens
// "./testdata/bodyToBeClosed.txt"). Open errors are deliberately ignored;
// callers assert on the reader's behavior.
func (s *AuthSuite) getTestData(p ...string) io.ReadCloser {
	// Prepend the testdata directory to the caller's path components.
	// (The previous `append([]string{}, ".", "testdata")` discarded p
	// entirely, so the joined path was always just "testdata".)
	p = append([]string{".", "testdata"}, p...)
	fp := path.Join(p...)
	f, _ := os.OpenFile(fp, os.O_RDONLY, 0)
	return f
}
// hasKeyChecker is a custom gocheck checker that verifies whether a user
// has a given SSH key.
type hasKeyChecker struct{}

// Info describes the checker for gocheck: name and expected parameters.
func (c *hasKeyChecker) Info() *CheckerInfo {
	return &CheckerInfo{Name: "HasKey", Params: []string{"user", "key"}}
}

// Check expects params[0] to be an *auth.User and params[1] a key content
// string; it reports whether the user has that key.
func (c *hasKeyChecker) Check(params []interface{}, names []string) (bool, string) {
	if len(params) != 2 {
		return false, "you should provide two parameters"
	}
	user, ok := params[0].(*auth.User)
	if !ok {
		return false, "first parameter should be a user pointer"
	}
	content, ok := params[1].(string)
	if !ok {
		return false, "second parameter should be a string"
	}
	key := auth.Key{Content: content}
	return user.HasKey(key), ""
}

// HasKey is the checker instance used in assertions, e.g.
// c.Assert(user, HasKey, "ssh-rsa ...").
var HasKey Checker = &hasKeyChecker{}
// userPresenceChecker is a custom gocheck checker that verifies whether a
// team contains a given user.
type userPresenceChecker struct{}

// Info describes the checker for gocheck: name and expected parameters.
func (c *userPresenceChecker) Info() *CheckerInfo {
	return &CheckerInfo{Name: "ContainsUser", Params: []string{"team", "user"}}
}

// Check expects params[0] to be an *auth.Team and params[1] an *auth.User;
// it reports whether the team contains the user.
func (c *userPresenceChecker) Check(params []interface{}, names []string) (bool, string) {
	team, ok := params[0].(*auth.Team)
	if !ok {
		return false, "first parameter should be a pointer to a team instance"
	}
	user, ok := params[1].(*auth.User)
	if !ok {
		return false, "second parameter should be a pointer to a user instance"
	}
	return team.ContainsUser(user), ""
}

// ContainsUser is the checker instance used in assertions, e.g.
// c.Assert(team, ContainsUser, user).
var ContainsUser Checker = &userPresenceChecker{}
// A valid POST /users request should persist the new user in the database.
func (s *AuthSuite) TestCreateUserHandlerSavesTheUserInTheDatabase(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, IsNil)
	u := auth.User{Email: "nobody@globo.com"}
	err = u.Get()
	c.Assert(err, IsNil)
}

// A successful user creation should answer with HTTP 201.
func (s *AuthSuite) TestCreateUserHandlerReturnsStatus201AfterCreateTheUser(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, IsNil)
	c.Assert(recorder.Code, Equals, 201)
}

// Closing the request body before the handler reads it should surface a
// read error from CreateUser.
func (s *AuthSuite) TestCreateUserHandlerReturnErrorIfReadingBodyFails(c *C) {
	b := s.getTestData("bodyToBeClosed.txt")
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	request.Body.Close()
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^.*bad file descriptor$")
}

// Malformed JSON in the request body should yield an *errors.Http with
// status 400 (bad request).
func (s *AuthSuite) TestCreateUserHandlerReturnErrorAndBadRequestIfInvalidJSONIsGiven(c *C) {
	b := bytes.NewBufferString(`["invalid json":"i'm invalid"]`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^invalid character.*$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
}

// Creating a user whose email is already registered should yield an
// *errors.Http with status 409 (conflict).
func (s *AuthSuite) TestCreateUserHandlerReturnErrorAndConflictIfItFailsToCreateUser(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	u := auth.User{Email: "nobody@globo.com", Password: "123456"}
	u.Create()
	b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "This email is already registered")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusConflict)
}

// An invalid email should yield an *errors.Http with status 412
// (precondition failed).
func (s *AuthSuite) TestCreateUserHandlerReturnsPreconditionFailedIfEmailIsNotValid(c *C) {
	b := bytes.NewBufferString(`{"email":"nobody","password":"123456"}`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateUser(recorder, request)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
	c.Assert(e.Message, Equals, "Invalid email.")
}

// Passwords shorter than 6 or longer than 50 characters should both yield
// status 412 with the password-length message.
func (s *AuthSuite) TestCreateUserHandlerReturnsPreconditionFailedIfPasswordHasLessThan6CharactersOrMoreThan50Characters(c *C) {
	// 51 dashes: strings.Join over 52 empty strings gives 51 separators.
	passwords := []string{"123", strings.Join(make([]string, 52), "-")}
	for _, password := range passwords {
		b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"` + password + `"}`)
		request, err := http.NewRequest("POST", "/users", b)
		c.Assert(err, IsNil)
		request.Header.Set("Content-type", "application/json")
		recorder := httptest.NewRecorder()
		err = CreateUser(recorder, request)
		c.Assert(err, NotNil)
		e, ok := err.(*errors.Http)
		c.Assert(ok, Equals, true)
		c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
		c.Assert(e.Message, Equals, "Password length should be least 6 characters and at most 50 characters.")
	}
}

// Creating a user should also POST the user to the Gandalf server.
func (s *AuthSuite) TestCreateUserCreatesUserInGandalf(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	b := bytes.NewBufferString(`{"email":"nobody@me.myself","password":"123456"}`)
	request, err := http.NewRequest("POST", "/users", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	defer s.conn.Users().Remove(bson.M{"email": "nobody@me.myself"})
	err = CreateUser(recorder, request)
	c.Assert(err, IsNil)
	// Inspect what the fake Gandalf server received.
	c.Assert(h.url[0], Equals, "/user")
	expected := `{"name":"nobody@me.myself","keys":{}}`
	c.Assert(string(h.body[0]), Equals, expected)
	c.Assert(h.method[0], Equals, "POST")
}
// A successful login should store a token on the user and return the same
// token in the response body.
func (s *AuthSuite) TestLoginShouldCreateTokenInTheDatabaseAndReturnItWithinTheResponse(c *C) {
	u := auth.User{Email: "nobody@globo.com", Password: "123456"}
	u.Create()
	b := bytes.NewBufferString(`{"password":"123456"}`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, IsNil)
	var user auth.User
	collection := s.conn.Users()
	err = collection.Find(bson.M{"email": "nobody@globo.com"}).One(&user)
	var recorderJson map[string]string
	r, _ := ioutil.ReadAll(recorder.Body)
	json.Unmarshal(r, &recorderJson)
	// The token in the response must match the one persisted on the user.
	c.Assert(recorderJson["token"], Equals, user.Tokens[0].Token)
}

// Malformed JSON in the login body should yield status 400.
func (s *AuthSuite) TestLoginShouldReturnErrorAndBadRequestIfItReceivesAnInvalidJson(c *C) {
	b := bytes.NewBufferString(`"invalid":"json"]`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^Invalid JSON$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
}

// A JSON body without a password field should yield status 400.
func (s *AuthSuite) TestLoginShouldReturnErrorAndBadRequestIfTheJSONDoesNotContainsAPassword(c *C) {
	b := bytes.NewBufferString(`{}`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^You must provide a password to login$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
}

// Logging in with an unknown email should yield status 404.
func (s *AuthSuite) TestLoginShouldReturnErrorAndNotFoundIfTheUserDoesNotExist(c *C) {
	b := bytes.NewBufferString(`{"password":"123456"}`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^User not found$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusNotFound)
}

// A wrong password should yield status 401 (unauthorized).
func (s *AuthSuite) TestLoginShouldreturnErrorIfThePasswordDoesNotMatch(c *C) {
	u := auth.User{Email: "nobody@globo.com", Password: "123456"}
	u.Create()
	b := bytes.NewBufferString(`{"password":"1234567"}`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^Authentication failed, wrong password$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusUnauthorized)
}

// A request body that cannot be read should make Login return an error.
func (s *AuthSuite) TestLoginShouldReturnErrorAndInternalServerErrorIfReadAllFails(c *C) {
	b := s.getTestData("bodyToBeClosed.txt")
	err := b.Close()
	c.Assert(err, IsNil)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
}

// An invalid email in the URL should yield status 412 with emailError.
func (s *AuthSuite) TestLoginShouldReturnPreconditionFailedIfEmailIsNotValid(c *C) {
	b := bytes.NewBufferString(`{"password":"123456"}`)
	request, err := http.NewRequest("POST", "/users/nobody/token?:email=nobody", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
	c.Assert(e.Message, Equals, emailError)
}

// Passwords shorter than 6 or longer than 50 characters should yield
// status 412 with passwordError.
func (s *AuthSuite) TestLoginShouldReturnPreconditionFailedIfPasswordIsLessesThan6CharactersOrGreaterThan50Characters(c *C) {
	passwords := []string{"123", strings.Join(make([]string, 52), "-")}
	for _, password := range passwords {
		b := bytes.NewBufferString(`{"password":"` + password + `"}`)
		request, err := http.NewRequest("POST", "/users/nobody@globo.com/token?:email=nobody@globo.com", b)
		c.Assert(err, IsNil)
		request.Header.Set("Content-type", "application/json")
		recorder := httptest.NewRecorder()
		err = Login(recorder, request)
		c.Assert(err, NotNil)
		e, ok := err.(*errors.Http)
		c.Assert(ok, Equals, true)
		c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
		c.Assert(e.Message, Equals, passwordError)
	}
}
// Creating a team should persist it and include the authenticated user as
// a member.
func (s *AuthSuite) TestCreateTeamHandlerSavesTheTeamInTheDatabaseWithTheAuthenticatedUser(c *C) {
	b := bytes.NewBufferString(`{"name":"timeredbull"}`)
	request, err := http.NewRequest("POST", "/teams", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateTeam(recorder, request, s.user)
	c.Assert(err, IsNil)
	t := new(auth.Team)
	err = s.conn.Teams().Find(bson.M{"_id": "timeredbull"}).One(t)
	defer s.conn.Teams().Remove(bson.M{"_id": "timeredbull"})
	c.Assert(err, IsNil)
	c.Assert(t, ContainsUser, s.user)
}

// Malformed JSON in the team-creation body should yield status 400.
func (s *AuthSuite) TestCreateTeamHandlerReturnsBadRequestIfTheRequestBodyIsAnInvalidJSON(c *C) {
	b := bytes.NewBufferString(`{"name"["invalidjson"]}`)
	request, err := http.NewRequest("POST", "/teams", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
}

// A body without a team name should yield status 400.
func (s *AuthSuite) TestCreateTeamHandlerReturnsBadRequestIfTheNameIsNotGiven(c *C) {
	b := bytes.NewBufferString(`{"genre":"male"}`)
	request, err := http.NewRequest("POST", "/teams", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	c.Assert(err, ErrorMatches, "^You must provide the team name$")
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
}

// A request body that cannot be read should make CreateTeam return an error.
func (s *AuthSuite) TestCreateTeamHandlerReturnsInternalServerErrorIfReadAllFails(c *C) {
	b := s.getTestData("bodyToBeClosed.txt")
	err := b.Close()
	c.Assert(err, IsNil)
	request, err := http.NewRequest("POST", "/teams", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
}

// Creating a team whose name already exists should yield status 409.
func (s *AuthSuite) TestCreateTeamHandlerReturnConflictIfTheTeamToBeCreatedAlreadyExists(c *C) {
	err := s.conn.Teams().Insert(bson.M{"_id": "timeredbull"})
	defer s.conn.Teams().Remove(bson.M{"_id": "timeredbull"})
	c.Assert(err, IsNil)
	b := bytes.NewBufferString(`{"name":"timeredbull"}`)
	request, err := http.NewRequest("POST", "/teams", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = CreateTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusConflict)
	c.Assert(e, ErrorMatches, "^This team already exists$")
}
// keyToMap should convert a slice of keys into a name->content map.
func (s *AuthSuite) TestKeyToMap(c *C) {
	keys := []auth.Key{{Name: "testkey", Content: "somekey"}}
	kMap := keyToMap(keys)
	c.Assert(kMap, DeepEquals, map[string]string{"testkey": "somekey"})
}

// Removing a team the user belongs to should delete it from the database.
func (s *AuthSuite) TestRemoveTeam(c *C) {
	team := auth.Team{Name: "painofsalvation", Users: []string{s.user.Email}}
	err := s.conn.Teams().Insert(team)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
	request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveTeam(recorder, request, s.user)
	c.Assert(err, IsNil)
	n, err := s.conn.Teams().Find(bson.M{"name": team.Name}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}

// Removing a team that does not exist should yield status 404.
func (s *AuthSuite) TestRemoveTeamGives404WhenTeamDoesNotExist(c *C) {
	request, err := http.NewRequest("DELETE", "/teams/unknown?:name=unknown", nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusNotFound)
	c.Assert(e.Message, Equals, `Team "unknown" not found.`)
}

// Removing a team the user is not a member of should also yield 404
// (the team is hidden from non-members).
func (s *AuthSuite) TestRemoveTeamGives404WhenUserDoesNotHaveAccessToTheTeam(c *C) {
	team := auth.Team{Name: "painofsalvation"}
	err := s.conn.Teams().Insert(team)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
	request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusNotFound)
	c.Assert(e.Message, Equals, `Team "painofsalvation" not found.`)
}

// Removing a team that still has access to an app should yield status 403.
func (s *AuthSuite) TestRemoveTeamGives403WhenTeamHasAccessToAnyApp(c *C) {
	team := auth.Team{Name: "evergrey", Users: []string{s.user.Email}}
	err := s.conn.Teams().Insert(team)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
	a := App{Name: "i-should", Teams: []string{team.Name}}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, IsNil)
	defer s.conn.Apps().Remove(bson.M{"name": a.Name})
	request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveTeam(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusForbidden)
	expected := `This team cannot be removed because it have access to apps.
Please remove the apps or revoke these accesses, and try again.`
	c.Assert(e.Message, Equals, expected)
}
func (s *AuthSuite) TestListTeamsListsAllTeamsThatTheUserIsMember(c *C) {
request, err := http.NewRequest("GET", "/teams", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ListTeams(recorder, request, s.user)
c.Assert(err, IsNil)
b, err := ioutil.ReadAll(recorder.Body)
c.Assert(err, IsNil)
var m []map[string]string
err = json.Unmarshal(b, &m)
c.Assert(err, IsNil)
c.Assert(m, DeepEquals, []map[string]string{{"name": s.team.Name}})
}
func (s *AuthSuite) TestListTeamsReturns204IfTheUserHasNoTeam(c *C) {
u := auth.User{Email: "cruiser@gotthard.com", Password: "123"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
request, err := http.NewRequest("GET", "/teams", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ListTeams(recorder, request, &u)
c.Assert(err, IsNil)
c.Assert(recorder.Code, Equals, http.StatusNoContent)
}
// Happy path: a team member (s.user) can add an existing user to the team; the
// team document afterwards contains both users.
func (s *AuthSuite) TestAddUserToTeamShouldAddAUserToATeamIfTheUserAndTheTeamExistAndTheGivenUserIsMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "wolverine@xmen.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
url := "/teams/tsuruteam/wolverine@xmen.com?:team=tsuruteam&:user=wolverine@xmen.com"
request, err := http.NewRequest("PUT", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, IsNil)
t := new(auth.Team)
err = s.conn.Teams().Find(bson.M{"_id": "tsuruteam"}).One(t)
c.Assert(err, IsNil)
c.Assert(t, ContainsUser, s.user)
c.Assert(t, ContainsUser, u)
}
// Verifies AddUserToTeam yields 404 when the target team does not exist.
func (s *AuthSuite) TestAddUserToTeamShouldReturnNotFoundIfThereIsNoTeamWithTheGivenName(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("PUT", "/teams/abc/me@me.me?:team=abc&:user=me@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^Team not found$")
}
// Verifies AddUserToTeam yields 401 when the requester is not a member of the
// team they are trying to add to (here the new user adds themselves).
func (s *AuthSuite) TestAddUserToTeamShouldReturnUnauthorizedIfTheGivenUserIsNotInTheGivenTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "hi@me.me", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
request, err := http.NewRequest("PUT", "/teams/tsuruteam/hi@me.me?:team=tsuruteam&:user=hi@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, u)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusUnauthorized)
c.Assert(e, ErrorMatches, "^You are not authorized to add new users to the team tsuruteam$")
}
// Verifies AddUserToTeam yields 404 when the user to be added does not exist.
func (s *AuthSuite) TestAddUserToTeamShouldReturnNotFoundIfTheEmailInTheBodyDoesNotExistInTheDatabase(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("PUT", "/teams/tsuruteam/hi2@me.me?:team=tsuruteam&:user=hi2@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^User not found$")
}
// Verifies AddUserToTeam yields 409 Conflict when the user is already a member.
func (s *AuthSuite) TestAddUserToTeamShouldReturnConflictIfTheUserIsAlreadyInTheGroup(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
url := fmt.Sprintf("/teams/%s/%s?:team=%s&:user=%s", s.team.Name, s.user.Email, s.team.Name, s.user.Email)
request, err := http.NewRequest("PUT", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
}
// Verifies that adding a user to a team grants the user repository access in
// Gandalf for the team's apps (h records every request the fake server saw;
// index 0 is the addKeyToUser call, index 1 the grant).
// NOTE(review): "Shoul" in the method name is a typo for "Should"; kept as-is
// to avoid renaming the test.
func (s *AuthSuite) TestAddUserToTeamShoulGrantAccessInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "marathon@rush.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
a := App{Name: "i-should", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
err = u.Get()
c.Assert(err, IsNil)
err = addUserToTeam(u.Email, s.team.Name, s.user)
c.Assert(err, IsNil)
c.Check(len(h.url), Equals, 2)
c.Assert(h.url[1], Equals, "/repository/grant")
c.Assert(h.method[1], Equals, "POST")
expected := fmt.Sprintf(`{"repositories":["%s"],"users":["marathon@rush.com"]}`, a.Name)
c.Assert(string(h.body[1]), Equals, expected)
}
// Unit test for the addUserToTeamInDatabase helper: the user email ends up in
// the persisted team's Users slice.
func (s *AuthSuite) TestAddUserToTeamInDatabase(c *C) {
user := &auth.User{Email: "nobody@gmail.com", Password: "123456"}
team := &auth.Team{Name: "myteam"}
err := s.conn.Teams().Insert(team)
c.Assert(err, IsNil)
defer s.conn.Teams().RemoveId(team.Name)
err = addUserToTeamInDatabase(user, team)
c.Assert(err, IsNil)
s.conn.Teams().FindId(team.Name).One(team)
c.Assert(team.Users, DeepEquals, []string{user.Email})
}
// Unit test for the addUserToTeamInGandalf helper: exactly one grant request
// reaches the Gandalf API.
func (s *AuthSuite) TestAddUserToTeamInGandalfShouldCallGandalfApi(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nonee@me.me", Password: "none"}
err := addUserToTeamInGandalf("me@gmail.com", &u, s.team)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 1)
c.Assert(h.url[0], Equals, "/repository/grant")
}
// Happy path: a team member can remove another member; the team document no
// longer contains the removed user afterwards.
func (s *AuthSuite) TestRemoveUserFromTeamShouldRemoveAUserFromATeamIfTheTeamExistAndTheUserIsMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nonee@me.me", Password: "none"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
s.team.AddUser(&u)
s.conn.Teams().Update(bson.M{"_id": s.team.Name}, s.team)
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/nonee@me.me?:team=tsuruteam&:user=nonee@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, IsNil)
err = s.conn.Teams().Find(bson.M{"_id": s.team.Name}).One(s.team)
c.Assert(err, IsNil)
c.Assert(s.team, Not(ContainsUser), &u)
}
// When the removed user is still in another team, only the repositories of the
// team they left are revoked in Gandalf (app1, not app2).
func (s *AuthSuite) TestRemoveUserFromTeamShouldRemoveOnlyAppsInThatTeamInGandalfWhenUserIsInMoreThanOneTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nobody@me.me", Password: "none"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
s.team.AddUser(&u)
s.conn.Teams().UpdateId(s.team.Name, s.team)
team2 := auth.Team{Name: "team2", Users: []string{u.Email}}
err = s.conn.Teams().Insert(&team2)
c.Assert(err, IsNil)
defer s.conn.Teams().RemoveId(team2.Name)
app1 := app.App{Name: "app1", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(&app1)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": app1.Name})
app2 := app.App{Name: "app2", Teams: []string{team2.Name}}
err = s.conn.Apps().Insert(&app2)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": app2.Name})
err = removeUserFromTeam(u.Email, s.team.Name, s.user)
c.Assert(err, IsNil)
expected := `{"repositories":["app1"],"users":["nobody@me.me"]}`
c.Assert(len(h.body), Equals, 1)
c.Assert(string(h.body[0]), Equals, expected)
s.conn.Teams().FindId(s.team.Name).One(s.team)
c.Assert(s.team, Not(ContainsUser), &u) // just in case
}
// Verifies RemoveUserFromTeam yields 404 for an unknown team name.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnNotFoundIfTheTeamDoesNotExist(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=unknown&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^Team not found$")
}
// Verifies RemoveUserFromTeam yields 401 when the requester is not a member of
// the team.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnUnauthorizedIfTheGivenUserIsNotMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=tsuruteam&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, &auth.User{Email: "unknown@gmail.com"})
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusUnauthorized)
c.Assert(e, ErrorMatches, "^You are not authorized to remove a member from the team tsuruteam")
}
// Verifies RemoveUserFromTeam yields 404 when the user to remove (none@me.me)
// is not a member of the team; the deferred closure restores the fixture team.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnNotFoundWhenTheUserIsNotMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "nobody@me.me", Password: "132"}
s.team.AddUser(u)
s.conn.Teams().Update(bson.M{"_id": s.team.Name}, s.team)
defer func(t *auth.Team, u *auth.User) {
s.team.RemoveUser(u)
s.conn.Teams().Update(bson.M{"_id": t.Name}, t)
}(s.team, u)
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=tsuruteam&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
}
// Verifies RemoveUserFromTeam refuses (403) to remove the sole member of a
// team, so that a team can never be left orphaned.
//
// Fix: removed a forgotten debugging statement (fmt.Println(s.team.Users))
// that polluted the test output; it had no effect on the assertions.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnForbiddenIfTheUserIsTheLastInTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
url := "/teams/tsuruteam/whydidifall@thewho.com?:team=tsuruteam&:user=whydidifall@thewho.com"
request, err := http.NewRequest("DELETE", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusForbidden)
c.Assert(e, ErrorMatches, "^You can not remove this user from this team, because it is the last user within the team, and a team can not be orphaned$")
}
// End-to-end check that removing a user from a team revokes their access to
// the team's repositories in Gandalf. The fake server records each request:
// index 0 is the key upload, 1 the grant, 2 the revoke under test.
func (s *AuthSuite) TestRemoveUserFromTeamRevokesAccessInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "pomar@nando-reis.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
err = u.Get()
c.Assert(err, IsNil)
err = addUserToTeam("pomar@nando-reis.com", s.team.Name, s.user)
c.Assert(err, IsNil)
a := struct {
Name string
Teams []string
}{Name: "myApp", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
err = removeUserFromTeam("pomar@nando-reis.com", s.team.Name, s.user)
c.Assert(err, IsNil)
c.Assert(h.url[2], Equals, "/repository/revoke")
c.Assert(h.method[2], Equals, "DELETE")
expected := `{"repositories":["myApp"],"users":["pomar@nando-reis.com"]}`
c.Assert(string(h.body[2]), Equals, expected)
}
// func (s *AuthSuite) TestRemoveUserFromTeamInGandalf(c *C) {
// h := testHandler{}
// ts := s.startGandalfTestServer(&h)
// defer ts.Close()
// removeUserFromTeamInGandalf(email)
// }
// Happy path for the AddKeyToUser handler: posting {"key":"my-key"} attaches
// the key to the requesting user; the deferred closure undoes the change.
func (s *AuthSuite) TestAddKeyToUserAddsAKeyToTheUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
defer func() {
s.user.RemoveKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("POST", "/users/keys", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, IsNil)
s.user.Get()
c.Assert(s.user, HasKey, "my-key")
}
// A pre-closed request body must surface as an error from the handler
// (bodyToBeClosed.txt is a fixture whose ReadCloser gets closed up front).
func (s *AuthSuite) TestAddKeyToUserReturnsErrorIfTheReadingOfTheBodyFails(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := s.getTestData("bodyToBeClosed.txt")
b.Close()
request, err := http.NewRequest("POST", "/users/keys", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
}
// Malformed JSON in the body yields 400 "Invalid JSON".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheJsonIsInvalid(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`"aaaa}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Invalid JSON$")
}
// A JSON body with no "key" field yields 400 "Missing key".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheKeyIsNotPresent(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// An empty "key" value is treated the same as a missing one: 400 "Missing key".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheKeyIsEmpty(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{"key":""}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// Re-adding a key the user already owns yields 409 Conflict.
func (s *AuthSuite) TestAddKeyToUserReturnsConflictIfTheKeyIsAlreadyPresent(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
s.user.AddKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
defer func() {
s.user.RemoveKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
c.Assert(e.Message, Equals, "User already has this key")
}
// Verifies addKeyToUser pushes the key to Gandalf via POST /user/<email>/key
// with the key named "<email>-1" (and never with a ".pub" suffix).
func (s *AuthSuite) TestAddKeyAddKeyToUserInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "francisco@franciscosouza.net", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
defer func() {
removeKeyFromUser("my-key", u)
s.conn.Users().RemoveAll(bson.M{"email": u.Email})
}()
c.Assert(u.Keys[0].Name, Not(Matches), "\\.pub$")
expectedUrl := fmt.Sprintf("/user/%s/key", u.Email)
c.Assert(h.url[0], Equals, expectedUrl)
c.Assert(h.method[0], Equals, "POST")
expected := fmt.Sprintf(`{"%s-1":"my-key"}`, u.Email)
c.Assert(string(h.body[0]), Equals, expected)
}
// With no Gandalf server running, addKeyToUser must fail and must NOT persist
// the key in the database (no partial writes).
func (s *AuthSuite) TestAddKeyToUserShouldNotInsertKeyInDatabaseWhenGandalfAdditionFails(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
err = addKeyToUser("my-key", u)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Failed to add key to git server: Failed to connect to Gandalf server, it's probably down.")
defer func() {
s.conn.Users().RemoveAll(bson.M{"email": u.Email})
}()
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{})
}
// Unit test for addKeyInDatabase: the key is persisted on the user document.
func (s *AuthSuite) TestAddKeyInDatabaseShouldStoreUsersKeyInDB(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Content: "my-ssh-key", Name: "key1"}
err = addKeyInDatabase(&key, u)
c.Assert(err, IsNil)
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{key})
}
// Unit test for addKeyInGandalf: exactly one request hits /user/<email>/key.
func (s *AuthSuite) TestAddKeyInGandalfShouldCallGandalfApi(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Content: "my-ssh-key", Name: "key1"}
err = addKeyInGandalf(&key, u)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 1)
c.Assert(h.url[0], Equals, "/user/me@gmail.com/key")
}
// Unit test for removeKeyFromGandalf: after an add (request 0), the removal
// (request 1) targets /user/<email>/key/<key name>.
func (s *AuthSuite) TestRemoveKeyFromGandalfCallsGandalfApi(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
key := auth.Key{Name: "mykey", Content: "my-ssh-key"}
err = addKeyInGandalf(&key, u)
c.Assert(err, IsNil)
err = removeKeyFromGandalf(&key, u)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 2) // add and remove
expected := fmt.Sprintf("/user/me@gmail.com/key/%s", key.Name)
c.Assert(h.url[1], Matches, expected)
}
// Unit test for removeKeyFromDatabase: round-trips add+remove and checks the
// user document ends up with an empty key list.
func (s *AuthSuite) TestRemoveKeyFromDatabase(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Name: "mykey", Content: "my-ssh-key"}
err = addKeyInDatabase(&key, u)
c.Assert(err, IsNil)
err = removeKeyFromDatabase(&key, u)
c.Assert(err, IsNil)
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{})
}
// Happy path for the RemoveKeyFromUser handler: the key added in setup is gone
// from the user afterwards. The deferred cleanup only removes the key if the
// handler failed to (so the fixture user stays pristine either way).
func (s *AuthSuite) TestRemoveKeyHandlerRemovesTheKeyFromTheUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
addKeyToUser("my-key", s.user)
defer func() {
if s.user.HasKey(auth.Key{Content: "my-key"}) {
removeKeyFromUser("my-key", s.user)
}
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, IsNil)
s.user.Get()
c.Assert(s.user, Not(HasKey), "my-key")
}
// Verifies the handler also issues the DELETE against Gandalf; request index 1
// is the removal (index 0 was the setup addKeyToUser call).
func (s *AuthSuite) TestRemoveKeyHandlerCallsGandalfRemoveKey(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
err := addKeyToUser("my-key", s.user) //fills the first position in h properties
c.Assert(err, IsNil)
defer func() {
if s.user.HasKey(auth.Key{Content: "my-key"}) {
removeKeyFromUser("my-key", s.user)
}
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, IsNil)
c.Assert(h.url[1], Equals, fmt.Sprintf("/user/%s/key/%s-%d", s.user.Email, s.user.Email, len(s.user.Keys)+1))
c.Assert(h.method[1], Equals, "DELETE")
c.Assert(string(h.body[1]), Equals, "null")
}
// A pre-closed request body must surface as an error from the handler.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsErrorInCaseOfAnyIOFailure(c *C) {
b := s.getTestData("bodyToBeClosed.txt")
b.Close()
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, NotNil)
}
// Malformed JSON in the body yields 400 "Invalid JSON".
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheJSONIsInvalid(c *C) {
b := bytes.NewBufferString(`invalid"json}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Invalid JSON$")
}
// A JSON body with no "key" field yields 400 "Missing key".
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsNotPresent(c *C) {
b := bytes.NewBufferString(`{}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// An empty "key" value is treated like a missing one: 400 "Missing key".
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsEmpty(c *C) {
b := bytes.NewBufferString(`{"key":""}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// Removing a key the user does not own yields 404.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsNotFoundIfTheUserDoesNotHaveTheKey(c *C) {
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
}
// Happy path for the RemoveUser handler: the user document is deleted.
func (s *AuthSuite) TestRemoveUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "her-voices@painofsalvation.com"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
request, err := http.NewRequest("DELETE", "/users", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUser(recorder, request, &u)
c.Assert(err, IsNil)
n, err := s.conn.Users().Find(bson.M{"email": u.Email}).Count()
c.Assert(err, IsNil)
c.Assert(n, Equals, 0)
}
// Verifies RemoveUser refuses (403) when the user is the last member of some
// team, because removing them would orphan the team.
func (s *AuthSuite) TestRemoveUserWithTheUserBeingLastMemberOfATeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
t := auth.Team{Name: "painofsalvation", Users: []string{u.Email}}
err = s.conn.Teams().Insert(t)
c.Assert(err, IsNil)
defer s.conn.Teams().Remove(bson.M{"_id": t.Name})
request, err := http.NewRequest("DELETE", "/users", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUser(recorder, request, &u)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusForbidden)
expected := `This user is the last member of the team "painofsalvation", so it cannot be removed.
Please remove the team, them remove the user.`
c.Assert(e.Message, Equals, expected)
}
// When the team has other members, removing the user drops only their
// membership; the team survives with the remaining member.
func (s *AuthSuite) TestRemoveUserShouldRemoveTheUserFromAllTeamsThatHeIsMember(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
t := auth.Team{Name: "painofsalvation", Users: []string{u.Email, s.user.Email}}
err = s.conn.Teams().Insert(t)
c.Assert(err, IsNil)
defer s.conn.Teams().Remove(bson.M{"_id": t.Name})
request, err := http.NewRequest("DELETE", "/users", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUser(recorder, request, &u)
c.Assert(err, IsNil)
err = s.conn.Teams().Find(bson.M{"_id": t.Name}).One(&t)
c.Assert(err, IsNil)
c.Assert(t.Users, HasLen, 1)
c.Assert(t.Users[0], Equals, s.user.Email)
}
// App is a minimal stand-in for app documents inserted directly into the apps
// collection by these tests (only the fields the assertions need).
type App struct {
Name string
Teams []string
}
// Verifies RemoveUser revokes the user's repository access in Gandalf for the
// apps of the teams they belonged to (the first recorded request is the revoke).
func (s *AuthSuite) TestRemoveUserRevokesAccessInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
t := auth.Team{Name: "painofsalvation", Users: []string{u.Email, s.user.Email}}
err = s.conn.Teams().Insert(t)
c.Assert(err, IsNil)
defer s.conn.Teams().Remove(bson.M{"_id": t.Name})
a := struct {
Name string
Teams []string
}{Name: "myApp", Teams: []string{t.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
request, err := http.NewRequest("DELETE", "/users", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUser(recorder, request, &u)
c.Assert(err, IsNil)
c.Assert(h.url[0], Equals, "/repository/revoke")
c.Assert(h.method[0], Equals, "DELETE")
expected := `{"repositories":["myApp"],"users":["of-two-beginnings@painofsalvation.com"]}`
c.Assert(string(h.body[0]), Equals, expected)
}
// Happy path for ChangePassword: the stored password becomes the PBKDF2-SHA512
// hash of the new value (the local hashPassword mirrors auth's hashing:
// salt "tsuru-salt", 4096 iterations, key length len(salt)*8).
func (s *AuthSuite) TestChangePasswordHandler(c *C) {
body := bytes.NewBufferString(`{"old":"123","new":"123456"}`)
request, err := http.NewRequest("PUT", "/users/password", body)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ChangePassword(recorder, request, s.user)
c.Assert(err, IsNil)
otherUser := *s.user
err = otherUser.Get()
c.Assert(err, IsNil)
hashPassword := func(password string) string {
salt := []byte("tsuru-salt")
return fmt.Sprintf("%x", pbkdf2.Key([]byte(password), salt, 4096, len(salt)*8, sha512.New))
}
expectedPassword := hashPassword("123456")
c.Assert(otherUser.Password, Equals, expectedPassword)
}
// A new password shorter than 6 characters yields 412 Precondition Failed.
func (s *AuthSuite) TestChangePasswordReturns412IfNewPasswordIsInvalid(c *C) {
body := bytes.NewBufferString(`{"old":"123","new":"1234"}`)
request, err := http.NewRequest("PUT", "/users/password", body)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ChangePassword(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
c.Assert(e.Message, Equals, "Password length should be least 6 characters and at most 50 characters.")
}
// NOTE(review): the method name says 404 but the assertion below expects 403
// (StatusForbidden) — the name looks stale; confirm and consider renaming.
func (s *AuthSuite) TestChangePasswordReturns404IfOldPasswordDidntMatch(c *C) {
body := bytes.NewBufferString(`{"old":"1234","new":"123456"}`)
request, err := http.NewRequest("PUT", "/users/password", body)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ChangePassword(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusForbidden)
c.Assert(e.Message, Equals, "The given password didn't match the user's current password.")
}
// Malformed JSON in the body yields 400 "Invalid JSON.".
func (s *AuthSuite) TestChangePasswordReturns400IfRequestBodyIsInvalidJSON(c *C) {
body := bytes.NewBufferString(`{"invalid:"json`)
request, err := http.NewRequest("PUT", "/users/password", body)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ChangePassword(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e.Message, Equals, "Invalid JSON.")
}
// Table-driven check: any body lacking either password field yields 400.
func (s *AuthSuite) TestChangePasswordReturns400IfJSONDoesNotIncludeBothOldAndNewPasswords(c *C) {
bodies := []string{`{"old": "something"}`, `{"new":"something"}`, "{}", "null"}
for _, body := range bodies {
b := bytes.NewBufferString(body)
request, err := http.NewRequest("PUT", "/users/password", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ChangePassword(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e.Message, Equals, "Both the old and the new passwords are required.")
}
}
api/auth_test: removed forgotten print
// Copyright 2013 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"code.google.com/p/go.crypto/pbkdf2"
"crypto/sha512"
"encoding/json"
"fmt"
"github.com/globocom/config"
"github.com/globocom/tsuru/app"
"github.com/globocom/tsuru/auth"
"github.com/globocom/tsuru/db"
"github.com/globocom/tsuru/errors"
"io"
"io/ioutil"
"labix.org/v2/mgo/bson"
. "launchpad.net/gocheck"
"net/http"
"net/http/httptest"
"os"
"path"
"strconv"
"strings"
)
// AuthSuite holds the shared fixtures for the auth handler tests: a database
// connection plus one pre-created user and team (see createUserAndTeam).
type AuthSuite struct {
conn *db.Storage
team *auth.Team
user *auth.User
}
// Register the suite with gocheck.
var _ = Suite(&AuthSuite{})
// SetUpSuite loads the test configuration, points the database at a dedicated
// test database, opens the connection and seeds the fixture user/team.
func (s *AuthSuite) SetUpSuite(c *C) {
err := config.ReadConfigFile("../etc/tsuru.conf")
c.Assert(err, IsNil)
config.Set("database:name", "tsuru_api_auth_test")
s.conn, err = db.Conn()
c.Assert(err, IsNil)
s.createUserAndTeam(c)
}
// TearDownSuite drops the whole test database.
func (s *AuthSuite) TearDownSuite(c *C) {
s.conn.Apps().Database.DropDatabase()
}
// TearDownTest removes every user and team except the suite fixtures, and
// resets the fixture user's password to "123" in case a test changed it.
func (s *AuthSuite) TearDownTest(c *C) {
_, err := s.conn.Users().RemoveAll(bson.M{"email": bson.M{"$ne": s.user.Email}})
c.Assert(err, IsNil)
_, err = s.conn.Teams().RemoveAll(bson.M{"_id": bson.M{"$ne": s.team.Name}})
c.Assert(err, IsNil)
s.user.Password = "123"
s.user.HashPassword()
err = s.user.Update()
c.Assert(err, IsNil)
}
// createUserAndTeam seeds the fixture user (whydidifall@thewho.com) and the
// fixture team (tsuruteam) containing that user.
func (s *AuthSuite) createUserAndTeam(c *C) {
s.user = &auth.User{Email: "whydidifall@thewho.com", Password: "123"}
err := s.user.Create()
c.Assert(err, IsNil)
s.team = &auth.Team{Name: "tsuruteam", Users: []string{s.user.Email}}
err = s.conn.Teams().Insert(s.team)
c.Assert(err, IsNil)
}
// starts a new httptest.Server and returns it
// Also changes git:host, git:port and git:protocol to match the server's url
// so that code under test talks to this fake Gandalf instead of a real one.
// The caller is responsible for ts.Close().
func (s *AuthSuite) startGandalfTestServer(h http.Handler) *httptest.Server {
ts := httptest.NewServer(h)
// ts.URL has the form "http://127.0.0.1:PORT"; split it into protocol,
// host and port to feed the three separate config keys.
pieces := strings.Split(ts.URL, "://")
protocol := pieces[0]
hostPart := strings.Split(pieces[1], ":")
port := hostPart[1]
host := hostPart[0]
config.Set("git:host", host)
// parse error deliberately ignored: httptest always yields a numeric port
portInt, _ := strconv.ParseInt(port, 10, 0)
config.Set("git:port", portInt)
config.Set("git:protocol", protocol)
return ts
}
// getTestData opens a fixture file under ./testdata, built by joining the
// given path components, and returns it as an io.ReadCloser. The open error
// is deliberately ignored; callers that want a broken reader close it first.
//
// Fix: the previous code did append([]string{}, ".", "testdata"), which
// discarded the caller-supplied components entirely — every call resolved to
// the "./testdata" directory instead of the requested file (e.g.
// "./testdata/bodyToBeClosed.txt"). Prepend the prefix and keep p.
func (s *AuthSuite) getTestData(p ...string) io.ReadCloser {
p = append([]string{".", "testdata"}, p...)
fp := path.Join(p...)
f, _ := os.OpenFile(fp, os.O_RDONLY, 0)
return f
}
// hasKeyChecker is a gocheck custom checker asserting that a *auth.User owns a
// key with the given content. Used as: c.Assert(user, HasKey, "content").
type hasKeyChecker struct{}
// Info describes the checker to gocheck (name and parameter labels).
func (c *hasKeyChecker) Info() *CheckerInfo {
return &CheckerInfo{Name: "HasKey", Params: []string{"user", "key"}}
}
// Check validates the parameter types and delegates to user.HasKey.
func (c *hasKeyChecker) Check(params []interface{}, names []string) (bool, string) {
if len(params) != 2 {
return false, "you should provide two parameters"
}
user, ok := params[0].(*auth.User)
if !ok {
return false, "first parameter should be a user pointer"
}
content, ok := params[1].(string)
if !ok {
return false, "second parameter should be a string"
}
key := auth.Key{Content: content}
return user.HasKey(key), ""
}
// HasKey is the exported checker instance used in assertions.
var HasKey Checker = &hasKeyChecker{}
// userPresenceChecker is a gocheck custom checker asserting that a *auth.Team
// contains a *auth.User. Used as: c.Assert(team, ContainsUser, user).
type userPresenceChecker struct{}
// Info describes the checker to gocheck (name and parameter labels).
func (c *userPresenceChecker) Info() *CheckerInfo {
return &CheckerInfo{Name: "ContainsUser", Params: []string{"team", "user"}}
}
// Check validates the parameter types and delegates to team.ContainsUser.
func (c *userPresenceChecker) Check(params []interface{}, names []string) (bool, string) {
team, ok := params[0].(*auth.Team)
if !ok {
return false, "first parameter should be a pointer to a team instance"
}
user, ok := params[1].(*auth.User)
if !ok {
return false, "second parameter should be a pointer to a user instance"
}
return team.ContainsUser(user), ""
}
// ContainsUser is the exported checker instance used in assertions.
var ContainsUser Checker = &userPresenceChecker{}
func (s *AuthSuite) TestCreateUserHandlerSavesTheUserInTheDatabase(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, IsNil)
u := auth.User{Email: "nobody@globo.com"}
err = u.Get()
c.Assert(err, IsNil)
}
func (s *AuthSuite) TestCreateUserHandlerReturnsStatus201AfterCreateTheUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, IsNil)
c.Assert(recorder.Code, Equals, 201)
}
func (s *AuthSuite) TestCreateUserHandlerReturnErrorIfReadingBodyFails(c *C) {
b := s.getTestData("bodyToBeClosed.txt")
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
request.Body.Close()
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^.*bad file descriptor$")
}
// Malformed JSON in the request body must yield a 400 Bad Request wrapped
// in an *errors.Http carrying the json decoder's message.
func (s *AuthSuite) TestCreateUserHandlerReturnErrorAndBadRequestIfInvalidJSONIsGiven(c *C) {
b := bytes.NewBufferString(`["invalid json":"i'm invalid"]`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^invalid character.*$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
}
// Registering an email that already exists must answer 409 Conflict.
// The user is pre-created directly through auth.User to force the collision.
func (s *AuthSuite) TestCreateUserHandlerReturnErrorAndConflictIfItFailsToCreateUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nobody@globo.com", Password: "123456"}
u.Create()
b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"123456"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "This email is already registered")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
}
// An email without a domain part must be rejected with 412 Precondition Failed.
func (s *AuthSuite) TestCreateUserHandlerReturnsPreconditionFailedIfEmailIsNotValid(c *C) {
b := bytes.NewBufferString(`{"email":"nobody","password":"123456"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
c.Assert(e.Message, Equals, "Invalid email.")
}
// Passwords shorter than 6 or longer than 50 characters must be rejected
// with 412. strings.Join(make([]string, 52), "-") builds a 51-char string.
func (s *AuthSuite) TestCreateUserHandlerReturnsPreconditionFailedIfPasswordHasLessThan6CharactersOrMoreThan50Characters(c *C) {
passwords := []string{"123", strings.Join(make([]string, 52), "-")}
for _, password := range passwords {
b := bytes.NewBufferString(`{"email":"nobody@globo.com","password":"` + password + `"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateUser(recorder, request)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
c.Assert(e.Message, Equals, "Password length should be least 6 characters and at most 50 characters.")
}
}
// CreateUser must mirror the new user into Gandalf: one POST /user call
// carrying the email as the repository user name and an empty key map.
func (s *AuthSuite) TestCreateUserCreatesUserInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{"email":"nobody@me.myself","password":"123456"}`)
request, err := http.NewRequest("POST", "/users", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
defer s.conn.Users().Remove(bson.M{"email": "nobody@me.myself"})
err = CreateUser(recorder, request)
c.Assert(err, IsNil)
c.Assert(h.url[0], Equals, "/user")
expected := `{"name":"nobody@me.myself","keys":{}}`
c.Assert(string(h.body[0]), Equals, expected)
c.Assert(h.method[0], Equals, "POST")
}
// Login must persist a token on the user document and echo the same token
// in the JSON response body.
//
// Fix: the errors from the mongo lookup and from json.Unmarshal were
// silently dropped; a failed lookup would panic on user.Tokens[0] instead
// of failing with a useful message.
func (s *AuthSuite) TestLoginShouldCreateTokenInTheDatabaseAndReturnItWithinTheResponse(c *C) {
	u := auth.User{Email: "nobody@globo.com", Password: "123456"}
	u.Create()
	b := bytes.NewBufferString(`{"password":"123456"}`)
	request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
	c.Assert(err, IsNil)
	request.Header.Set("Content-type", "application/json")
	recorder := httptest.NewRecorder()
	err = Login(recorder, request)
	c.Assert(err, IsNil)
	var user auth.User
	collection := s.conn.Users()
	err = collection.Find(bson.M{"email": "nobody@globo.com"}).One(&user)
	c.Assert(err, IsNil) // was ignored: a failed lookup panics on Tokens[0] below
	var recorderJson map[string]string
	r, err := ioutil.ReadAll(recorder.Body)
	c.Assert(err, IsNil)
	err = json.Unmarshal(r, &recorderJson)
	c.Assert(err, IsNil) // was ignored: an invalid body would fail as a confusing token mismatch
	c.Assert(recorderJson["token"], Equals, user.Tokens[0].Token)
}
// Malformed JSON in the login body must answer 400 with "Invalid JSON".
func (s *AuthSuite) TestLoginShouldReturnErrorAndBadRequestIfItReceivesAnInvalidJson(c *C) {
b := bytes.NewBufferString(`"invalid":"json"]`)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^Invalid JSON$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
}
// A syntactically valid body without a "password" key must answer 400.
func (s *AuthSuite) TestLoginShouldReturnErrorAndBadRequestIfTheJSONDoesNotContainsAPassword(c *C) {
b := bytes.NewBufferString(`{}`)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^You must provide a password to login$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
}
// Logging in with an email that is not registered must answer 404.
func (s *AuthSuite) TestLoginShouldReturnErrorAndNotFoundIfTheUserDoesNotExist(c *C) {
b := bytes.NewBufferString(`{"password":"123456"}`)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^User not found$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
}
// A wrong password for an existing user must answer 401 Unauthorized.
func (s *AuthSuite) TestLoginShouldreturnErrorIfThePasswordDoesNotMatch(c *C) {
u := auth.User{Email: "nobody@globo.com", Password: "123456"}
u.Create()
b := bytes.NewBufferString(`{"password":"1234567"}`)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^Authentication failed, wrong password$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusUnauthorized)
}
// Closing the body before Login reads it must make the handler return an error.
func (s *AuthSuite) TestLoginShouldReturnErrorAndInternalServerErrorIfReadAllFails(c *C) {
b := s.getTestData("bodyToBeClosed.txt")
err := b.Close()
c.Assert(err, IsNil)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/tokens?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
}
// An invalid email in the URL must answer 412 with the shared emailError message.
func (s *AuthSuite) TestLoginShouldReturnPreconditionFailedIfEmailIsNotValid(c *C) {
b := bytes.NewBufferString(`{"password":"123456"}`)
request, err := http.NewRequest("POST", "/users/nobody/token?:email=nobody", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
c.Assert(e.Message, Equals, emailError)
}
// Passwords outside the 6-50 character range must answer 412 with the
// shared passwordError message (51-char string built via strings.Join).
func (s *AuthSuite) TestLoginShouldReturnPreconditionFailedIfPasswordIsLessesThan6CharactersOrGreaterThan50Characters(c *C) {
passwords := []string{"123", strings.Join(make([]string, 52), "-")}
for _, password := range passwords {
b := bytes.NewBufferString(`{"password":"` + password + `"}`)
request, err := http.NewRequest("POST", "/users/nobody@globo.com/token?:email=nobody@globo.com", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = Login(recorder, request)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusPreconditionFailed)
c.Assert(e.Message, Equals, passwordError)
}
}
// POST /teams must store the team with the authenticated user as a member.
// Note: teams are keyed by name, stored as mongo "_id".
func (s *AuthSuite) TestCreateTeamHandlerSavesTheTeamInTheDatabaseWithTheAuthenticatedUser(c *C) {
b := bytes.NewBufferString(`{"name":"timeredbull"}`)
request, err := http.NewRequest("POST", "/teams", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateTeam(recorder, request, s.user)
c.Assert(err, IsNil)
t := new(auth.Team)
err = s.conn.Teams().Find(bson.M{"_id": "timeredbull"}).One(t)
defer s.conn.Teams().Remove(bson.M{"_id": "timeredbull"})
c.Assert(err, IsNil)
c.Assert(t, ContainsUser, s.user)
}
// Malformed JSON in the team creation body must answer 400.
func (s *AuthSuite) TestCreateTeamHandlerReturnsBadRequestIfTheRequestBodyIsAnInvalidJSON(c *C) {
b := bytes.NewBufferString(`{"name"["invalidjson"]}`)
request, err := http.NewRequest("POST", "/teams", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
}
// A valid JSON body without a "name" key must answer 400 with a clear message.
func (s *AuthSuite) TestCreateTeamHandlerReturnsBadRequestIfTheNameIsNotGiven(c *C) {
b := bytes.NewBufferString(`{"genre":"male"}`)
request, err := http.NewRequest("POST", "/teams", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateTeam(recorder, request, s.user)
c.Assert(err, NotNil)
c.Assert(err, ErrorMatches, "^You must provide the team name$")
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
}
// A body closed before the handler reads it must make CreateTeam error out.
func (s *AuthSuite) TestCreateTeamHandlerReturnsInternalServerErrorIfReadAllFails(c *C) {
b := s.getTestData("bodyToBeClosed.txt")
err := b.Close()
c.Assert(err, IsNil)
request, err := http.NewRequest("POST", "/teams", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateTeam(recorder, request, s.user)
c.Assert(err, NotNil)
}
// Creating a team whose name is already taken must answer 409 Conflict.
func (s *AuthSuite) TestCreateTeamHandlerReturnConflictIfTheTeamToBeCreatedAlreadyExists(c *C) {
err := s.conn.Teams().Insert(bson.M{"_id": "timeredbull"})
defer s.conn.Teams().Remove(bson.M{"_id": "timeredbull"})
c.Assert(err, IsNil)
b := bytes.NewBufferString(`{"name":"timeredbull"}`)
request, err := http.NewRequest("POST", "/teams", b)
c.Assert(err, IsNil)
request.Header.Set("Content-type", "application/json")
recorder := httptest.NewRecorder()
err = CreateTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
c.Assert(e, ErrorMatches, "^This team already exists$")
}
// TestKeyToMap checks that keyToMap converts a slice of auth.Key values
// into a name-to-content map.
func (s *AuthSuite) TestKeyToMap(c *C) {
	input := []auth.Key{{Name: "testkey", Content: "somekey"}}
	expected := map[string]string{"testkey": "somekey"}
	c.Assert(keyToMap(input), DeepEquals, expected)
}
// DELETE /teams/:name must remove the team from the database.
//
// Fix: the post-condition counted documents matching {"name": ...}, but
// teams in this file are keyed by "_id" (see the inserts/lookups above),
// so that query matched nothing and the n == 0 assertion passed even if
// the team was never removed. Query by "_id" so the check is meaningful.
func (s *AuthSuite) TestRemoveTeam(c *C) {
	team := auth.Team{Name: "painofsalvation", Users: []string{s.user.Email}}
	err := s.conn.Teams().Insert(team)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
	request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveTeam(recorder, request, s.user)
	c.Assert(err, IsNil)
	n, err := s.conn.Teams().Find(bson.M{"_id": team.Name}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}
// Removing a team that does not exist must answer 404 with a message that
// names the missing team.
func (s *AuthSuite) TestRemoveTeamGives404WhenTeamDoesNotExist(c *C) {
	req, err := http.NewRequest("DELETE", "/teams/unknown?:name=unknown", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = RemoveTeam(rec, req, s.user)
	c.Assert(err, NotNil)
	httpErr, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(httpErr.Code, Equals, http.StatusNotFound)
	c.Assert(httpErr.Message, Equals, `Team "unknown" not found.`)
}
// A team the requesting user is not a member of must look like 404 (not 403),
// so outsiders cannot probe for team existence.
func (s *AuthSuite) TestRemoveTeamGives404WhenUserDoesNotHaveAccessToTheTeam(c *C) {
team := auth.Team{Name: "painofsalvation"}
err := s.conn.Teams().Insert(team)
c.Assert(err, IsNil)
defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e.Message, Equals, `Team "painofsalvation" not found.`)
}
// A team that still grants access to at least one app must not be removable:
// the handler answers 403 with a two-line explanation.
func (s *AuthSuite) TestRemoveTeamGives403WhenTeamHasAccessToAnyApp(c *C) {
team := auth.Team{Name: "evergrey", Users: []string{s.user.Email}}
err := s.conn.Teams().Insert(team)
c.Assert(err, IsNil)
defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
a := App{Name: "i-should", Teams: []string{team.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
request, err := http.NewRequest("DELETE", fmt.Sprintf("/teams/%s?:name=%s", team.Name, team.Name), nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusForbidden)
expected := `This team cannot be removed because it have access to apps.
Please remove the apps or revoke these accesses, and try again.`
c.Assert(e.Message, Equals, expected)
}
// GET /teams must return a JSON array containing only the suite user's team.
func (s *AuthSuite) TestListTeamsListsAllTeamsThatTheUserIsMember(c *C) {
request, err := http.NewRequest("GET", "/teams", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ListTeams(recorder, request, s.user)
c.Assert(err, IsNil)
b, err := ioutil.ReadAll(recorder.Body)
c.Assert(err, IsNil)
var m []map[string]string
err = json.Unmarshal(b, &m)
c.Assert(err, IsNil)
c.Assert(m, DeepEquals, []map[string]string{{"name": s.team.Name}})
}
// A user who is a member of no team must get 204 No Content, not an empty list.
func (s *AuthSuite) TestListTeamsReturns204IfTheUserHasNoTeam(c *C) {
u := auth.User{Email: "cruiser@gotthard.com", Password: "123"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
request, err := http.NewRequest("GET", "/teams", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = ListTeams(recorder, request, &u)
c.Assert(err, IsNil)
c.Assert(recorder.Code, Equals, http.StatusNoContent)
}
// PUT /teams/:team/:user by a team member must add the target user while
// keeping existing members.
func (s *AuthSuite) TestAddUserToTeamShouldAddAUserToATeamIfTheUserAndTheTeamExistAndTheGivenUserIsMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "wolverine@xmen.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
url := "/teams/tsuruteam/wolverine@xmen.com?:team=tsuruteam&:user=wolverine@xmen.com"
request, err := http.NewRequest("PUT", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, IsNil)
t := new(auth.Team)
err = s.conn.Teams().Find(bson.M{"_id": "tsuruteam"}).One(t)
c.Assert(err, IsNil)
c.Assert(t, ContainsUser, s.user)
c.Assert(t, ContainsUser, u)
}
// Adding a user to a nonexistent team must answer 404 "Team not found".
func (s *AuthSuite) TestAddUserToTeamShouldReturnNotFoundIfThereIsNoTeamWithTheGivenName(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("PUT", "/teams/abc/me@me.me?:team=abc&:user=me@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^Team not found$")
}
// A requester who is not a member of the team cannot add users to it: 401.
func (s *AuthSuite) TestAddUserToTeamShouldReturnUnauthorizedIfTheGivenUserIsNotInTheGivenTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "hi@me.me", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
request, err := http.NewRequest("PUT", "/teams/tsuruteam/hi@me.me?:team=tsuruteam&:user=hi@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, u)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusUnauthorized)
c.Assert(e, ErrorMatches, "^You are not authorized to add new users to the team tsuruteam$")
}
// Adding an email that matches no registered user must answer 404 "User not found".
func (s *AuthSuite) TestAddUserToTeamShouldReturnNotFoundIfTheEmailInTheBodyDoesNotExistInTheDatabase(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("PUT", "/teams/tsuruteam/hi2@me.me?:team=tsuruteam&:user=hi2@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^User not found$")
}
// Adding a user who is already a member must answer 409 Conflict.
func (s *AuthSuite) TestAddUserToTeamShouldReturnConflictIfTheUserIsAlreadyInTheGroup(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
url := fmt.Sprintf("/teams/%s/%s?:team=%s&:user=%s", s.team.Name, s.user.Email, s.team.Name, s.user.Email)
request, err := http.NewRequest("PUT", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddUserToTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
}
// Adding a user to a team must grant that user access, in Gandalf, to the
// repositories of the team's apps. h.url[0] is taken by addKeyToUser, so the
// grant call is the second request recorded by the fake server.
func (s *AuthSuite) TestAddUserToTeamShoulGrantAccessInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "marathon@rush.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
a := App{Name: "i-should", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
err = u.Get()
c.Assert(err, IsNil)
err = addUserToTeam(u.Email, s.team.Name, s.user)
c.Assert(err, IsNil)
c.Check(len(h.url), Equals, 2)
c.Assert(h.url[1], Equals, "/repository/grant")
c.Assert(h.method[1], Equals, "POST")
expected := fmt.Sprintf(`{"repositories":["%s"],"users":["marathon@rush.com"]}`, a.Name)
c.Assert(string(h.body[1]), Equals, expected)
}
// addUserToTeamInDatabase must append the user's email to the team document.
func (s *AuthSuite) TestAddUserToTeamInDatabase(c *C) {
user := &auth.User{Email: "nobody@gmail.com", Password: "123456"}
team := &auth.Team{Name: "myteam"}
err := s.conn.Teams().Insert(team)
c.Assert(err, IsNil)
defer s.conn.Teams().RemoveId(team.Name)
err = addUserToTeamInDatabase(user, team)
c.Assert(err, IsNil)
s.conn.Teams().FindId(team.Name).One(team)
c.Assert(team.Users, DeepEquals, []string{user.Email})
}
// addUserToTeamInGandalf must issue exactly one grant call to Gandalf.
func (s *AuthSuite) TestAddUserToTeamInGandalfShouldCallGandalfApi(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nonee@me.me", Password: "none"}
err := addUserToTeamInGandalf("me@gmail.com", &u, s.team)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 1)
c.Assert(h.url[0], Equals, "/repository/grant")
}
// DELETE /teams/:team/:user by a member must drop the target user from the team.
func (s *AuthSuite) TestRemoveUserFromTeamShouldRemoveAUserFromATeamIfTheTeamExistAndTheUserIsMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nonee@me.me", Password: "none"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
s.team.AddUser(&u)
s.conn.Teams().Update(bson.M{"_id": s.team.Name}, s.team)
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/nonee@me.me?:team=tsuruteam&:user=nonee@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, IsNil)
err = s.conn.Teams().Find(bson.M{"_id": s.team.Name}).One(s.team)
c.Assert(err, IsNil)
c.Assert(s.team, Not(ContainsUser), &u)
}
// When the removed user belongs to a second team, only the repositories of
// the team being left must be revoked in Gandalf (app1, not app2).
func (s *AuthSuite) TestRemoveUserFromTeamShouldRemoveOnlyAppsInThatTeamInGandalfWhenUserIsInMoreThanOneTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := auth.User{Email: "nobody@me.me", Password: "none"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
s.team.AddUser(&u)
s.conn.Teams().UpdateId(s.team.Name, s.team)
team2 := auth.Team{Name: "team2", Users: []string{u.Email}}
err = s.conn.Teams().Insert(&team2)
c.Assert(err, IsNil)
defer s.conn.Teams().RemoveId(team2.Name)
app1 := app.App{Name: "app1", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(&app1)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": app1.Name})
app2 := app.App{Name: "app2", Teams: []string{team2.Name}}
err = s.conn.Apps().Insert(&app2)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": app2.Name})
err = removeUserFromTeam(u.Email, s.team.Name, s.user)
c.Assert(err, IsNil)
expected := `{"repositories":["app1"],"users":["nobody@me.me"]}`
c.Assert(len(h.body), Equals, 1)
c.Assert(string(h.body[0]), Equals, expected)
s.conn.Teams().FindId(s.team.Name).One(s.team)
c.Assert(s.team, Not(ContainsUser), &u) // just in case
}
// Removing a user from a nonexistent team must answer 404 "Team not found".
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnNotFoundIfTheTeamDoesNotExist(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=unknown&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
c.Assert(e, ErrorMatches, "^Team not found$")
}
// A requester who is not a member of the team cannot remove users from it: 401.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnUnauthorizedIfTheGivenUserIsNotMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=tsuruteam&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, &auth.User{Email: "unknown@gmail.com"})
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusUnauthorized)
c.Assert(e, ErrorMatches, "^You are not authorized to remove a member from the team tsuruteam")
}
// Removing an email that is not a member of the team must answer 404. The
// deferred closure restores the suite team's membership after the test.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnNotFoundWhenTheUserIsNotMemberOfTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "nobody@me.me", Password: "132"}
s.team.AddUser(u)
s.conn.Teams().Update(bson.M{"_id": s.team.Name}, s.team)
defer func(t *auth.Team, u *auth.User) {
s.team.RemoveUser(u)
s.conn.Teams().Update(bson.M{"_id": t.Name}, t)
}(s.team, u)
request, err := http.NewRequest("DELETE", "/teams/tsuruteam/none@me.me?:team=tsuruteam&:user=none@me.me", nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusNotFound)
}
// The last member of a team cannot be removed (teams must not be orphaned): 403.
func (s *AuthSuite) TestRemoveUserFromTeamShouldReturnForbiddenIfTheUserIsTheLastInTheTeam(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
url := "/teams/tsuruteam/whydidifall@thewho.com?:team=tsuruteam&:user=whydidifall@thewho.com"
request, err := http.NewRequest("DELETE", url, nil)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveUserFromTeam(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusForbidden)
c.Assert(e, ErrorMatches, "^You can not remove this user from this team, because it is the last user within the team, and a team can not be orphaned$")
}
// Removing a user from a team must revoke, in Gandalf, their access to the
// team's app repositories. h.url[0]/[1] are filled by addKeyToUser and
// addUserToTeam above, so the revoke call is the third recorded request.
func (s *AuthSuite) TestRemoveUserFromTeamRevokesAccessInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "pomar@nando-reis.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
err = u.Get()
c.Assert(err, IsNil)
err = addUserToTeam("pomar@nando-reis.com", s.team.Name, s.user)
c.Assert(err, IsNil)
a := struct {
Name string
Teams []string
}{Name: "myApp", Teams: []string{s.team.Name}}
err = s.conn.Apps().Insert(a)
c.Assert(err, IsNil)
defer s.conn.Apps().Remove(bson.M{"name": a.Name})
err = removeUserFromTeam("pomar@nando-reis.com", s.team.Name, s.user)
c.Assert(err, IsNil)
c.Assert(h.url[2], Equals, "/repository/revoke")
c.Assert(h.method[2], Equals, "DELETE")
expected := `{"repositories":["myApp"],"users":["pomar@nando-reis.com"]}`
c.Assert(string(h.body[2]), Equals, expected)
}
// TODO(review): disabled test — removeUserFromTeamInGandalf has no direct
// coverage, and the call below references an undefined `email` variable,
// so the test cannot be re-enabled as written.
// func (s *AuthSuite) TestRemoveUserFromTeamInGandalf(c *C) {
// h := testHandler{}
// ts := s.startGandalfTestServer(&h)
// defer ts.Close()
// removeUserFromTeamInGandalf(email)
// }
// POST /users/keys must attach the posted SSH key to the authenticated user.
func (s *AuthSuite) TestAddKeyToUserAddsAKeyToTheUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
defer func() {
s.user.RemoveKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("POST", "/users/keys", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, IsNil)
s.user.Get()
c.Assert(s.user, HasKey, "my-key")
}
// A body closed before AddKeyToUser reads it must make the handler error out.
func (s *AuthSuite) TestAddKeyToUserReturnsErrorIfTheReadingOfTheBodyFails(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := s.getTestData("bodyToBeClosed.txt")
b.Close()
request, err := http.NewRequest("POST", "/users/keys", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
}
// Malformed JSON in the key body must answer 400 "Invalid JSON".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheJsonIsInvalid(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`"aaaa}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Invalid JSON$")
}
// A valid JSON body without a "key" field must answer 400 "Missing key".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheKeyIsNotPresent(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// An empty "key" value is treated the same as a missing one: 400 "Missing key".
func (s *AuthSuite) TestAddKeyToUserReturnsBadRequestIfTheKeyIsEmpty(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
b := bytes.NewBufferString(`{"key":""}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusBadRequest)
c.Assert(e, ErrorMatches, "^Missing key$")
}
// Posting a key the user already has must answer 409 Conflict. The key is
// pre-added directly on the document and cleaned up in the deferred closure.
func (s *AuthSuite) TestAddKeyToUserReturnsConflictIfTheKeyIsAlreadyPresent(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
s.user.AddKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
defer func() {
s.user.RemoveKey(auth.Key{Content: "my-key"})
s.conn.Users().Update(bson.M{"email": s.user.Email}, s.user)
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("POST", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = AddKeyToUser(recorder, request, s.user)
c.Assert(err, NotNil)
e, ok := err.(*errors.Http)
c.Assert(ok, Equals, true)
c.Assert(e.Code, Equals, http.StatusConflict)
c.Assert(e.Message, Equals, "User already has this key")
}
// addKeyToUser must push the key to Gandalf under an auto-generated
// "<email>-<n>" name (never a filename-like ".pub" suffix).
func (s *AuthSuite) TestAddKeyAddKeyToUserInGandalf(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "francisco@franciscosouza.net", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
err = addKeyToUser("my-key", u)
c.Assert(err, IsNil)
defer func() {
removeKeyFromUser("my-key", u)
s.conn.Users().RemoveAll(bson.M{"email": u.Email})
}()
c.Assert(u.Keys[0].Name, Not(Matches), "\\.pub$")
expectedUrl := fmt.Sprintf("/user/%s/key", u.Email)
c.Assert(h.url[0], Equals, expectedUrl)
c.Assert(h.method[0], Equals, "POST")
expected := fmt.Sprintf(`{"%s-1":"my-key"}`, u.Email)
c.Assert(string(h.body[0]), Equals, expected)
}
// With no Gandalf server running, addKeyToUser must fail and leave the
// user's key list untouched (no partial write to the database).
func (s *AuthSuite) TestAddKeyToUserShouldNotInsertKeyInDatabaseWhenGandalfAdditionFails(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
err = addKeyToUser("my-key", u)
c.Assert(err, NotNil)
c.Assert(err.Error(), Equals, "Failed to add key to git server: Failed to connect to Gandalf server, it's probably down.")
defer func() {
s.conn.Users().RemoveAll(bson.M{"email": u.Email})
}()
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{})
}
// addKeyInDatabase must persist the key on the user document.
func (s *AuthSuite) TestAddKeyInDatabaseShouldStoreUsersKeyInDB(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Content: "my-ssh-key", Name: "key1"}
err = addKeyInDatabase(&key, u)
c.Assert(err, IsNil)
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{key})
}
// addKeyInGandalf must issue exactly one POST /user/<email>/key call.
func (s *AuthSuite) TestAddKeyInGandalfShouldCallGandalfApi(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Content: "my-ssh-key", Name: "key1"}
err = addKeyInGandalf(&key, u)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 1)
c.Assert(h.url[0], Equals, "/user/me@gmail.com/key")
}
// removeKeyFromGandalf must hit /user/<email>/key/<name>; the fake server
// records two requests (the add followed by the remove).
func (s *AuthSuite) TestRemoveKeyFromGandalfCallsGandalfApi(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
key := auth.Key{Name: "mykey", Content: "my-ssh-key"}
err = addKeyInGandalf(&key, u)
c.Assert(err, IsNil)
err = removeKeyFromGandalf(&key, u)
c.Assert(err, IsNil)
c.Assert(len(h.url), Equals, 2) // add and remove
expected := fmt.Sprintf("/user/me@gmail.com/key/%s", key.Name)
c.Assert(h.url[1], Matches, expected)
}
// removeKeyFromDatabase must delete the key from the user document, leaving
// an empty (non-nil) key slice.
func (s *AuthSuite) TestRemoveKeyFromDatabase(c *C) {
u := &auth.User{Email: "me@gmail.com", Password: "123456"}
err := u.Create()
c.Assert(err, IsNil)
defer s.conn.Users().Remove(bson.M{"email": u.Email})
key := auth.Key{Name: "mykey", Content: "my-ssh-key"}
err = addKeyInDatabase(&key, u)
c.Assert(err, IsNil)
err = removeKeyFromDatabase(&key, u)
c.Assert(err, IsNil)
u.Get()
c.Assert(u.Keys, DeepEquals, []auth.Key{})
}
// DELETE /users/key must detach the posted key from the authenticated user.
// The deferred closure only cleans up if the handler failed to remove it.
func (s *AuthSuite) TestRemoveKeyHandlerRemovesTheKeyFromTheUser(c *C) {
h := testHandler{}
ts := s.startGandalfTestServer(&h)
defer ts.Close()
addKeyToUser("my-key", s.user)
defer func() {
if s.user.HasKey(auth.Key{Content: "my-key"}) {
removeKeyFromUser("my-key", s.user)
}
}()
b := bytes.NewBufferString(`{"key":"my-key"}`)
request, err := http.NewRequest("DELETE", "/users/key", b)
c.Assert(err, IsNil)
recorder := httptest.NewRecorder()
err = RemoveKeyFromUser(recorder, request, s.user)
c.Assert(err, IsNil)
s.user.Get()
c.Assert(s.user, Not(HasKey), "my-key")
}
// TestRemoveKeyHandlerCallsGandalfRemoveKey verifies that the DELETE
// /users/key handler propagates the key removal to the Gandalf API.
func (s *AuthSuite) TestRemoveKeyHandlerCallsGandalfRemoveKey(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	err := addKeyToUser("my-key", s.user) // fills the first position in h properties
	c.Assert(err, IsNil)
	defer func() {
		if s.user.HasKey(auth.Key{Content: "my-key"}) {
			removeKeyFromUser("my-key", s.user)
		}
	}()
	b := bytes.NewBufferString(`{"key":"my-key"}`)
	request, err := http.NewRequest("DELETE", "/users/key", b)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveKeyFromUser(recorder, request, s.user)
	c.Assert(err, IsNil)
	// NOTE(review): the expected path assumes keys without an explicit name
	// are auto-named "<email>-<n>" — confirm against the key-naming logic.
	c.Assert(h.url[1], Equals, fmt.Sprintf("/user/%s/key/%s-%d", s.user.Email, s.user.Email, len(s.user.Keys)+1))
	c.Assert(h.method[1], Equals, "DELETE")
	c.Assert(string(h.body[1]), Equals, "null")
}
// TestRemoveKeyHandlerReturnsErrorInCaseOfAnyIOFailure checks that the
// handler surfaces an error when the request body cannot be read.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsErrorInCaseOfAnyIOFailure(c *C) {
	body := s.getTestData("bodyToBeClosed.txt")
	// Closing the body up front forces the read inside the handler to fail.
	body.Close()
	req, err := http.NewRequest("DELETE", "/users/key", body)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = RemoveKeyFromUser(rec, req, s.user)
	c.Assert(err, NotNil)
}
// TestRemoveKeyHandlerReturnsBadRequestIfTheJSONIsInvalid ensures malformed
// JSON in the request body is rejected with a 400 "Invalid JSON" error.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheJSONIsInvalid(c *C) {
	payload := bytes.NewBufferString(`invalid"json}`)
	req, err := http.NewRequest("DELETE", "/users/key", payload)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = RemoveKeyFromUser(rec, req, s.user)
	c.Assert(err, NotNil)
	httpErr, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(httpErr.Code, Equals, http.StatusBadRequest)
	c.Assert(httpErr, ErrorMatches, "^Invalid JSON$")
}
// TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsNotPresent ensures the
// handler rejects a JSON body that lacks the "key" field with 400.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsNotPresent(c *C) {
	b := bytes.NewBufferString(`{}`)
	request, err := http.NewRequest("DELETE", "/users/key", b)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveKeyFromUser(recorder, request, s.user)
	c.Assert(err, NotNil)
	// The handler is expected to wrap the failure in an *errors.Http.
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
	c.Assert(e, ErrorMatches, "^Missing key$")
}
// TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsEmpty ensures an explicitly
// empty "key" value is treated the same as a missing one: 400 "Missing key".
func (s *AuthSuite) TestRemoveKeyHandlerReturnsBadRequestIfTheKeyIsEmpty(c *C) {
	b := bytes.NewBufferString(`{"key":""}`)
	request, err := http.NewRequest("DELETE", "/users/key", b)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveKeyFromUser(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusBadRequest)
	c.Assert(e, ErrorMatches, "^Missing key$")
}
// TestRemoveKeyHandlerReturnsNotFoundIfTheUserDoesNotHaveTheKey ensures a
// removal request for a key the user never added yields a 404.
func (s *AuthSuite) TestRemoveKeyHandlerReturnsNotFoundIfTheUserDoesNotHaveTheKey(c *C) {
	payload := bytes.NewBufferString(`{"key":"my-key"}`)
	req, err := http.NewRequest("DELETE", "/users/key", payload)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = RemoveKeyFromUser(rec, req, s.user)
	c.Assert(err, NotNil)
	httpErr, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(httpErr.Code, Equals, http.StatusNotFound)
}
// TestRemoveUser checks that the DELETE /users handler deletes the user
// document from the database.
func (s *AuthSuite) TestRemoveUser(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	u := auth.User{Email: "her-voices@painofsalvation.com"}
	err := u.Create()
	c.Assert(err, IsNil)
	// Deferred removal is a no-op when the handler succeeds; it only guards
	// against leftovers if the test fails midway.
	defer s.conn.Users().Remove(bson.M{"email": u.Email})
	request, err := http.NewRequest("DELETE", "/users", nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveUser(recorder, request, &u)
	c.Assert(err, IsNil)
	n, err := s.conn.Users().Find(bson.M{"email": u.Email}).Count()
	c.Assert(err, IsNil)
	c.Assert(n, Equals, 0)
}
// TestRemoveUserWithTheUserBeingLastMemberOfATeam asserts that removal is
// forbidden (403) when the user is the only member of a team.
func (s *AuthSuite) TestRemoveUserWithTheUserBeingLastMemberOfATeam(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	u := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
	err := u.Create()
	c.Assert(err, IsNil)
	defer s.conn.Users().Remove(bson.M{"email": u.Email})
	t := auth.Team{Name: "painofsalvation", Users: []string{u.Email}}
	err = s.conn.Teams().Insert(t)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": t.Name})
	request, err := http.NewRequest("DELETE", "/users", nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveUser(recorder, request, &u)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusForbidden)
	// NOTE(review): "them remove" looks like a typo for "then remove", but the
	// expectation must match the server message verbatim — fix both together.
	expected := `This user is the last member of the team "painofsalvation", so it cannot be removed.
Please remove the team, them remove the user.`
	c.Assert(e.Message, Equals, expected)
}
// TestRemoveUserShouldRemoveTheUserFromAllTeamsThatHeIsMember checks that
// deleting a user also drops its membership from every team it belongs to,
// leaving the other members untouched.
func (s *AuthSuite) TestRemoveUserShouldRemoveTheUserFromAllTeamsThatHeIsMember(c *C) {
	handler := testHandler{}
	server := s.startGandalfTestServer(&handler)
	defer server.Close()
	user := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
	err := user.Create()
	c.Assert(err, IsNil)
	defer s.conn.Users().Remove(bson.M{"email": user.Email})
	team := auth.Team{Name: "painofsalvation", Users: []string{user.Email, s.user.Email}}
	err = s.conn.Teams().Insert(team)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": team.Name})
	req, err := http.NewRequest("DELETE", "/users", nil)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = RemoveUser(rec, req, &user)
	c.Assert(err, IsNil)
	// Only the suite user should remain in the team afterwards.
	err = s.conn.Teams().Find(bson.M{"_id": team.Name}).One(&team)
	c.Assert(err, IsNil)
	c.Assert(team.Users, HasLen, 1)
	c.Assert(team.Users[0], Equals, s.user.Email)
}
// App mirrors the fields of an application document stored in the apps
// collection, for inserting fixture apps in tests.
// NOTE(review): not referenced in this excerpt — presumably used by other
// tests in the package; confirm before removing.
type App struct {
	Name  string
	Teams []string
}
// TestRemoveUserRevokesAccessInGandalf verifies that deleting a user revokes
// its access to the repositories of its teams through the Gandalf API.
func (s *AuthSuite) TestRemoveUserRevokesAccessInGandalf(c *C) {
	h := testHandler{}
	ts := s.startGandalfTestServer(&h)
	defer ts.Close()
	u := auth.User{Email: "of-two-beginnings@painofsalvation.com"}
	err := u.Create()
	c.Assert(err, IsNil)
	defer s.conn.Users().Remove(bson.M{"email": u.Email})
	t := auth.Team{Name: "painofsalvation", Users: []string{u.Email, s.user.Email}}
	err = s.conn.Teams().Insert(t)
	c.Assert(err, IsNil)
	defer s.conn.Teams().Remove(bson.M{"_id": t.Name})
	// An anonymous struct suffices here: only the app name and its teams
	// matter for the revoke call.
	a := struct {
		Name  string
		Teams []string
	}{Name: "myApp", Teams: []string{t.Name}}
	err = s.conn.Apps().Insert(a)
	c.Assert(err, IsNil)
	defer s.conn.Apps().Remove(bson.M{"name": a.Name})
	request, err := http.NewRequest("DELETE", "/users", nil)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = RemoveUser(recorder, request, &u)
	c.Assert(err, IsNil)
	c.Assert(h.url[0], Equals, "/repository/revoke")
	c.Assert(h.method[0], Equals, "DELETE")
	expected := `{"repositories":["myApp"],"users":["of-two-beginnings@painofsalvation.com"]}`
	c.Assert(string(h.body[0]), Equals, expected)
}
// TestChangePasswordHandler checks that PUT /users/password updates the
// stored password to the PBKDF2-SHA512 hash of the new value.
func (s *AuthSuite) TestChangePasswordHandler(c *C) {
	body := bytes.NewBufferString(`{"old":"123","new":"123456"}`)
	request, err := http.NewRequest("PUT", "/users/password", body)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = ChangePassword(recorder, request, s.user)
	c.Assert(err, IsNil)
	// Reload a copy of the user to observe the persisted password.
	otherUser := *s.user
	err = otherUser.Get()
	c.Assert(err, IsNil)
	// Mirrors the hashing used by the auth package: PBKDF2 with a fixed salt,
	// 4096 iterations, key length of salt*8, SHA-512.
	// NOTE(review): keep in sync with the production hashing parameters.
	hashPassword := func(password string) string {
		salt := []byte("tsuru-salt")
		return fmt.Sprintf("%x", pbkdf2.Key([]byte(password), salt, 4096, len(salt)*8, sha512.New))
	}
	expectedPassword := hashPassword("123456")
	c.Assert(otherUser.Password, Equals, expectedPassword)
}
// TestChangePasswordReturns412IfNewPasswordIsInvalid ensures a too-short new
// password is rejected with 412 Precondition Failed.
func (s *AuthSuite) TestChangePasswordReturns412IfNewPasswordIsInvalid(c *C) {
	payload := bytes.NewBufferString(`{"old":"123","new":"1234"}`)
	req, err := http.NewRequest("PUT", "/users/password", payload)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = ChangePassword(rec, req, s.user)
	c.Assert(err, NotNil)
	httpErr, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(httpErr.Code, Equals, http.StatusPreconditionFailed)
	// The message must match the server text verbatim.
	c.Assert(httpErr.Message, Equals, "Password length should be least 6 characters and at most 50 characters.")
}
// TestChangePasswordReturns404IfOldPasswordDidntMatch checks the error
// returned when the provided old password is wrong.
// NOTE(review): the name says 404, but the assertion below expects 403
// (http.StatusForbidden) — rename the test or adjust the expectation.
func (s *AuthSuite) TestChangePasswordReturns404IfOldPasswordDidntMatch(c *C) {
	body := bytes.NewBufferString(`{"old":"1234","new":"123456"}`)
	request, err := http.NewRequest("PUT", "/users/password", body)
	c.Assert(err, IsNil)
	recorder := httptest.NewRecorder()
	err = ChangePassword(recorder, request, s.user)
	c.Assert(err, NotNil)
	e, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(e.Code, Equals, http.StatusForbidden)
	c.Assert(e.Message, Equals, "The given password didn't match the user's current password.")
}
// TestChangePasswordReturns400IfRequestBodyIsInvalidJSON ensures malformed
// JSON in the request body yields a 400 response.
func (s *AuthSuite) TestChangePasswordReturns400IfRequestBodyIsInvalidJSON(c *C) {
	payload := bytes.NewBufferString(`{"invalid:"json`)
	req, err := http.NewRequest("PUT", "/users/password", payload)
	c.Assert(err, IsNil)
	rec := httptest.NewRecorder()
	err = ChangePassword(rec, req, s.user)
	c.Assert(err, NotNil)
	httpErr, ok := err.(*errors.Http)
	c.Assert(ok, Equals, true)
	c.Assert(httpErr.Code, Equals, http.StatusBadRequest)
	c.Assert(httpErr.Message, Equals, "Invalid JSON.")
}
// TestChangePasswordReturns400IfJSONDoesNotIncludeBothOldAndNewPasswords
// iterates over bodies that omit one or both passwords; every one of them
// must produce a 400 with the same message.
func (s *AuthSuite) TestChangePasswordReturns400IfJSONDoesNotIncludeBothOldAndNewPasswords(c *C) {
	bodies := []string{`{"old": "something"}`, `{"new":"something"}`, "{}", "null"}
	for _, body := range bodies {
		b := bytes.NewBufferString(body)
		request, err := http.NewRequest("PUT", "/users/password", b)
		c.Assert(err, IsNil)
		recorder := httptest.NewRecorder()
		err = ChangePassword(recorder, request, s.user)
		c.Assert(err, NotNil)
		e, ok := err.(*errors.Http)
		c.Assert(ok, Equals, true)
		c.Assert(e.Code, Equals, http.StatusBadRequest)
		c.Assert(e.Message, Equals, "Both the old and the new passwords are required.")
	}
}
|
[login] Remove the ID() function which is never used.
Change-Id: I924887fb9c8b142c10670cc3c7de6fa5fe0bcc9b
Reviewed-on: https://skia-review.googlesource.com/c/buildbot/+/392099
Auto-Submit: Joe Gregorio <b9cf2471ff6d33504cafe8e6f356b732b2cadced@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
|
dl: add go1.13rc1
Change-Id: I2d98f699060e761f26ab590641061dc9f90e1918
Reviewed-on: https://go-review.googlesource.com/c/dl/+/191161
Reviewed-by: Dmitri Shuralyov <d181b7fea0ec87c86ba5a890ab716db52498e3ba@golang.org>
|
fix variable names to use lowerCamelCase
|
update import path for snappy
|
package main
import (
"bytes"
"crypto/rand"
"encoding/json"
"io/ioutil"
"log"
"os"
"github.com/cloudfoundry/bosh-bootloader/application"
"github.com/cloudfoundry/bosh-bootloader/aws"
"github.com/cloudfoundry/bosh-bootloader/aws/clientmanager"
"github.com/cloudfoundry/bosh-bootloader/aws/cloudformation"
"github.com/cloudfoundry/bosh-bootloader/aws/cloudformation/templates"
"github.com/cloudfoundry/bosh-bootloader/aws/ec2"
"github.com/cloudfoundry/bosh-bootloader/aws/iam"
"github.com/cloudfoundry/bosh-bootloader/azure"
"github.com/cloudfoundry/bosh-bootloader/bosh"
"github.com/cloudfoundry/bosh-bootloader/certs"
"github.com/cloudfoundry/bosh-bootloader/cloudconfig"
"github.com/cloudfoundry/bosh-bootloader/commands"
"github.com/cloudfoundry/bosh-bootloader/config"
"github.com/cloudfoundry/bosh-bootloader/gcp"
"github.com/cloudfoundry/bosh-bootloader/helpers"
"github.com/cloudfoundry/bosh-bootloader/proxy"
"github.com/cloudfoundry/bosh-bootloader/stack"
"github.com/cloudfoundry/bosh-bootloader/storage"
"github.com/cloudfoundry/bosh-bootloader/terraform"
awsapplication "github.com/cloudfoundry/bosh-bootloader/application/aws"
gcpapplication "github.com/cloudfoundry/bosh-bootloader/application/gcp"
awscloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/aws"
azurecloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/azure"
gcpcloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/gcp"
awsterraform "github.com/cloudfoundry/bosh-bootloader/terraform/aws"
azureterraform "github.com/cloudfoundry/bosh-bootloader/terraform/azure"
gcpterraform "github.com/cloudfoundry/bosh-bootloader/terraform/gcp"
)
var (
	// Version is the bbl version string printed by the "version" command.
	// NOTE(review): presumably injected at build time via -ldflags — confirm
	// against the build scripts.
	Version string
	// gcpBasePath overrides the GCP API base URL passed to the GCP client
	// provider; empty means the provider's default endpoint.
	gcpBasePath string
)
// main wires together every bbl dependency (state loading, IAAS clients,
// CloudFormation, Terraform, BOSH, cloud-config) and dispatches the command
// named on the command line. Any error terminates the process via log.Fatalf.
func main() {
	newConfig := config.NewConfig(storage.GetState)
	parsedFlags, err := newConfig.Bootstrap(os.Args)
	log.SetFlags(0)
	if err != nil {
		log.Fatalf("\n\n%s\n", err)
	}
	appConfig := &application.Configuration{
		Global: application.GlobalConfiguration{
			StateDir: parsedFlags.StateDir,
			Debug:    parsedFlags.Debug,
		},
		State:           parsedFlags.State,
		ShowCommandHelp: parsedFlags.Help,
	}
	if len(parsedFlags.RemainingArgs) > 0 {
		appConfig.Command = parsedFlags.RemainingArgs[0]
		appConfig.SubcommandFlags = parsedFlags.RemainingArgs[1:]
	} else {
		// No command given: --help/--version become pseudo-commands.
		appConfig.ShowCommandHelp = false
		if parsedFlags.Help {
			appConfig.Command = "help"
		}
		if parsedFlags.Version {
			appConfig.Command = "version"
		}
	}
	if len(os.Args) == 1 {
		appConfig.Command = "help"
	}
	// FIX: showing help for a command must not require IAAS credentials, so
	// skip IAAS validation (and client configuration below) when only help
	// output was requested.
	needsIAASConfig := config.NeedsIAASConfig(appConfig.Command) && !appConfig.ShowCommandHelp
	if needsIAASConfig {
		err = config.ValidateIAAS(appConfig.State, appConfig.Command)
		if err != nil {
			log.Fatalf("\n\n%s\n", err)
		}
	}
	// Utilities
	envIDGenerator := helpers.NewEnvIDGenerator(rand.Reader)
	envGetter := helpers.NewEnvGetter()
	logger := application.NewLogger(os.Stdout)
	stderrLogger := application.NewLogger(os.Stderr)
	// Usage Command
	usage := commands.NewUsage(logger)
	storage.GetStateLogger = stderrLogger
	stateStore := storage.NewStore(parsedFlags.StateDir)
	stateValidator := application.NewStateValidator(parsedFlags.StateDir)
	// AWS clients are only configured when the chosen command needs them.
	awsClientProvider := &clientmanager.ClientProvider{}
	if appConfig.State.IAAS == "aws" && needsIAASConfig {
		awsConfiguration := aws.Config{
			AccessKeyID:     appConfig.State.AWS.AccessKeyID,
			SecretAccessKey: appConfig.State.AWS.SecretAccessKey,
			Region:          appConfig.State.AWS.Region,
		}
		awsClientProvider.SetConfig(awsConfiguration)
	}
	vpcStatusChecker := ec2.NewVPCStatusChecker(awsClientProvider)
	awsAvailabilityZoneRetriever := ec2.NewAvailabilityZoneRetriever(awsClientProvider)
	templateBuilder := templates.NewTemplateBuilder(logger)
	stackManager := cloudformation.NewStackManager(awsClientProvider, logger)
	infrastructureManager := cloudformation.NewInfrastructureManager(templateBuilder, stackManager)
	certificateDescriber := iam.NewCertificateDescriber(awsClientProvider)
	certificateDeleter := iam.NewCertificateDeleter(awsClientProvider)
	certificateValidator := certs.NewValidator()
	userPolicyDeleter := iam.NewUserPolicyDeleter(awsClientProvider)
	awsKeyPairDeleter := ec2.NewKeyPair(awsClientProvider, logger)
	gcpClientProvider := gcp.NewClientProvider(gcpBasePath)
	if appConfig.State.IAAS == "gcp" && needsIAASConfig {
		err = gcpClientProvider.SetConfig(appConfig.State.GCP.ServiceAccountKey, appConfig.State.GCP.ProjectID, appConfig.State.GCP.Region, appConfig.State.GCP.Zone)
		if err != nil {
			log.Fatalf("\n\n%s\n", err)
		}
	}
	gcpNetworkInstancesChecker := gcp.NewNetworkInstancesChecker(gcpClientProvider.Client())
	// EnvID
	envIDManager := helpers.NewEnvIDManager(envIDGenerator, gcpClientProvider.Client(), infrastructureManager, awsClientProvider.GetEC2Client())
	// Terraform
	terraformOutputBuffer := bytes.NewBuffer([]byte{})
	terraformCmd := terraform.NewCmd(os.Stderr, terraformOutputBuffer)
	terraformExecutor := terraform.NewExecutor(terraformCmd, parsedFlags.Debug)
	gcpTemplateGenerator := gcpterraform.NewTemplateGenerator()
	gcpInputGenerator := gcpterraform.NewInputGenerator()
	gcpOutputGenerator := gcpterraform.NewOutputGenerator(terraformExecutor)
	awsTemplateGenerator := awsterraform.NewTemplateGenerator()
	awsInputGenerator := awsterraform.NewInputGenerator(awsAvailabilityZoneRetriever)
	awsOutputGenerator := awsterraform.NewOutputGenerator(terraformExecutor)
	azureTemplateGenerator := azureterraform.NewTemplateGenerator()
	azureInputGenerator := azureterraform.NewInputGenerator()
	azureOutputGenerator := azureterraform.NewOutputGenerator(terraformExecutor)
	templateGenerator := terraform.NewTemplateGenerator(gcpTemplateGenerator, awsTemplateGenerator, azureTemplateGenerator)
	inputGenerator := terraform.NewInputGenerator(gcpInputGenerator, awsInputGenerator, azureInputGenerator)
	stackMigrator := stack.NewMigrator(terraformExecutor, infrastructureManager, certificateDescriber, userPolicyDeleter, awsAvailabilityZoneRetriever, awsKeyPairDeleter)
	terraformManager := terraform.NewManager(terraform.NewManagerArgs{
		Executor:              terraformExecutor,
		TemplateGenerator:     templateGenerator,
		InputGenerator:        inputGenerator,
		AWSOutputGenerator:    awsOutputGenerator,
		AzureOutputGenerator:  azureOutputGenerator,
		GCPOutputGenerator:    gcpOutputGenerator,
		TerraformOutputBuffer: terraformOutputBuffer,
		Logger:                logger,
		StackMigrator:         stackMigrator,
	})
	// BOSH
	hostKeyGetter := proxy.NewHostKeyGetter()
	socks5Proxy := proxy.NewSocks5Proxy(logger, hostKeyGetter, 0)
	boshCommand := bosh.NewCmd(os.Stderr)
	boshExecutor := bosh.NewExecutor(boshCommand, ioutil.TempDir, ioutil.ReadFile, json.Unmarshal,
		json.Marshal, ioutil.WriteFile)
	boshManager := bosh.NewManager(boshExecutor, logger, socks5Proxy)
	boshClientProvider := bosh.NewClientProvider(socks5Proxy)
	// Environment Validators
	awsEnvironmentValidator := awsapplication.NewEnvironmentValidator(infrastructureManager, boshClientProvider)
	gcpEnvironmentValidator := gcpapplication.NewEnvironmentValidator(boshClientProvider)
	// Cloud Config
	sshKeyGetter := bosh.NewSSHKeyGetter()
	awsCloudFormationOpsGenerator := awscloudconfig.NewCloudFormationOpsGenerator(awsAvailabilityZoneRetriever, infrastructureManager)
	awsTerraformOpsGenerator := awscloudconfig.NewTerraformOpsGenerator(terraformManager)
	gcpOpsGenerator := gcpcloudconfig.NewOpsGenerator(terraformManager)
	azureOpsGenerator := azurecloudconfig.NewOpsGenerator(terraformManager)
	cloudConfigOpsGenerator := cloudconfig.NewOpsGenerator(awsCloudFormationOpsGenerator, awsTerraformOpsGenerator, gcpOpsGenerator, azureOpsGenerator)
	cloudConfigManager := cloudconfig.NewManager(logger, boshCommand, cloudConfigOpsGenerator, boshClientProvider, socks5Proxy, terraformManager, sshKeyGetter)
	// Subcommands
	awsUp := commands.NewAWSUp(boshManager, cloudConfigManager, stateStore, awsClientProvider, envIDManager, terraformManager)
	awsCreateLBs := commands.NewAWSCreateLBs(cloudConfigManager, stateStore, terraformManager, awsEnvironmentValidator)
	awsLBs := commands.NewAWSLBs(terraformManager, logger)
	awsUpdateLBs := commands.NewAWSUpdateLBs(awsCreateLBs)
	awsDeleteLBs := commands.NewAWSDeleteLBs(cloudConfigManager, stateStore, awsEnvironmentValidator, terraformManager)
	azureClient := azure.NewClient()
	azureUp := commands.NewAzureUp(azureClient, boshManager, cloudConfigManager, envIDManager, logger, stateStore, terraformManager)
	azureDeleteLBs := commands.NewAzureDeleteLBs(cloudConfigManager, stateStore, terraformManager)
	gcpUp := commands.NewGCPUp(commands.NewGCPUpArgs{
		StateStore:                   stateStore,
		TerraformManager:             terraformManager,
		BoshManager:                  boshManager,
		Logger:                       logger,
		EnvIDManager:                 envIDManager,
		CloudConfigManager:           cloudConfigManager,
		GCPAvailabilityZoneRetriever: gcpClientProvider.Client(),
	})
	gcpCreateLBs := commands.NewGCPCreateLBs(terraformManager, cloudConfigManager, stateStore, gcpEnvironmentValidator, gcpClientProvider.Client())
	gcpLBs := commands.NewGCPLBs(terraformManager, logger)
	gcpUpdateLBs := commands.NewGCPUpdateLBs(gcpCreateLBs)
	gcpDeleteLBs := commands.NewGCPDeleteLBs(stateStore, gcpEnvironmentValidator, terraformManager, cloudConfigManager)
	up := commands.NewUp(awsUp, gcpUp, azureUp, envGetter, boshManager)
	// Commands
	commandSet := application.CommandSet{}
	commandSet["help"] = usage
	commandSet["version"] = commands.NewVersion(Version, logger)
	commandSet["up"] = up
	sshKeyDeleter := bosh.NewSSHKeyDeleter()
	commandSet["rotate"] = commands.NewRotate(stateValidator, sshKeyDeleter, up)
	commandSet["destroy"] = commands.NewDestroy(logger, os.Stdin, boshManager, vpcStatusChecker, stackManager, infrastructureManager, certificateDeleter, stateStore, stateValidator, terraformManager, gcpNetworkInstancesChecker)
	commandSet["down"] = commandSet["destroy"]
	commandSet["create-lbs"] = commands.NewCreateLBs(awsCreateLBs, gcpCreateLBs, logger, stateValidator, certificateValidator, boshManager)
	commandSet["update-lbs"] = commands.NewUpdateLBs(awsUpdateLBs, gcpUpdateLBs, certificateValidator, stateValidator, logger, boshManager)
	commandSet["delete-lbs"] = commands.NewDeleteLBs(awsDeleteLBs, azureDeleteLBs, gcpDeleteLBs, logger, stateValidator, boshManager)
	commandSet["lbs"] = commands.NewLBs(gcpLBs, awsLBs, stateValidator, logger)
	commandSet["jumpbox-address"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.JumpboxAddressPropertyName)
	commandSet["director-address"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorAddressPropertyName)
	commandSet["director-username"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorUsernamePropertyName)
	commandSet["director-password"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorPasswordPropertyName)
	commandSet["director-ca-cert"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorCACertPropertyName)
	commandSet["ssh-key"] = commands.NewSSHKey(logger, stateValidator, sshKeyGetter)
	commandSet["env-id"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.EnvIDPropertyName)
	commandSet["latest-error"] = commands.NewLatestError(logger, stateValidator)
	commandSet["print-env"] = commands.NewPrintEnv(logger, stateValidator, terraformManager)
	commandSet["cloud-config"] = commands.NewCloudConfig(logger, stateValidator, cloudConfigManager)
	commandSet["bosh-deployment-vars"] = commands.NewBOSHDeploymentVars(logger, boshManager, stateValidator, terraformManager)
	app := application.New(commandSet, *appConfig, usage)
	err = app.Run()
	if err != nil {
		log.Fatalf("\n\n%s\n", err)
	}
}
Showing help for a command does not require IAAS keys.
[#150911980]
Signed-off-by: Genevieve LEsperance <1b6a7b865633211adcfeed81fa254bd2ba36f0db@pivotal.io>
package main
import (
"bytes"
"crypto/rand"
"encoding/json"
"io/ioutil"
"log"
"os"
"github.com/cloudfoundry/bosh-bootloader/application"
"github.com/cloudfoundry/bosh-bootloader/aws"
"github.com/cloudfoundry/bosh-bootloader/aws/clientmanager"
"github.com/cloudfoundry/bosh-bootloader/aws/cloudformation"
"github.com/cloudfoundry/bosh-bootloader/aws/cloudformation/templates"
"github.com/cloudfoundry/bosh-bootloader/aws/ec2"
"github.com/cloudfoundry/bosh-bootloader/aws/iam"
"github.com/cloudfoundry/bosh-bootloader/azure"
"github.com/cloudfoundry/bosh-bootloader/bosh"
"github.com/cloudfoundry/bosh-bootloader/certs"
"github.com/cloudfoundry/bosh-bootloader/cloudconfig"
"github.com/cloudfoundry/bosh-bootloader/commands"
"github.com/cloudfoundry/bosh-bootloader/config"
"github.com/cloudfoundry/bosh-bootloader/gcp"
"github.com/cloudfoundry/bosh-bootloader/helpers"
"github.com/cloudfoundry/bosh-bootloader/proxy"
"github.com/cloudfoundry/bosh-bootloader/stack"
"github.com/cloudfoundry/bosh-bootloader/storage"
"github.com/cloudfoundry/bosh-bootloader/terraform"
awsapplication "github.com/cloudfoundry/bosh-bootloader/application/aws"
gcpapplication "github.com/cloudfoundry/bosh-bootloader/application/gcp"
awscloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/aws"
azurecloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/azure"
gcpcloudconfig "github.com/cloudfoundry/bosh-bootloader/cloudconfig/gcp"
awsterraform "github.com/cloudfoundry/bosh-bootloader/terraform/aws"
azureterraform "github.com/cloudfoundry/bosh-bootloader/terraform/azure"
gcpterraform "github.com/cloudfoundry/bosh-bootloader/terraform/gcp"
)
var (
	// Version is the bbl version string printed by the "version" command.
	// NOTE(review): presumably injected at build time via -ldflags — confirm
	// against the build scripts.
	Version string
	// gcpBasePath overrides the GCP API base URL passed to the GCP client
	// provider; empty means the provider's default endpoint.
	gcpBasePath string
)
// main wires together every bbl dependency (state loading, IAAS clients,
// CloudFormation, Terraform, BOSH, cloud-config) and dispatches the command
// named on the command line. Any error terminates the process via log.Fatalf.
func main() {
	newConfig := config.NewConfig(storage.GetState)
	parsedFlags, err := newConfig.Bootstrap(os.Args)
	log.SetFlags(0)
	if err != nil {
		log.Fatalf("\n\n%s\n", err)
	}
	appConfig := &application.Configuration{
		Global: application.GlobalConfiguration{
			StateDir: parsedFlags.StateDir,
			Debug:    parsedFlags.Debug,
		},
		State:           parsedFlags.State,
		ShowCommandHelp: parsedFlags.Help,
	}
	if len(parsedFlags.RemainingArgs) > 0 {
		appConfig.Command = parsedFlags.RemainingArgs[0]
		appConfig.SubcommandFlags = parsedFlags.RemainingArgs[1:]
	} else {
		// No command given: --help/--version become pseudo-commands.
		appConfig.ShowCommandHelp = false
		if parsedFlags.Help {
			appConfig.Command = "help"
		}
		if parsedFlags.Version {
			appConfig.Command = "version"
		}
	}
	if len(os.Args) == 1 {
		appConfig.Command = "help"
	}
	// Showing help for a command does not require IAAS credentials, so IAAS
	// validation is skipped when only help output was requested.
	needsIAASConfig := config.NeedsIAASConfig(appConfig.Command) && !appConfig.ShowCommandHelp
	if needsIAASConfig {
		err = config.ValidateIAAS(appConfig.State, appConfig.Command)
		if err != nil {
			log.Fatalf("\n\n%s\n", err)
		}
	}
	// Utilities
	envIDGenerator := helpers.NewEnvIDGenerator(rand.Reader)
	envGetter := helpers.NewEnvGetter()
	logger := application.NewLogger(os.Stdout)
	stderrLogger := application.NewLogger(os.Stderr)
	// Usage Command
	usage := commands.NewUsage(logger)
	storage.GetStateLogger = stderrLogger
	stateStore := storage.NewStore(parsedFlags.StateDir)
	stateValidator := application.NewStateValidator(parsedFlags.StateDir)
	// AWS clients are only configured when the chosen command needs them.
	awsClientProvider := &clientmanager.ClientProvider{}
	if appConfig.State.IAAS == "aws" && needsIAASConfig {
		awsConfiguration := aws.Config{
			AccessKeyID:     appConfig.State.AWS.AccessKeyID,
			SecretAccessKey: appConfig.State.AWS.SecretAccessKey,
			Region:          appConfig.State.AWS.Region,
		}
		awsClientProvider.SetConfig(awsConfiguration)
	}
	vpcStatusChecker := ec2.NewVPCStatusChecker(awsClientProvider)
	awsAvailabilityZoneRetriever := ec2.NewAvailabilityZoneRetriever(awsClientProvider)
	templateBuilder := templates.NewTemplateBuilder(logger)
	stackManager := cloudformation.NewStackManager(awsClientProvider, logger)
	infrastructureManager := cloudformation.NewInfrastructureManager(templateBuilder, stackManager)
	certificateDescriber := iam.NewCertificateDescriber(awsClientProvider)
	certificateDeleter := iam.NewCertificateDeleter(awsClientProvider)
	certificateValidator := certs.NewValidator()
	userPolicyDeleter := iam.NewUserPolicyDeleter(awsClientProvider)
	awsKeyPairDeleter := ec2.NewKeyPair(awsClientProvider, logger)
	gcpClientProvider := gcp.NewClientProvider(gcpBasePath)
	if appConfig.State.IAAS == "gcp" && needsIAASConfig {
		err = gcpClientProvider.SetConfig(appConfig.State.GCP.ServiceAccountKey, appConfig.State.GCP.ProjectID, appConfig.State.GCP.Region, appConfig.State.GCP.Zone)
		if err != nil {
			log.Fatalf("\n\n%s\n", err)
		}
	}
	gcpNetworkInstancesChecker := gcp.NewNetworkInstancesChecker(gcpClientProvider.Client())
	// EnvID
	envIDManager := helpers.NewEnvIDManager(envIDGenerator, gcpClientProvider.Client(), infrastructureManager, awsClientProvider.GetEC2Client())
	// Terraform
	terraformOutputBuffer := bytes.NewBuffer([]byte{})
	terraformCmd := terraform.NewCmd(os.Stderr, terraformOutputBuffer)
	terraformExecutor := terraform.NewExecutor(terraformCmd, parsedFlags.Debug)
	gcpTemplateGenerator := gcpterraform.NewTemplateGenerator()
	gcpInputGenerator := gcpterraform.NewInputGenerator()
	gcpOutputGenerator := gcpterraform.NewOutputGenerator(terraformExecutor)
	awsTemplateGenerator := awsterraform.NewTemplateGenerator()
	awsInputGenerator := awsterraform.NewInputGenerator(awsAvailabilityZoneRetriever)
	awsOutputGenerator := awsterraform.NewOutputGenerator(terraformExecutor)
	azureTemplateGenerator := azureterraform.NewTemplateGenerator()
	azureInputGenerator := azureterraform.NewInputGenerator()
	azureOutputGenerator := azureterraform.NewOutputGenerator(terraformExecutor)
	templateGenerator := terraform.NewTemplateGenerator(gcpTemplateGenerator, awsTemplateGenerator, azureTemplateGenerator)
	inputGenerator := terraform.NewInputGenerator(gcpInputGenerator, awsInputGenerator, azureInputGenerator)
	stackMigrator := stack.NewMigrator(terraformExecutor, infrastructureManager, certificateDescriber, userPolicyDeleter, awsAvailabilityZoneRetriever, awsKeyPairDeleter)
	terraformManager := terraform.NewManager(terraform.NewManagerArgs{
		Executor:              terraformExecutor,
		TemplateGenerator:     templateGenerator,
		InputGenerator:        inputGenerator,
		AWSOutputGenerator:    awsOutputGenerator,
		AzureOutputGenerator:  azureOutputGenerator,
		GCPOutputGenerator:    gcpOutputGenerator,
		TerraformOutputBuffer: terraformOutputBuffer,
		Logger:                logger,
		StackMigrator:         stackMigrator,
	})
	// BOSH
	hostKeyGetter := proxy.NewHostKeyGetter()
	socks5Proxy := proxy.NewSocks5Proxy(logger, hostKeyGetter, 0)
	boshCommand := bosh.NewCmd(os.Stderr)
	boshExecutor := bosh.NewExecutor(boshCommand, ioutil.TempDir, ioutil.ReadFile, json.Unmarshal,
		json.Marshal, ioutil.WriteFile)
	boshManager := bosh.NewManager(boshExecutor, logger, socks5Proxy)
	boshClientProvider := bosh.NewClientProvider(socks5Proxy)
	// Environment Validators
	awsEnvironmentValidator := awsapplication.NewEnvironmentValidator(infrastructureManager, boshClientProvider)
	gcpEnvironmentValidator := gcpapplication.NewEnvironmentValidator(boshClientProvider)
	// Cloud Config
	sshKeyGetter := bosh.NewSSHKeyGetter()
	awsCloudFormationOpsGenerator := awscloudconfig.NewCloudFormationOpsGenerator(awsAvailabilityZoneRetriever, infrastructureManager)
	awsTerraformOpsGenerator := awscloudconfig.NewTerraformOpsGenerator(terraformManager)
	gcpOpsGenerator := gcpcloudconfig.NewOpsGenerator(terraformManager)
	azureOpsGenerator := azurecloudconfig.NewOpsGenerator(terraformManager)
	cloudConfigOpsGenerator := cloudconfig.NewOpsGenerator(awsCloudFormationOpsGenerator, awsTerraformOpsGenerator, gcpOpsGenerator, azureOpsGenerator)
	cloudConfigManager := cloudconfig.NewManager(logger, boshCommand, cloudConfigOpsGenerator, boshClientProvider, socks5Proxy, terraformManager, sshKeyGetter)
	// Subcommands
	awsUp := commands.NewAWSUp(boshManager, cloudConfigManager, stateStore, awsClientProvider, envIDManager, terraformManager)
	awsCreateLBs := commands.NewAWSCreateLBs(cloudConfigManager, stateStore, terraformManager, awsEnvironmentValidator)
	awsLBs := commands.NewAWSLBs(terraformManager, logger)
	awsUpdateLBs := commands.NewAWSUpdateLBs(awsCreateLBs)
	awsDeleteLBs := commands.NewAWSDeleteLBs(cloudConfigManager, stateStore, awsEnvironmentValidator, terraformManager)
	azureClient := azure.NewClient()
	azureUp := commands.NewAzureUp(azureClient, boshManager, cloudConfigManager, envIDManager, logger, stateStore, terraformManager)
	azureDeleteLBs := commands.NewAzureDeleteLBs(cloudConfigManager, stateStore, terraformManager)
	gcpUp := commands.NewGCPUp(commands.NewGCPUpArgs{
		StateStore:                   stateStore,
		TerraformManager:             terraformManager,
		BoshManager:                  boshManager,
		Logger:                       logger,
		EnvIDManager:                 envIDManager,
		CloudConfigManager:           cloudConfigManager,
		GCPAvailabilityZoneRetriever: gcpClientProvider.Client(),
	})
	gcpCreateLBs := commands.NewGCPCreateLBs(terraformManager, cloudConfigManager, stateStore, gcpEnvironmentValidator, gcpClientProvider.Client())
	gcpLBs := commands.NewGCPLBs(terraformManager, logger)
	gcpUpdateLBs := commands.NewGCPUpdateLBs(gcpCreateLBs)
	gcpDeleteLBs := commands.NewGCPDeleteLBs(stateStore, gcpEnvironmentValidator, terraformManager, cloudConfigManager)
	up := commands.NewUp(awsUp, gcpUp, azureUp, envGetter, boshManager)
	// Commands: map of CLI command name to its implementation.
	commandSet := application.CommandSet{}
	commandSet["help"] = usage
	commandSet["version"] = commands.NewVersion(Version, logger)
	commandSet["up"] = up
	sshKeyDeleter := bosh.NewSSHKeyDeleter()
	commandSet["rotate"] = commands.NewRotate(stateValidator, sshKeyDeleter, up)
	commandSet["destroy"] = commands.NewDestroy(logger, os.Stdin, boshManager, vpcStatusChecker, stackManager, infrastructureManager, certificateDeleter, stateStore, stateValidator, terraformManager, gcpNetworkInstancesChecker)
	// "down" is an alias for "destroy".
	commandSet["down"] = commandSet["destroy"]
	commandSet["create-lbs"] = commands.NewCreateLBs(awsCreateLBs, gcpCreateLBs, logger, stateValidator, certificateValidator, boshManager)
	commandSet["update-lbs"] = commands.NewUpdateLBs(awsUpdateLBs, gcpUpdateLBs, certificateValidator, stateValidator, logger, boshManager)
	commandSet["delete-lbs"] = commands.NewDeleteLBs(awsDeleteLBs, azureDeleteLBs, gcpDeleteLBs, logger, stateValidator, boshManager)
	commandSet["lbs"] = commands.NewLBs(gcpLBs, awsLBs, stateValidator, logger)
	commandSet["jumpbox-address"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.JumpboxAddressPropertyName)
	commandSet["director-address"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorAddressPropertyName)
	commandSet["director-username"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorUsernamePropertyName)
	commandSet["director-password"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorPasswordPropertyName)
	commandSet["director-ca-cert"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.DirectorCACertPropertyName)
	commandSet["ssh-key"] = commands.NewSSHKey(logger, stateValidator, sshKeyGetter)
	commandSet["env-id"] = commands.NewStateQuery(logger, stateValidator, terraformManager, infrastructureManager, commands.EnvIDPropertyName)
	commandSet["latest-error"] = commands.NewLatestError(logger, stateValidator)
	commandSet["print-env"] = commands.NewPrintEnv(logger, stateValidator, terraformManager)
	commandSet["cloud-config"] = commands.NewCloudConfig(logger, stateValidator, cloudConfigManager)
	commandSet["bosh-deployment-vars"] = commands.NewBOSHDeploymentVars(logger, boshManager, stateValidator, terraformManager)
	app := application.New(commandSet, *appConfig, usage)
	err = app.Run()
	if err != nil {
		log.Fatalf("\n\n%s\n", err)
	}
}
|
package api
import (
"database/sql"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"github.com/mdlayher/wavepipe/data"
"github.com/mdlayher/wavepipe/transcode"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
)
// GetTranscode returns a transcoded media file stream from wavepipe. On success, this API will
// return a binary transcode. On failure, it will return a JSON error.
func GetTranscode(httpReq *http.Request, httpRes http.ResponseWriter, r render.Render, params martini.Params) {
	// Output struct for transcode errors
	res := ErrorResponse{render: r}

	// Check API version
	if version, ok := params["version"]; ok {
		// Check if this API call is supported in the advertised version
		if !apiVersionSet.Has(version) {
			res.RenderError(400, "unsupported API version: "+version)
			return
		}
	}

	// Check for an ID parameter
	pID, ok := params["id"]
	if !ok {
		res.RenderError(400, "no integer transcode ID provided")
		return
	}

	// Verify valid integer ID
	id, err := strconv.Atoi(pID)
	if err != nil {
		res.RenderError(400, "invalid integer transcode ID")
		return
	}

	// Attempt to load the song with matching ID
	song := new(data.Song)
	song.ID = id
	if err := song.Load(); err != nil {
		// Check for invalid ID
		if err == sql.ErrNoRows {
			res.RenderError(404, "song ID not found")
			return
		}

		// All other errors
		log.Println(err)
		res.ServerError()
		return
	}

	// Attempt to access data stream (verifies the file is readable before
	// spawning ffmpeg)
	stream, err := song.Stream()
	if err != nil {
		log.Println(err)
		res.ServerError()
		return
	}
	defer stream.Close()

	// Check for an input codec; default to MP3
	query := httpReq.URL.Query()
	codec := strings.ToUpper(query.Get("codec"))
	if codec == "" {
		codec = "MP3"
	}

	// Check for an input quality; default to 192kbps
	quality := query.Get("quality")
	if quality == "" {
		quality = "192"
	}

	// Create a transcoder using factory
	transcoder, err := transcode.Factory(codec, quality)
	if err != nil {
		// Check for client errors
		switch err {
		// Invalid codec selected
		case transcode.ErrInvalidCodec:
			res.RenderError(400, "invalid transcoder codec: "+codec)
			return
		// Invalid quality for codec
		case transcode.ErrInvalidQuality:
			res.RenderError(400, "invalid quality for codec "+codec+": "+quality)
			return
		// Transcoding subsystem disabled
		case transcode.ErrTranscodingDisabled:
			res.RenderError(503, "ffmpeg not found, transcoding disabled")
			return
		// MP3 transcoding disabled
		case transcode.ErrMP3Disabled:
			res.RenderError(503, "ffmpeg codec "+transcode.FFMpegMP3Codec+" not found, MP3 transcoding disabled")
			return
		// OGG transcoding disabled
		case transcode.ErrOGGDisabled:
			res.RenderError(503, "ffmpeg codec "+transcode.FFMpegOGGCodec+" not found, OGG transcoding disabled")
			return
		// All other errors
		default:
			log.Println(err)
			res.ServerError()
			return
		}
	}

	// Start the transcoder, grab output stream
	transcodeStream, err := transcoder.Start(song)
	if err != nil {
		log.Println(err)
		res.ServerError()
		// BUG FIX: previously execution fell through here and streamed from a
		// transcoder that failed to start.
		return
	}

	// Output the command ffmpeg will use to create the transcode
	log.Println("transcode: command:", transcoder.Command())

	// Now that ffmpeg has started, we must assume binary data is being transferred,
	// so no more error JSON may be sent.

	// Generate a string used for logging this operation
	opStr := fmt.Sprintf("[#%05d] %s - %s [%s %dkbps -> %s %s]", song.ID, song.Artist, song.Title,
		data.CodecMap[song.FileTypeID], song.Bitrate, transcoder.Codec(), transcoder.Quality())

	// Attempt to send transcoded file stream over HTTP
	log.Println("transcode: starting:", opStr)

	// Send transcode stream, no size for now (estimate later), set MIME type from transcoder
	mimeType := transcoder.MIMEType()
	if err := httpStream(song, mimeType, -1, transcodeStream, httpRes); err != nil {
		// Client resets are routine; do not log them
		if strings.Contains(err.Error(), "connection reset by peer") || strings.Contains(err.Error(), "broken pipe") {
			return
		}
		log.Println("transcode: error:", err)
		return
	}

	// Wait for ffmpeg to exit
	if err := transcoder.Wait(); err != nil {
		log.Println(err)
		return
	}

	log.Println("transcode: completed:", opStr)
}
Add OPUS transcoding to API, detect if disabled
package api
import (
"database/sql"
"fmt"
"log"
"net/http"
"strconv"
"strings"
"github.com/mdlayher/wavepipe/data"
"github.com/mdlayher/wavepipe/transcode"
"github.com/go-martini/martini"
"github.com/martini-contrib/render"
)
// GetTranscode returns a transcoded media file stream from wavepipe. On success, this API will
// return a binary transcode. On failure, it will return a JSON error.
func GetTranscode(httpReq *http.Request, httpRes http.ResponseWriter, r render.Render, params martini.Params) {
	// Output struct for transcode errors
	res := ErrorResponse{render: r}

	// Check API version
	if version, ok := params["version"]; ok {
		// Check if this API call is supported in the advertised version
		if !apiVersionSet.Has(version) {
			res.RenderError(400, "unsupported API version: "+version)
			return
		}
	}

	// Check for an ID parameter
	pID, ok := params["id"]
	if !ok {
		res.RenderError(400, "no integer transcode ID provided")
		return
	}

	// Verify valid integer ID
	id, err := strconv.Atoi(pID)
	if err != nil {
		res.RenderError(400, "invalid integer transcode ID")
		return
	}

	// Attempt to load the song with matching ID
	song := new(data.Song)
	song.ID = id
	if err := song.Load(); err != nil {
		// Check for invalid ID
		if err == sql.ErrNoRows {
			res.RenderError(404, "song ID not found")
			return
		}

		// All other errors
		log.Println(err)
		res.ServerError()
		return
	}

	// Attempt to access data stream (verifies the file is readable before
	// spawning ffmpeg)
	stream, err := song.Stream()
	if err != nil {
		log.Println(err)
		res.ServerError()
		return
	}
	defer stream.Close()

	// Check for an input codec; default to MP3
	query := httpReq.URL.Query()
	codec := strings.ToUpper(query.Get("codec"))
	if codec == "" {
		codec = "MP3"
	}

	// Check for an input quality; default to 192kbps
	quality := query.Get("quality")
	if quality == "" {
		quality = "192"
	}

	// Create a transcoder using factory
	transcoder, err := transcode.Factory(codec, quality)
	if err != nil {
		// Check for client errors
		switch err {
		// Invalid codec selected
		case transcode.ErrInvalidCodec:
			res.RenderError(400, "invalid transcoder codec: "+codec)
			return
		// Invalid quality for codec
		case transcode.ErrInvalidQuality:
			res.RenderError(400, "invalid quality for codec "+codec+": "+quality)
			return
		// Transcoding subsystem disabled
		case transcode.ErrTranscodingDisabled:
			res.RenderError(503, "ffmpeg not found, transcoding disabled")
			return
		// MP3 transcoding disabled
		case transcode.ErrMP3Disabled:
			res.RenderError(503, "ffmpeg codec "+transcode.FFmpegMP3Codec+" not found, MP3 transcoding disabled")
			return
		// OGG transcoding disabled
		case transcode.ErrOGGDisabled:
			res.RenderError(503, "ffmpeg codec "+transcode.FFmpegOGGCodec+" not found, OGG transcoding disabled")
			return
		// OPUS transcoding disabled
		case transcode.ErrOPUSDisabled:
			res.RenderError(503, "ffmpeg codec "+transcode.FFmpegOPUSCodec+" not found, OPUS transcoding disabled")
			return
		// All other errors
		default:
			log.Println(err)
			res.ServerError()
			return
		}
	}

	// Start the transcoder, grab output stream
	transcodeStream, err := transcoder.Start(song)
	if err != nil {
		log.Println(err)
		res.ServerError()
		// BUG FIX: previously execution fell through here and streamed from a
		// transcoder that failed to start.
		return
	}

	// Output the command ffmpeg will use to create the transcode
	log.Println("transcode: command:", transcoder.Command())

	// Now that ffmpeg has started, we must assume binary data is being transferred,
	// so no more error JSON may be sent.

	// Generate a string used for logging this operation
	opStr := fmt.Sprintf("[#%05d] %s - %s [%s %dkbps -> %s %s]", song.ID, song.Artist, song.Title,
		data.CodecMap[song.FileTypeID], song.Bitrate, transcoder.Codec(), transcoder.Quality())

	// Attempt to send transcoded file stream over HTTP
	log.Println("transcode: starting:", opStr)

	// Send transcode stream, no size for now (estimate later), set MIME type from transcoder
	mimeType := transcoder.MIMEType()
	if err := httpStream(song, mimeType, -1, transcodeStream, httpRes); err != nil {
		// Client resets are routine; do not log them
		if strings.Contains(err.Error(), "connection reset by peer") || strings.Contains(err.Error(), "broken pipe") {
			return
		}
		log.Println("transcode: error:", err)
		return
	}

	// Wait for ffmpeg to exit
	if err := transcoder.Wait(); err != nil {
		log.Println(err)
		return
	}

	log.Println("transcode: completed:", opStr)
}
|
package apig
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"text/template"
"github.com/wantedly/apig/msg"
"github.com/wantedly/apig/util"
)
var r = regexp.MustCompile(`_templates/skeleton/.*\.tmpl$`)
// generateSkeleton renders every _templates/skeleton/*.tmpl asset into outDir
// concurrently, creating directories as needed. The first error any worker
// encounters is returned; remaining workers run to completion but their
// errors are dropped.
func generateSkeleton(detail *Detail, outDir string) error {
	var wg sync.WaitGroup

	// Size 1 plus the non-blocking send in fail() guarantees no worker ever
	// blocks on a full channel. The old code let a second failing worker
	// block forever (goroutine leak), kept working after errors, and its
	// done/errCh select race could drop the error entirely.
	errCh := make(chan error, 1)

	for _, skeleton := range AssetNames() {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			if !r.MatchString(s) {
				return
			}

			// fail records the first error without blocking.
			fail := func(err error) {
				select {
				case errCh <- err:
				default:
				}
			}

			trim := strings.Replace(s, "_templates/skeleton/", "", 1)
			path := strings.Replace(trim, ".tmpl", "", 1)
			dstPath := filepath.Join(outDir, path)

			body, err := Asset(s)
			if err != nil {
				fail(err)
				return
			}

			tmpl, err := template.New("complex").Parse(string(body))
			if err != nil {
				fail(err)
				return
			}

			var buf bytes.Buffer
			if err := tmpl.Execute(&buf, detail); err != nil {
				fail(err)
				return
			}

			if !util.FileExists(filepath.Dir(dstPath)) {
				if err := util.Mkdir(filepath.Dir(dstPath)); err != nil {
					fail(err)
					return
				}
			}

			if err := ioutil.WriteFile(dstPath, buf.Bytes(), 0644); err != nil {
				fail(err)
				return
			}

			msg.Printf("\t\x1b[32m%s\x1b[0m %s\n", "create", dstPath)
		}(skeleton)
	}
	wg.Wait()

	// Report the first recorded error, if any.
	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}
// Skeleton generates a new project skeleton under
// $GOPATH/src/<vcs>/<user>/<project> and returns a process exit code:
// 0 on success, 1 when the directory already exists or generation fails.
func Skeleton(gopath, vcs, username, project, namespace, database string) int {
	detail := &Detail{
		VCS:       vcs,
		User:      username,
		Project:   project,
		Namespace: namespace,
		Database:  database,
	}

	outDir := filepath.Join(gopath, "src", detail.VCS, detail.User, detail.Project)
	if util.FileExists(outDir) {
		// BUG FIX: message previously read "%s is already exists".
		fmt.Fprintf(os.Stderr, "%s already exists", outDir)
		return 1
	}

	if err := generateSkeleton(detail, outDir); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return 1
	}

	msg.Printf("===> Created %s", outDir)
	return 0
}
Format skeleton files
package apig
import (
"bytes"
"fmt"
"go/format"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"text/template"
"github.com/wantedly/apig/msg"
"github.com/wantedly/apig/util"
)
var r = regexp.MustCompile(`_templates/skeleton/.*\.tmpl$`)
// generateSkeleton renders every _templates/skeleton/*.tmpl asset into outDir
// concurrently, gofmt-ing generated .go files and creating directories as
// needed. The first error any worker encounters is returned; remaining
// workers run to completion but their errors are dropped.
func generateSkeleton(detail *Detail, outDir string) error {
	var wg sync.WaitGroup

	// Size 1 plus the non-blocking send in fail() guarantees no worker ever
	// blocks on a full channel. The old code let a second failing worker
	// block forever (goroutine leak), kept working after errors, and its
	// done/errCh select race could drop the error entirely.
	errCh := make(chan error, 1)

	for _, skeleton := range AssetNames() {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			if !r.MatchString(s) {
				return
			}

			// fail records the first error without blocking.
			fail := func(err error) {
				select {
				case errCh <- err:
				default:
				}
			}

			trim := strings.Replace(s, "_templates/skeleton/", "", 1)
			path := strings.Replace(trim, ".tmpl", "", 1)
			dstPath := filepath.Join(outDir, path)

			body, err := Asset(s)
			if err != nil {
				fail(err)
				return
			}

			tmpl, err := template.New("complex").Parse(string(body))
			if err != nil {
				fail(err)
				return
			}

			var buf bytes.Buffer
			if err := tmpl.Execute(&buf, detail); err != nil {
				fail(err)
				return
			}

			// gofmt generated Go sources; copy everything else verbatim.
			var src []byte
			if strings.HasSuffix(path, ".go") {
				src, err = format.Source(buf.Bytes())
				if err != nil {
					fail(err)
					return
				}
			} else {
				src = buf.Bytes()
			}

			if !util.FileExists(filepath.Dir(dstPath)) {
				if err := util.Mkdir(filepath.Dir(dstPath)); err != nil {
					fail(err)
					return
				}
			}

			if err := ioutil.WriteFile(dstPath, src, 0644); err != nil {
				fail(err)
				return
			}

			msg.Printf("\t\x1b[32m%s\x1b[0m %s\n", "create", dstPath)
		}(skeleton)
	}
	wg.Wait()

	// Report the first recorded error, if any.
	select {
	case err := <-errCh:
		return err
	default:
		return nil
	}
}
// Skeleton generates a new project skeleton under
// $GOPATH/src/<vcs>/<user>/<project> and returns a process exit code:
// 0 on success, 1 when the directory already exists or generation fails.
func Skeleton(gopath, vcs, username, project, namespace, database string) int {
	detail := &Detail{
		VCS:       vcs,
		User:      username,
		Project:   project,
		Namespace: namespace,
		Database:  database,
	}

	outDir := filepath.Join(gopath, "src", detail.VCS, detail.User, detail.Project)
	if util.FileExists(outDir) {
		// BUG FIX: message previously read "%s is already exists".
		fmt.Fprintf(os.Stderr, "%s already exists", outDir)
		return 1
	}

	if err := generateSkeleton(detail, outDir); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return 1
	}

	msg.Printf("===> Created %s", outDir)
	return 0
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/golang/gddo/httputil"
"github.com/gorilla/mux"
"github.com/yunify/metad/backends"
"github.com/yunify/metad/log"
"github.com/yunify/metad/metadata"
"github.com/yunify/metad/util/flatmap"
yaml "gopkg.in/yaml.v2"
"net"
"net/http"
"os"
"os/signal"
"sort"
"strings"
"syscall"
"time"
)
const (
ContentText = 1
ContentJSON = 2
ContentYAML = 3
)
// HttpError pairs an HTTP status code with a human-readable message; it is
// the error half of a handlerFunc result.
type HttpError struct {
	Status  int
	Message string
}

// NewHttpError builds an HttpError with an explicit status code.
func NewHttpError(status int, Message string) *HttpError {
	return &HttpError{Status: status, Message: Message}
}

// NewServerError wraps an internal error as a 500 HttpError.
func NewServerError(error error) *HttpError {
	return &HttpError{Status: http.StatusInternalServerError, Message: error.Error()}
}

// Error implements the error interface.
func (e HttpError) Error() string {
	// fmt.Sprintf("%s", e.Message) was a no-op wrapper (staticcheck S1025).
	return e.Message
}
// handlerFunc is the internal handler signature: a handler returns either a
// value to render in the negotiated content type, or an HttpError to report.
type handlerFunc func(req *http.Request) (interface{}, *HttpError)

// Metad ties the metadata repository to its two HTTP routers: the data-plane
// router served by Serve and the management router served by watchManage.
type Metad struct {
	config       *Config
	metadataRepo *metadata.MetadataRepo
	router       *mux.Router
	manageRouter *mux.Router
	// resyncChan carries resync requests; the nested reply channel may be
	// nil for fire-and-forget requests (see watchSignals).
	resyncChan chan chan error
}
// New builds a Metad from config: it connects the configured backend store,
// creates the metadata repository and both routers. Routes are not registered
// until Init is called.
func New(config *Config) (*Metad, error) {
	backendsConfig := backends.Config{
		Backend:      config.Backend,
		BasicAuth:    config.BasicAuth,
		ClientCaKeys: config.ClientCaKeys,
		ClientCert:   config.ClientCert,
		ClientKey:    config.ClientKey,
		BackendNodes: config.BackendNodes,
		Password:     config.Password,
		Username:     config.Username,
		Prefix:       config.Prefix,
		Group:        config.Group,
	}
	storeClient, err := backends.New(backendsConfig)
	if err != nil {
		return nil, err
	}
	metadataRepo := metadata.New(config.OnlySelf, storeClient)
	return &Metad{config: config, metadataRepo: metadataRepo, router: mux.NewRouter(), manageRouter: mux.NewRouter(), resyncChan: make(chan chan error)}, nil
}
// Init starts the backend sync loop and registers all HTTP routes. Call once
// after New and before Serve.
func (m *Metad) Init() {
	m.metadataRepo.StartSync()
	m.initRouter()
	m.initManageRouter()
}

// initRouter registers the data-plane routes: /self[/...] for caller-scoped
// metadata and a catch-all /{nodePath} for the raw tree. GET/HEAD only.
func (m *Metad) initRouter() {
	m.router.HandleFunc("/favicon.ico", http.NotFound)
	m.router.HandleFunc("/self", m.handlerWrapper(m.selfHandler)).
		Methods("GET", "HEAD")
	m.router.HandleFunc("/self/{nodePath:.*}", m.handlerWrapper(m.selfHandler)).
		Methods("GET", "HEAD")
	m.router.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.rootHandler)).
		Methods("GET", "HEAD")
}

// initManageRouter registers the management API under /v1: POST /resync plus
// CRUD for /mapping and /data, each at both collection level (no nodePath)
// and at /{nodePath} level.
func (m *Metad) initManageRouter() {
	m.manageRouter.HandleFunc("/favicon.ico", http.NotFound)
	v1 := m.manageRouter.PathPrefix("/v1").Subrouter()
	v1.HandleFunc("/resync", m.handlerWrapper(m.httpResync)).Methods("POST")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingGet)).Methods("GET")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingUpdate)).Methods("POST", "PUT")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingDelete)).Methods("DELETE")
	mapping := v1.PathPrefix("/mapping").Subrouter()
	//mapping.HandleFunc("", mappingGET).Methods("GET")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingGet)).Methods("GET")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingUpdate)).Methods("POST", "PUT")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingDelete)).Methods("DELETE")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataGet)).Methods("GET")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataUpdate)).Methods("POST", "PUT")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataDelete)).Methods("DELETE")
	data := v1.PathPrefix("/data").Subrouter()
	//mapping.HandleFunc("", mappingGET).Methods("GET")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataGet)).Methods("GET")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataUpdate)).Methods("POST", "PUT")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataDelete)).Methods("DELETE")
}
// Serve installs signal handlers, starts the management listener in the
// background and then blocks serving the data-plane API on config.Listen.
func (m *Metad) Serve() {
	m.watchSignals()
	m.watchManage()
	log.Info("Listening on %s", m.config.Listen)
	log.Fatal("%v", http.ListenAndServe(m.config.Listen, m.router))
}

// Stop halts the backend sync loop.
func (m *Metad) Stop() {
	m.metadataRepo.StopSync()
}

// watchSignals installs the process signal handlers:
//   - SIGHUP enqueues a resync; a dedicated goroutine serializes resync
//     requests arriving from signals and from the manage API (httpResync).
//   - SIGINT/SIGTERM stop the repo sync, then re-raise the signal with
//     default disposition (after signal.Stop) so the process exits with the
//     conventional status; as pid 1 the re-raise would be ignored, so the
//     process exits directly instead.
func (m *Metad) watchSignals() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			log.Info("Received HUP signal")
			// nil reply channel: fire-and-forget resync.
			m.resyncChan <- nil
		}
	}()
	go func() {
		for resp := range m.resyncChan {
			err := m.resync()
			if resp != nil {
				resp <- err
			}
		}
	}()
	notifier := make(chan os.Signal, 1)
	signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-notifier
		log.Info("Received stop signal")
		signal.Stop(notifier)
		m.Stop()
		pid := syscall.Getpid()
		// exit directly if it is the "init" process, since the kernel will not help to kill pid 1.
		if pid == 1 {
			os.Exit(0)
		}
		syscall.Kill(pid, sig.(syscall.Signal))
	}()
}

// watchManage serves the management API in the background.
// NOTE(review): the ListenAndServe error is discarded, so a bad ListenManage
// address fails silently — confirm that is acceptable.
func (m *Metad) watchManage() {
	log.Info("Listening for Manage on %s", m.config.ListenManage)
	go http.ListenAndServe(m.config.ListenManage, m.manageRouter)
}

// resync rebuilds repository state from the backend. It always returns nil;
// the error return exists for the resyncChan reply contract.
func (m *Metad) resync() error {
	m.metadataRepo.ReSync()
	return nil
}
// httpResync forwards a resync request to the goroutine started by
// watchSignals and waits for its result. POST /v1/resync.
func (m *Metad) httpResync(req *http.Request) (interface{}, *HttpError) {
	respChan := make(chan error)
	m.resyncChan <- respChan
	if err := <-respChan; err != nil {
		return nil, NewServerError(err)
	}
	return nil, nil
}
// nodePathVar extracts the {nodePath} route variable, defaulting to "/" for
// the collection-level routes that carry no path.
func nodePathVar(req *http.Request) string {
	nodePath := mux.Vars(req)["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	return nodePath
}

// subsVar parses the optional comma-separated "subs" form value.
func subsVar(req *http.Request) []string {
	if subsParam := req.FormValue("subs"); subsParam != "" {
		return strings.Split(subsParam, ",")
	}
	return nil
}

// dataGet returns the metadata subtree at nodePath, or 404 if absent.
func (m *Metad) dataGet(req *http.Request) (interface{}, *HttpError) {
	val := m.metadataRepo.GetData(nodePathVar(req))
	if val == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	}
	return val, nil
}

// dataUpdate stores the JSON request body at nodePath.
// POST replaces the existing value; PUT merges into it.
func (m *Metad) dataUpdate(req *http.Request) (interface{}, *HttpError) {
	nodePath := nodePathVar(req)
	var data interface{}
	if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
		return nil, NewHttpError(http.StatusBadRequest, fmt.Sprintf("invalid json format, error:%s", err.Error()))
	}
	// POST means replace old value; PUT means merge to old value.
	replace := "POST" == strings.ToUpper(req.Method)
	if err := m.metadataRepo.PutData(nodePath, data, replace); err != nil {
		if log.IsDebugEnable() {
			log.Debug("dataUpdate nodePath:%s, data:%v, error:%s", nodePath, data, err.Error())
		}
		return nil, NewServerError(err)
	}
	return nil, nil
}

// dataDelete removes nodePath, or only the "subs" children when given.
func (m *Metad) dataDelete(req *http.Request) (interface{}, *HttpError) {
	if err := m.metadataRepo.DeleteData(nodePathVar(req), subsVar(req)...); err != nil {
		return nil, NewServerError(err)
	}
	return nil, nil
}

// mappingGet returns the mapping subtree at nodePath, or 404 if absent.
func (m *Metad) mappingGet(req *http.Request) (interface{}, *HttpError) {
	val := m.metadataRepo.GetMapping(nodePathVar(req))
	if val == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	}
	return val, nil
}

// mappingUpdate stores the JSON request body as mapping data at nodePath.
// POST replaces the existing value; PUT merges into it.
func (m *Metad) mappingUpdate(req *http.Request) (interface{}, *HttpError) {
	nodePath := nodePathVar(req)
	var data interface{}
	if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
		return nil, NewHttpError(http.StatusBadRequest, fmt.Sprintf("invalid json format, error:%s", err.Error()))
	}
	// POST means replace old value; PUT means merge to old value.
	replace := "POST" == strings.ToUpper(req.Method)
	if err := m.metadataRepo.PutMapping(nodePath, data, replace); err != nil {
		if log.IsDebugEnable() {
			log.Debug("mappingUpdate nodePath:%s, data:%v, error:%s", nodePath, data, err.Error())
		}
		return nil, NewServerError(err)
	}
	return nil, nil
}

// mappingDelete removes the mapping at nodePath, or only the "subs" children
// when given.
func (m *Metad) mappingDelete(req *http.Request) (interface{}, *HttpError) {
	if err := m.metadataRepo.DeleteMapping(nodePathVar(req), subsVar(req)...); err != nil {
		return nil, NewServerError(err)
	}
	return nil, nil
}
// contentType negotiates the response representation for req against the
// supported offers, defaulting to plain text.
func contentType(req *http.Request) int {
	offers := []string{
		"text/plain",
		"application/json",
		"application/yaml",
		"application/x-yaml",
		"text/yaml",
		"text/x-yaml",
	}
	negotiated := httputil.NegotiateContentType(req, offers, "text/plain")
	switch {
	case strings.Contains(negotiated, "json"):
		return ContentJSON
	case strings.Contains(negotiated, "yaml"):
		return ContentYAML
	default:
		return ContentText
	}
}
// rootHandler serves GET /{nodePath} over the whole metadata tree. With
// ?wait=true it blocks on Watch until the subtree changes; adding
// ?change=false then returns the full re-read subtree instead of the change
// set. A nil result maps to 404.
// NOTE(review): in the wait=true&change=false case the Watch result is
// discarded and Root is read afterwards — confirm the extra watch round-trip
// is intended.
func (m *Metad) rootHandler(req *http.Request) (interface{}, *HttpError) {
	clientIP := m.requestIP(req)
	vars := mux.Vars(req)
	nodePath := vars["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	wait := strings.ToLower(req.FormValue("wait")) == "true"
	var result interface{}
	if wait {
		change := strings.ToLower(req.FormValue("change")) != "false"
		result = m.metadataRepo.Watch(clientIP, nodePath)
		if !change {
			result = m.metadataRepo.Root(clientIP, nodePath)
		}
	} else {
		result = m.metadataRepo.Root(clientIP, nodePath)
	}
	if result == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	} else {
		return result, nil
	}
}
// selfHandler serves GET /self[/{nodePath}]: metadata scoped to the calling
// client's IP. With ?wait=true it blocks on WatchSelf until the subtree
// changes; adding ?change=false then returns the full re-read subtree
// instead of the change set. A nil result maps to 404.
// NOTE(review): in the wait=true&change=false case the WatchSelf result is
// discarded and Self is read afterwards — confirm that is intended.
func (m *Metad) selfHandler(req *http.Request) (interface{}, *HttpError) {
	clientIP := m.requestIP(req)
	vars := mux.Vars(req)
	nodePath := vars["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	wait := strings.ToLower(req.FormValue("wait")) == "true"
	var result interface{}
	if wait {
		change := strings.ToLower(req.FormValue("change")) != "false"
		result = m.metadataRepo.WatchSelf(clientIP, nodePath)
		if !change {
			result = m.metadataRepo.Self(clientIP, nodePath)
		}
	} else {
		result = m.metadataRepo.Self(clientIP, nodePath)
	}
	if result == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	} else {
		return result, nil
	}
}
// respondError writes msg as an error document with the given HTTP status,
// encoded per the client's negotiated content type.
func respondError(w http.ResponseWriter, req *http.Request, msg string, statusCode int) {
	obj := make(map[string]interface{})
	obj["message"] = msg
	obj["type"] = "ERROR"
	obj["code"] = statusCode

	switch contentType(req) {
	case ContentText:
		http.Error(w, msg, statusCode)
	case ContentJSON:
		bytes, err := json.Marshal(obj)
		if err == nil {
			http.Error(w, string(bytes), statusCode)
		} else {
			http.Error(w, "{\"type\": \"error\", \"message\": \"JSON marshal error\"}", http.StatusInternalServerError)
		}
	case ContentYAML:
		bytes, err := yaml.Marshal(obj)
		if err == nil {
			http.Error(w, string(bytes), statusCode)
		} else {
			// BUG FIX: the fallback message previously claimed "JSON marshal
			// error" on the YAML path.
			http.Error(w, "type: \"error\"\nmessage: \"YAML marshal error\"", http.StatusInternalServerError)
		}
	}
}
// respondSuccessDefault writes a generic "OK" document in the client's
// negotiated content type, used when a handler produced no result value.
func respondSuccessDefault(w http.ResponseWriter, req *http.Request) {
	ok := map[string]interface{}{
		"type": "OK",
		"code": 200,
	}
	switch contentType(req) {
	case ContentText:
		respondText(w, req, "OK")
	case ContentJSON:
		respondJSON(w, req, ok)
	case ContentYAML:
		respondYAML(w, req, ok)
	}
}

// respondSuccess renders val in the negotiated content type and reports the
// number of bytes written (0 for unknown content types).
func respondSuccess(w http.ResponseWriter, req *http.Request, val interface{}) int {
	switch contentType(req) {
	case ContentJSON:
		return respondJSON(w, req, val)
	case ContentYAML:
		return respondYAML(w, req, val)
	case ContentText:
		return respondText(w, req, val)
	}
	return 0
}
// respondText renders val as plain text and reports the bytes written.
// Strings pass through verbatim; maps are flattened and emitted as sorted
// "key<TAB>value" lines; anything else is logged and produces empty output.
func respondText(w http.ResponseWriter, req *http.Request, val interface{}) int {
	if val == nil {
		fmt.Fprint(w, "")
		return 0
	}

	var out bytes.Buffer
	switch v := val.(type) {
	case string:
		out.WriteString(v)
	case map[string]interface{}:
		flat := flatmap.Flatten(v)
		keys := make([]string, 0, len(flat))
		for key := range flat {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			out.WriteString(key)
			out.WriteString("\t")
			out.WriteString(flat[key])
			out.WriteString("\n")
		}
	default:
		log.Error("Value is of a type I don't know how to handle: %v", val)
	}

	w.Write(out.Bytes())
	return out.Len()
}
// respondJSON renders val as JSON and reports the bytes written. Any
// non-empty "pretty" form value other than "false" selects indented output.
func respondJSON(w http.ResponseWriter, req *http.Request, val interface{}) int {
	prettyParam := req.FormValue("pretty")
	pretty := prettyParam != "" && prettyParam != "false"

	// Renamed from "bytes", which shadowed the imported bytes package.
	var body []byte
	var err error
	if pretty {
		body, err = json.MarshalIndent(val, "", " ")
	} else {
		body, err = json.Marshal(val)
	}
	if err != nil {
		respondError(w, req, "Error serializing to JSON: "+err.Error(), http.StatusInternalServerError)
		return 0
	}
	w.Write(body)
	return len(body)
}
// respondYAML renders val as YAML and reports the bytes written.
func respondYAML(w http.ResponseWriter, req *http.Request, val interface{}) int {
	// Renamed from "bytes", which shadowed the imported bytes package.
	body, err := yaml.Marshal(val)
	if err != nil {
		respondError(w, req, "Error serializing to YAML: "+err.Error(), http.StatusInternalServerError)
		return 0
	}
	w.Write(body)
	return len(body)
}
// requestIP returns the client address for req. When EnableXff is set, a
// non-empty X-Forwarded-For header wins verbatim; otherwise the host part of
// RemoteAddr is used.
// NOTE(review): X-Forwarded-For may hold a comma-separated proxy chain; this
// returns the whole header rather than the first hop — confirm downstream
// consumers expect that.
func (m *Metad) requestIP(req *http.Request) string {
	if m.config.EnableXff {
		clientIp := req.Header.Get("X-Forwarded-For")
		if len(clientIp) > 0 {
			return clientIp
		}
	}
	clientIp, _, _ := net.SplitHostPort(req.RemoteAddr)
	return clientIp
}
// handlerWrapper adapts a handlerFunc to an http.HandlerFunc: it renders the
// handler's result (or error) in the negotiated content type and writes one
// request-log line with status, elapsed microseconds and response length.
func (m *Metad) handlerWrapper(handler handlerFunc) func(w http.ResponseWriter, req *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		// BUG FIX: removed stray debug println(req.RequestURI).
		start := time.Now()
		result, err := handler(req)

		status := 200
		// Renamed from "len", which shadowed the builtin.
		var length int
		if err != nil {
			status = err.Status
			respondError(w, req, err.Message, status)
			m.errorLog(req, status, err.Message)
		} else {
			if log.IsDebugEnable() {
				log.Debug("reponse success: %v", result)
			}
			if result == nil {
				respondSuccessDefault(w, req)
			} else {
				length = respondSuccess(w, req, result)
			}
		}

		// BUG FIX: elapsed time now uses monotonic time.Since; the old
		// Nanosecond() subtraction wrapped at every second boundary and
		// produced negative/garbage durations.
		m.requestLog(req, status, int(time.Since(start)/time.Microsecond), length)
	}
}
// requestLog writes one access-log line: method, client IP, URI, request
// body length, status, elapsed time and response length.
// NOTE(review): parameters "time" and "len" shadow a package and a builtin.
func (m *Metad) requestLog(req *http.Request, status int, time int, len int) {
	log.Info("REQ\t%s\t%s\t%s\t%v\t%v\t%v\t%v", req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, time, len)
}

// errorLog records handler failures: 500s as errors, everything else as
// warnings.
func (m *Metad) errorLog(req *http.Request, status int, msg string) {
	if status == 500 {
		log.Error("ERR\t%s\t%s\t%s\t%v\t%v\t%s", req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, msg)
	} else {
		log.Warning("ERR\t%s\t%s\t%s\t%v\t%v\t%s", req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, msg)
	}
}
Remove stray debug println from the request handler wrapper.
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/golang/gddo/httputil"
"github.com/gorilla/mux"
"github.com/yunify/metad/backends"
"github.com/yunify/metad/log"
"github.com/yunify/metad/metadata"
"github.com/yunify/metad/util/flatmap"
yaml "gopkg.in/yaml.v2"
"net"
"net/http"
"os"
"os/signal"
"sort"
"strings"
"syscall"
"time"
)
const (
ContentText = 1
ContentJSON = 2
ContentYAML = 3
)
// HttpError pairs an HTTP status code with a human-readable message; it is
// the error half of a handlerFunc result.
type HttpError struct {
	Status  int
	Message string
}

// NewHttpError builds an HttpError with an explicit status code.
func NewHttpError(status int, Message string) *HttpError {
	return &HttpError{Status: status, Message: Message}
}

// NewServerError wraps an internal error as a 500 HttpError.
func NewServerError(error error) *HttpError {
	return &HttpError{Status: http.StatusInternalServerError, Message: error.Error()}
}

// Error implements the error interface.
func (e HttpError) Error() string {
	// fmt.Sprintf("%s", e.Message) was a no-op wrapper (staticcheck S1025).
	return e.Message
}
// handlerFunc is the internal handler signature: a handler returns either a
// value to render in the negotiated content type, or an HttpError to report.
type handlerFunc func(req *http.Request) (interface{}, *HttpError)

// Metad ties the metadata repository to its two HTTP routers: the data-plane
// router served by Serve and the management router served by watchManage.
type Metad struct {
	config       *Config
	metadataRepo *metadata.MetadataRepo
	router       *mux.Router
	manageRouter *mux.Router
	// resyncChan carries resync requests; the nested reply channel may be
	// nil for fire-and-forget requests (see watchSignals).
	resyncChan chan chan error
}

// New builds a Metad from config: it connects the configured backend store,
// creates the metadata repository and both routers. Routes are not registered
// until Init is called.
func New(config *Config) (*Metad, error) {
	backendsConfig := backends.Config{
		Backend:      config.Backend,
		BasicAuth:    config.BasicAuth,
		ClientCaKeys: config.ClientCaKeys,
		ClientCert:   config.ClientCert,
		ClientKey:    config.ClientKey,
		BackendNodes: config.BackendNodes,
		Password:     config.Password,
		Username:     config.Username,
		Prefix:       config.Prefix,
		Group:        config.Group,
	}
	storeClient, err := backends.New(backendsConfig)
	if err != nil {
		return nil, err
	}
	metadataRepo := metadata.New(config.OnlySelf, storeClient)
	return &Metad{config: config, metadataRepo: metadataRepo, router: mux.NewRouter(), manageRouter: mux.NewRouter(), resyncChan: make(chan chan error)}, nil
}
// Init starts the backend sync loop and registers all HTTP routes. Call once
// after New and before Serve.
func (m *Metad) Init() {
	m.metadataRepo.StartSync()
	m.initRouter()
	m.initManageRouter()
}

// initRouter registers the data-plane routes: /self[/...] for caller-scoped
// metadata and a catch-all /{nodePath} for the raw tree. GET/HEAD only.
func (m *Metad) initRouter() {
	m.router.HandleFunc("/favicon.ico", http.NotFound)
	m.router.HandleFunc("/self", m.handlerWrapper(m.selfHandler)).
		Methods("GET", "HEAD")
	m.router.HandleFunc("/self/{nodePath:.*}", m.handlerWrapper(m.selfHandler)).
		Methods("GET", "HEAD")
	m.router.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.rootHandler)).
		Methods("GET", "HEAD")
}

// initManageRouter registers the management API under /v1: POST /resync plus
// CRUD for /mapping and /data, each at both collection level (no nodePath)
// and at /{nodePath} level.
func (m *Metad) initManageRouter() {
	m.manageRouter.HandleFunc("/favicon.ico", http.NotFound)
	v1 := m.manageRouter.PathPrefix("/v1").Subrouter()
	v1.HandleFunc("/resync", m.handlerWrapper(m.httpResync)).Methods("POST")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingGet)).Methods("GET")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingUpdate)).Methods("POST", "PUT")
	v1.HandleFunc("/mapping", m.handlerWrapper(m.mappingDelete)).Methods("DELETE")
	mapping := v1.PathPrefix("/mapping").Subrouter()
	//mapping.HandleFunc("", mappingGET).Methods("GET")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingGet)).Methods("GET")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingUpdate)).Methods("POST", "PUT")
	mapping.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.mappingDelete)).Methods("DELETE")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataGet)).Methods("GET")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataUpdate)).Methods("POST", "PUT")
	v1.HandleFunc("/data", m.handlerWrapper(m.dataDelete)).Methods("DELETE")
	data := v1.PathPrefix("/data").Subrouter()
	//mapping.HandleFunc("", mappingGET).Methods("GET")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataGet)).Methods("GET")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataUpdate)).Methods("POST", "PUT")
	data.HandleFunc("/{nodePath:.*}", m.handlerWrapper(m.dataDelete)).Methods("DELETE")
}

// Serve installs signal handlers, starts the management listener in the
// background and then blocks serving the data-plane API on config.Listen.
func (m *Metad) Serve() {
	m.watchSignals()
	m.watchManage()
	log.Info("Listening on %s", m.config.Listen)
	log.Fatal("%v", http.ListenAndServe(m.config.Listen, m.router))
}

// Stop halts the backend sync loop.
func (m *Metad) Stop() {
	m.metadataRepo.StopSync()
}

// watchSignals installs the process signal handlers:
//   - SIGHUP enqueues a resync; a dedicated goroutine serializes resync
//     requests arriving from signals and from the manage API (httpResync).
//   - SIGINT/SIGTERM stop the repo sync, then re-raise the signal with
//     default disposition (after signal.Stop) so the process exits with the
//     conventional status; as pid 1 the re-raise would be ignored, so the
//     process exits directly instead.
func (m *Metad) watchSignals() {
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGHUP)
	go func() {
		for range c {
			log.Info("Received HUP signal")
			// nil reply channel: fire-and-forget resync.
			m.resyncChan <- nil
		}
	}()
	go func() {
		for resp := range m.resyncChan {
			err := m.resync()
			if resp != nil {
				resp <- err
			}
		}
	}()
	notifier := make(chan os.Signal, 1)
	signal.Notify(notifier, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-notifier
		log.Info("Received stop signal")
		signal.Stop(notifier)
		m.Stop()
		pid := syscall.Getpid()
		// exit directly if it is the "init" process, since the kernel will not help to kill pid 1.
		if pid == 1 {
			os.Exit(0)
		}
		syscall.Kill(pid, sig.(syscall.Signal))
	}()
}

// watchManage serves the management API in the background.
// NOTE(review): the ListenAndServe error is discarded, so a bad ListenManage
// address fails silently — confirm that is acceptable.
func (m *Metad) watchManage() {
	log.Info("Listening for Manage on %s", m.config.ListenManage)
	go http.ListenAndServe(m.config.ListenManage, m.manageRouter)
}

// resync rebuilds repository state from the backend. It always returns nil;
// the error return exists for the resyncChan reply contract.
func (m *Metad) resync() error {
	m.metadataRepo.ReSync()
	return nil
}
// httpResync forwards a resync request to the goroutine started by
// watchSignals and waits for its result. POST /v1/resync.
func (m *Metad) httpResync(req *http.Request) (interface{}, *HttpError) {
	respChan := make(chan error)
	m.resyncChan <- respChan
	if err := <-respChan; err != nil {
		return nil, NewServerError(err)
	}
	return nil, nil
}
func (m *Metad) dataGet(req *http.Request) (interface{}, *HttpError) {
vars := mux.Vars(req)
nodePath := vars["nodePath"]
if nodePath == "" {
nodePath = "/"
}
val := m.metadataRepo.GetData(nodePath)
if val == nil {
return nil, NewHttpError(http.StatusNotFound, "Not found")
} else {
return val, nil
}
}
func (m *Metad) dataUpdate(req *http.Request) (interface{}, *HttpError) {
vars := mux.Vars(req)
nodePath := vars["nodePath"]
if nodePath == "" {
nodePath = "/"
}
decoder := json.NewDecoder(req.Body)
var data interface{}
err := decoder.Decode(&data)
if err != nil {
return nil, NewHttpError(http.StatusBadRequest, fmt.Sprintf("invalid json format, error:%s", err.Error()))
} else {
// POST means replace old value
// PUT means merge to old value
replace := "POST" == strings.ToUpper(req.Method)
err = m.metadataRepo.PutData(nodePath, data, replace)
if err != nil {
if log.IsDebugEnable() {
log.Debug("dataUpdate nodePath:%s, data:%v, error:%s", nodePath, data, err.Error())
}
return nil, NewServerError(err)
} else {
return nil, nil
}
}
}
func (m *Metad) dataDelete(req *http.Request) (interface{}, *HttpError) {
vars := mux.Vars(req)
nodePath := vars["nodePath"]
if nodePath == "" {
nodePath = "/"
}
subsParam := req.FormValue("subs")
var subs []string
if subsParam != "" {
subs = strings.Split(subsParam, ",")
}
err := m.metadataRepo.DeleteData(nodePath, subs...)
if err != nil {
return nil, NewServerError(err)
} else {
return nil, nil
}
}
func (m *Metad) mappingGet(req *http.Request) (interface{}, *HttpError) {
vars := mux.Vars(req)
nodePath := vars["nodePath"]
if nodePath == "" {
nodePath = "/"
}
val := m.metadataRepo.GetMapping(nodePath)
if val == nil {
return nil, NewHttpError(http.StatusNotFound, "Not found")
} else {
return val, nil
}
}
// mappingUpdate serves POST/PUT on a mapping nodePath. The request body
// must be JSON; POST replaces the stored mapping, PUT merges into it.
// Invalid JSON yields a 400; repository failures yield a 500.
func (m *Metad) mappingUpdate(req *http.Request) (interface{}, *HttpError) {
	nodePath := mux.Vars(req)["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	var data interface{}
	if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
		return nil, NewHttpError(http.StatusBadRequest, fmt.Sprintf("invalid json format, error:%s", err.Error()))
	}
	// POST means replace old value; PUT means merge into old value.
	replace := "POST" == strings.ToUpper(req.Method)
	if err := m.metadataRepo.PutMapping(nodePath, data, replace); err != nil {
		if log.IsDebugEnable() {
			log.Debug("mappingUpdate nodePath:%s, data:%v, error:%s", nodePath, data, err.Error())
		}
		return nil, NewServerError(err)
	}
	return nil, nil
}
// mappingDelete serves DELETE on a mapping nodePath. The optional "subs"
// form value is a comma-separated list of child keys to delete; when
// absent the whole mapping subtree at nodePath is removed.
func (m *Metad) mappingDelete(req *http.Request) (interface{}, *HttpError) {
	nodePath := mux.Vars(req)["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	var subs []string
	if subsParam := req.FormValue("subs"); subsParam != "" {
		subs = strings.Split(subsParam, ",")
	}
	if err := m.metadataRepo.DeleteMapping(nodePath, subs...); err != nil {
		return nil, NewServerError(err)
	}
	return nil, nil
}
// contentType maps the request's negotiated Accept header onto one of
// the internal Content* constants, defaulting to plain text.
func contentType(req *http.Request) int {
	negotiated := httputil.NegotiateContentType(req, []string{
		"text/plain",
		"application/json",
		"application/yaml",
		"application/x-yaml",
		"text/yaml",
		"text/x-yaml",
	}, "text/plain")
	switch {
	case strings.Contains(negotiated, "json"):
		return ContentJSON
	case strings.Contains(negotiated, "yaml"):
		return ContentYAML
	default:
		return ContentText
	}
}
// rootHandler serves reads of the metadata tree rooted at nodePath.
// With ?wait=true it blocks in Watch until the subtree changes; by
// default the change events are returned, but with change=false the
// fresh subtree value is fetched after the watch fires. Returns 404
// when the path resolves to nothing.
func (m *Metad) rootHandler(req *http.Request) (interface{}, *HttpError) {
	clientIP := m.requestIP(req)
	vars := mux.Vars(req)
	nodePath := vars["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	wait := strings.ToLower(req.FormValue("wait")) == "true"
	var result interface{}
	if wait {
		// change defaults to true; only an explicit "false" disables it.
		change := strings.ToLower(req.FormValue("change")) != "false"
		result = m.metadataRepo.Watch(clientIP, nodePath)
		if !change {
			// Watch already blocked until a change happened; now return
			// the current subtree instead of the change set.
			result = m.metadataRepo.Root(clientIP, nodePath)
		}
	} else {
		result = m.metadataRepo.Root(clientIP, nodePath)
	}
	if result == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	} else {
		return result, nil
	}
}
// selfHandler mirrors rootHandler but serves the client-specific "self"
// view of the metadata (resolved per the requesting IP's mapping).
// With ?wait=true it blocks in WatchSelf until a change; change=false
// makes it return the fresh self value after the watch fires.
func (m *Metad) selfHandler(req *http.Request) (interface{}, *HttpError) {
	clientIP := m.requestIP(req)
	vars := mux.Vars(req)
	nodePath := vars["nodePath"]
	if nodePath == "" {
		nodePath = "/"
	}
	wait := strings.ToLower(req.FormValue("wait")) == "true"
	var result interface{}
	if wait {
		// change defaults to true; only an explicit "false" disables it.
		change := strings.ToLower(req.FormValue("change")) != "false"
		result = m.metadataRepo.WatchSelf(clientIP, nodePath)
		if !change {
			result = m.metadataRepo.Self(clientIP, nodePath)
		}
	} else {
		result = m.metadataRepo.Self(clientIP, nodePath)
	}
	if result == nil {
		return nil, NewHttpError(http.StatusNotFound, "Not found")
	} else {
		return result, nil
	}
}
// respondError writes msg to w with the given HTTP status code, encoded
// in the client's negotiated content type. If the structured encoding
// itself fails, a hard-coded 500 fallback body is written instead.
func respondError(w http.ResponseWriter, req *http.Request, msg string, statusCode int) {
	obj := make(map[string]interface{})
	obj["message"] = msg
	obj["type"] = "ERROR"
	obj["code"] = statusCode
	switch contentType(req) {
	case ContentText:
		http.Error(w, msg, statusCode)
	case ContentJSON:
		bytes, err := json.Marshal(obj)
		if err == nil {
			http.Error(w, string(bytes), statusCode)
		} else {
			http.Error(w, "{\"type\": \"error\", \"message\": \"JSON marshal error\"}", http.StatusInternalServerError)
		}
	case ContentYAML:
		bytes, err := yaml.Marshal(obj)
		if err == nil {
			http.Error(w, string(bytes), statusCode)
		} else {
			// BUG FIX: this fallback previously said "JSON marshal error"
			// even though it is the YAML branch.
			http.Error(w, "type: \"error\"\nmessage: \"YAML marshal error\"", http.StatusInternalServerError)
		}
	}
}
// respondSuccessDefault writes a generic "OK" success body in the
// client's negotiated content type.
func respondSuccessDefault(w http.ResponseWriter, req *http.Request) {
	body := map[string]interface{}{
		"type": "OK",
		"code": 200,
	}
	switch contentType(req) {
	case ContentText:
		respondText(w, req, "OK")
	case ContentJSON:
		respondJSON(w, req, body)
	case ContentYAML:
		respondYAML(w, req, body)
	}
}
// respondSuccess serializes val in the negotiated content type and
// returns the number of bytes written (0 for unknown content types).
func respondSuccess(w http.ResponseWriter, req *http.Request, val interface{}) int {
	ct := contentType(req)
	if ct == ContentText {
		return respondText(w, req, val)
	}
	if ct == ContentJSON {
		return respondJSON(w, req, val)
	}
	if ct == ContentYAML {
		return respondYAML(w, req, val)
	}
	return 0
}
// respondText renders val as plain text: strings verbatim, maps as
// sorted, tab-separated "flatkey<TAB>value" lines. Unknown types are
// logged and produce an empty body. Returns the number of bytes written.
func respondText(w http.ResponseWriter, req *http.Request, val interface{}) int {
	if val == nil {
		fmt.Fprint(w, "")
		return 0
	}
	var out bytes.Buffer
	switch v := val.(type) {
	case string:
		out.WriteString(v)
	case map[string]interface{}:
		flat := flatmap.Flatten(v)
		keys := make([]string, 0, len(flat))
		for key := range flat {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			out.WriteString(key)
			out.WriteString("\t")
			out.WriteString(flat[key])
			out.WriteString("\n")
		}
	default:
		log.Error("Value is of a type I don't know how to handle: %v", val)
	}
	w.Write(out.Bytes())
	return out.Len()
}
// respondJSON writes val as JSON — indented when the "pretty" form
// value is present and not "false" — and returns the byte count
// written (0 when marshalling failed and an error response was sent).
func respondJSON(w http.ResponseWriter, req *http.Request, val interface{}) int {
	prettyParam := req.FormValue("pretty")
	pretty := prettyParam != "" && prettyParam != "false"
	// Renamed from "bytes": the old local shadowed the bytes package.
	var body []byte
	var err error
	if pretty {
		body, err = json.MarshalIndent(val, "", " ")
	} else {
		body, err = json.Marshal(val)
	}
	if err != nil {
		respondError(w, req, "Error serializing to JSON: "+err.Error(), http.StatusInternalServerError)
		return 0
	}
	w.Write(body)
	return len(body)
}
// respondYAML writes val as YAML and returns the byte count written
// (0 when marshalling failed and an error response was sent).
func respondYAML(w http.ResponseWriter, req *http.Request, val interface{}) int {
	// Renamed from "bytes": the old local shadowed the bytes package.
	body, err := yaml.Marshal(val)
	if err != nil {
		respondError(w, req, "Error serializing to YAML: "+err.Error(), http.StatusInternalServerError)
		return 0
	}
	w.Write(body)
	return len(body)
}
// requestIP returns the client address for req: the X-Forwarded-For
// header when XFF support is enabled and the header is non-empty,
// otherwise the host part of RemoteAddr.
//
// NOTE(review): X-Forwarded-For may hold a comma-separated proxy chain;
// this returns the header verbatim — confirm callers expect that.
func (m *Metad) requestIP(req *http.Request) string {
	if m.config.EnableXff {
		// Renamed clientIp -> clientIP per Go initialism convention.
		if clientIP := req.Header.Get("X-Forwarded-For"); len(clientIP) > 0 {
			return clientIP
		}
	}
	clientIP, _, _ := net.SplitHostPort(req.RemoteAddr)
	return clientIP
}
// handlerWrapper adapts a handlerFunc into an http handler: it renders
// the handler's result (or error) in the negotiated content type and
// emits the access/error log lines.
func (m *Metad) handlerWrapper(handler handlerFunc) func(w http.ResponseWriter, req *http.Request) {
	return func(w http.ResponseWriter, req *http.Request) {
		// BUG FIX: the old code subtracted two time.Now().Nanosecond()
		// values; Nanosecond() wraps every second, so the difference could
		// be negative. Use a monotonic time.Since instead.
		start := time.Now()
		result, err := handler(req)
		elapsed := time.Since(start)
		status := 200
		var size int // renamed from "len", which shadowed the builtin
		if err != nil {
			status = err.Status
			respondError(w, req, err.Message, status)
			m.errorLog(req, status, err.Message)
		} else {
			if log.IsDebugEnable() {
				log.Debug("reponse success: %v", result)
			}
			if result == nil {
				respondSuccessDefault(w, req)
			} else {
				size = respondSuccess(w, req, result)
			}
		}
		// Log elapsed time in microseconds, as before.
		m.requestLog(req, status, int(elapsed/time.Microsecond), size)
	}
}
// requestLog emits one access-log line: method, client IP, URI, request
// content length, status, elapsed time (microseconds) and response size.
// Parameters renamed: "time" shadowed the time package, "len" the builtin.
func (m *Metad) requestLog(req *http.Request, status int, elapsedMicros int, size int) {
	log.Info("REQ\t%s\t%s\t%s\t%v\t%v\t%v\t%v", req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, elapsedMicros, size)
}
// errorLog records a failed request: server errors (500) at error
// level, every other status as a warning.
func (m *Metad) errorLog(req *http.Request, status int, msg string) {
	const format = "ERR\t%s\t%s\t%s\t%v\t%v\t%s"
	if status != 500 {
		log.Warning(format, req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, msg)
		return
	}
	log.Error(format, req.Method, m.requestIP(req), req.RequestURI, req.ContentLength, status, msg)
}
|
package gangliamr
import (
"github.com/daaku/go.ganglia/gmetric"
"github.com/daaku/go.metrics"
)
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter struct {
	metrics.Meter // backing meter; created lazily in register when nil
	Name          string // Required.
	Title         string
	Units         string // Default is "count".
	Description   string
	Groups        []string
	base          *meterBase // fans this meter out into five gmetric metrics
}
// writeMeta sends ganglia metadata for all of the meter's sub-metrics.
func (m *Meter) writeMeta(c *gmetric.Client) {
	m.base.writeMeta(c)
}
// writeValue sends current values for all of the meter's sub-metrics.
func (m *Meter) writeValue(c *gmetric.Client) {
	m.base.writeValue(c)
}
// register attaches the meter to the registry, creating the backing
// metrics.Meter if the caller did not supply one, and builds the
// meterBase that publishes the five derived ganglia metrics.
func (m *Meter) register(r *Registry) {
	if m.Meter == nil {
		m.Meter = metrics.NewMeter()
	}
	m.base = &meterBase{
		meterMetric: m,
		Name:        m.Name,
		Title:       m.Title,
		Units:       m.Units,
		Description: m.Description,
		Groups:      m.Groups,
	}
	m.base.register(r)
}
// meterMetric is the read-only view of a meter that meterBase needs in
// order to publish its count and rates.
type meterMetric interface {
	Count() int64
	Rate1() float64
	Rate5() float64
	Rate15() float64
	RateMean() float64
}
// meterBase fans a single meter out into five ganglia metrics: the raw
// count plus the 1/5/15-minute and mean rates.
type meterBase struct {
	meterMetric
	Name        string // Required.
	Title       string
	Units       string // Default is "count".
	Description string
	Groups      []string
	count       gmetric.Metric
	m1rate      gmetric.Metric
	m5rate      gmetric.Metric
	m15rate     gmetric.Metric
	meanRate    gmetric.Metric
}
// writeMeta sends ganglia metadata for each of the five sub-metrics.
func (m *meterBase) writeMeta(c *gmetric.Client) {
	c.WriteMeta(&m.count)
	c.WriteMeta(&m.m1rate)
	c.WriteMeta(&m.m5rate)
	c.WriteMeta(&m.m15rate)
	c.WriteMeta(&m.meanRate)
}
// writeValue sends the current count and rate readings to ganglia.
func (m *meterBase) writeValue(c *gmetric.Client) {
	c.WriteValue(&m.count, m.Count())
	c.WriteValue(&m.m1rate, m.Rate1())
	c.WriteValue(&m.m5rate, m.Rate5())
	c.WriteValue(&m.m15rate, m.Rate15())
	c.WriteValue(&m.meanRate, m.RateMean())
}
// register builds the five gmetric.Metric definitions this meter
// publishes (count, 1/5/15-minute rates, mean rate).
func (m *meterBase) register(r *Registry) {
	m.count = gmetric.Metric{
		Name: r.makeName(m.Name, "count"),
		// FIX: use the title as-is; appending "count" via makeOptional
		// produced titles like "Foo count" for the count metric.
		Title:       m.Title,
		Units:       "count",
		Description: makeOptional(m.Description, "count"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueInt32,
		Slope:       gmetric.SlopeBoth,
	}
	m.m1rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "one-minute"),
		Title:       makeOptional(m.Title, "one minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "one minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.m5rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "five-minute"),
		Title:       makeOptional(m.Title, "five minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "five minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.m15rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "fifteen-minute"),
		Title:       makeOptional(m.Title, "fifteen minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "fifteen minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.meanRate = gmetric.Metric{
		Name:        r.makeName(m.Name, "mean"),
		Title:       makeOptional(m.Title, "mean"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "mean"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
}
Don't append "count" to the title for meters.
package gangliamr
import (
"github.com/daaku/go.ganglia/gmetric"
"github.com/daaku/go.metrics"
)
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter struct {
	metrics.Meter // backing meter; created lazily in register when nil
	Name          string // Required.
	Title         string
	Units         string // Default is "count".
	Description   string
	Groups        []string
	base          *meterBase // fans this meter out into five gmetric metrics
}
// writeMeta sends ganglia metadata for all of the meter's sub-metrics.
func (m *Meter) writeMeta(c *gmetric.Client) {
	m.base.writeMeta(c)
}
// writeValue sends current values for all of the meter's sub-metrics.
func (m *Meter) writeValue(c *gmetric.Client) {
	m.base.writeValue(c)
}
// register attaches the meter to the registry, creating the backing
// metrics.Meter if needed, and builds the fan-out meterBase.
func (m *Meter) register(r *Registry) {
	if m.Meter == nil {
		m.Meter = metrics.NewMeter()
	}
	m.base = &meterBase{
		meterMetric: m,
		Name:        m.Name,
		Title:       m.Title,
		Units:       m.Units,
		Description: m.Description,
		Groups:      m.Groups,
	}
	m.base.register(r)
}
// meterMetric is the read-only view of a meter that meterBase needs.
type meterMetric interface {
	Count() int64
	Rate1() float64
	Rate5() float64
	Rate15() float64
	RateMean() float64
}
// meterBase fans a single meter out into five ganglia metrics: the raw
// count plus the 1/5/15-minute and mean rates.
type meterBase struct {
	meterMetric
	Name        string // Required.
	Title       string
	Units       string // Default is "count".
	Description string
	Groups      []string
	count       gmetric.Metric
	m1rate      gmetric.Metric
	m5rate      gmetric.Metric
	m15rate     gmetric.Metric
	meanRate    gmetric.Metric
}
// writeMeta sends ganglia metadata for each of the five sub-metrics.
func (m *meterBase) writeMeta(c *gmetric.Client) {
	c.WriteMeta(&m.count)
	c.WriteMeta(&m.m1rate)
	c.WriteMeta(&m.m5rate)
	c.WriteMeta(&m.m15rate)
	c.WriteMeta(&m.meanRate)
}
// writeValue sends the current count and rate readings to ganglia.
func (m *meterBase) writeValue(c *gmetric.Client) {
	c.WriteValue(&m.count, m.Count())
	c.WriteValue(&m.m1rate, m.Rate1())
	c.WriteValue(&m.m5rate, m.Rate5())
	c.WriteValue(&m.m15rate, m.Rate15())
	c.WriteValue(&m.meanRate, m.RateMean())
}
// register builds the five gmetric.Metric definitions this meter
// publishes. Note the count metric uses the title verbatim (no
// "count" suffix), while the rate metrics append their window name.
func (m *meterBase) register(r *Registry) {
	m.count = gmetric.Metric{
		Name:        r.makeName(m.Name, "count"),
		Title:       m.Title,
		Units:       "count",
		Description: makeOptional(m.Description, "count"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueInt32,
		Slope:       gmetric.SlopeBoth,
	}
	m.m1rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "one-minute"),
		Title:       makeOptional(m.Title, "one minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "one minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.m5rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "five-minute"),
		Title:       makeOptional(m.Title, "five minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "five minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.m15rate = gmetric.Metric{
		Name:        r.makeName(m.Name, "fifteen-minute"),
		Title:       makeOptional(m.Title, "fifteen minute"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "fifteen minute"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
	m.meanRate = gmetric.Metric{
		Name:        r.makeName(m.Name, "mean"),
		Title:       makeOptional(m.Title, "mean"),
		Units:       nonEmpty(m.Units, "count"),
		Description: makeOptional(m.Description, "mean"),
		Groups:      m.Groups,
		ValueType:   gmetric.ValueFloat64,
		Slope:       gmetric.SlopeBoth,
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/slowfei/gosfdoc"
"strings"
)
var (
	// _lang is assigned in init once the registered parsers are known.
	_lang *string = nil
	// _configFile is the path of the gosfdoc configuration file.
	_configFile = flag.String("config", "gosfdoc.json", "custom config file path.")
)
// init registers the -lang flag; its help text lists every code-language
// parser currently registered with gosfdoc.
func init() {
	parsers := gosfdoc.MapParser()
	impls := make([]string, 0, len(parsers)) // pre-size; was make([]string, 0, 0)
	for k := range parsers {                 // `for k, _ :=` was redundant
		impls = append(impls, k)
	}
	implstr := strings.Join(impls, ",")
	_lang = flag.String("lang", "", "[\""+implstr+"\"] specify code language type, default all language.")
}
// usage prints the application banner, the command help text and the
// registered flag defaults.
func usage() {
	appName := gosfdoc.APPNAME
	fmt.Println(appName, "v"+gosfdoc.VERSION)
	fmt.Println("")
	fmt.Println("usage help:")
	fmt.Println("'" + appName + " create' command init create default gosfdoc.json file, can be custom to modify file content.")
	fmt.Println("'" + appName + "' command by gosfdoc.json output document ")
	fmt.Println("")
	fmt.Println("other param:")
	flag.PrintDefaults()
	fmt.Println("")
}
// main parses the command line and dispatches on the first positional
// argument (help, version, create, output).
func main() {
	flag.Usage = usage
	flag.Parse()
	// BUG FIX: flag.Args()[0] panicked with "index out of range" when the
	// program was started without any positional argument.
	if flag.NArg() == 0 {
		return
	}
	arg := flag.Args()[0]
	if 0 != len(arg) {
		switch arg {
		case "help":
			flag.Usage()
		case "version":
			fmt.Println(gosfdoc.APPNAME, "v"+gosfdoc.VERSION)
		case "create":
			// TODO(review): create/output are currently no-ops here.
		case "output":
		default:
			fmt.Println("invalid command parameter.")
		}
		return
	}
}
增加指定扫描的文件后缀参数 (Add a parameter to specify which file suffixes to scan.)
package main
import (
"flag"
"fmt"
"github.com/slowfei/gosfdoc"
"strings"
)
var (
	// _lang is assigned in init once the registered parsers are known.
	_lang *string = nil
	// _configFile is the path of the gosfdoc configuration file.
	_configFile = flag.String("config", "gosfdoc.json", "custom config file path.")
	// _specifyFileSuffixes restricts which file suffixes are scanned.
	_specifyFileSuffixes = flag.String("file-suffixes", "", "specify file suffixes, default all. e.g: \"go,java,js,m\"")
)
// init registers the -lang flag; its help text lists every code-language
// parser currently registered with gosfdoc.
func init() {
	parsers := gosfdoc.MapParser()
	impls := make([]string, 0, len(parsers)) // pre-size; was make([]string, 0, 0)
	for k := range parsers {                 // `for k, _ :=` was redundant
		impls = append(impls, k)
	}
	implstr := strings.Join(impls, ",")
	_lang = flag.String("lang", "", "[\""+implstr+"\"] specify code language type, default all language.")
}
// usage prints the application banner, the command help text and the
// registered flag defaults.
func usage() {
	fmt.Println(gosfdoc.APPNAME, "v"+gosfdoc.VERSION)
	fmt.Println("")
	fmt.Println("usage help:")
	fmt.Println("'" + gosfdoc.APPNAME + " create' command init create default gosfdoc.json file, can be custom to modify file content.")
	fmt.Println("'" + gosfdoc.APPNAME + "' command by gosfdoc.json output document ")
	fmt.Println("")
	fmt.Println("other param:")
	flag.PrintDefaults()
	fmt.Println("")
}
// main parses the command line and dispatches on the first positional
// argument (help, version, create, output).
func main() {
	flag.Usage = usage
	flag.Parse()
	// BUG FIX: flag.Args()[0] panicked with "index out of range" when the
	// program was started without any positional argument.
	if flag.NArg() == 0 {
		return
	}
	arg := flag.Args()[0]
	if 0 != len(arg) {
		switch arg {
		case "help":
			flag.Usage()
		case "version":
			fmt.Println(gosfdoc.APPNAME, "v"+gosfdoc.VERSION)
		case "create":
			// TODO(review): create/output are currently no-ops here.
		case "output":
		default:
			fmt.Println("invalid command parameter.")
		}
		return
	}
}
|
Update hello.go
|
package aws
import (
"fmt"
"reflect"
"regexp"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSAutoScalingGroup_basic creates an auto scaling group,
// verifies its attributes and healthy capacity, then applies an updated
// config (new launch configuration, new tag) and verifies the changes.
func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
	var group autoscaling.Group
	var lc autoscaling.LaunchConfiguration
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSAutoScalingGroupHealthyCapacity(&group, 2),
					testAccCheckAWSAutoScalingGroupAttributes(&group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "name", "foobar3-terraform-test"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "max_size", "5"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "min_size", "2"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "health_check_grace_period", "300"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "health_check_type", "ELB"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "desired_capacity", "4"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "force_delete", "true"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
				),
			},
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfigUpdate,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "desired_capacity", "5"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"),
					testLaunchConfigurationName("aws_autoscaling_group.bar", &lc),
					testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
						"value":               "bar-foo",
						"propagate_at_launch": true,
					}),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_autoGeneratedName verifies that a group
// created without an explicit name gets a Terraform-generated name
// with the "tf-asg-" prefix.
func TestAccAWSAutoScalingGroup_autoGeneratedName(t *testing.T) {
	asgNameRegexp := regexp.MustCompile("^tf-asg-")
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfig_autoGeneratedName,
				Check: resource.ComposeTestCheckFunc(
					resource.TestMatchResourceAttr(
						"aws_autoscaling_group.bar", "name", asgNameRegexp),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_tags verifies that tags are created with
// the group and that an update replaces the "Foo" tag with "Bar".
func TestAccAWSAutoScalingGroup_tags(t *testing.T) {
	var group autoscaling.Group
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfig,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAutoscalingTags(&group.Tags, "Foo", map[string]interface{}{
						"value":               "foo-bar",
						"propagate_at_launch": true,
					}),
				),
			},
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfigUpdate,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAutoscalingTagNotExists(&group.Tags, "Foo"),
					testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
						"value":               "bar-foo",
						"propagate_at_launch": true,
					}),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_VpcUpdates creates a group pinned to one
// availability zone, then updates it to two VPC subnets and checks
// that the zones and vpc_zone_identifier attributes follow.
func TestAccAWSAutoScalingGroup_VpcUpdates(t *testing.T) {
	var group autoscaling.Group
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfigWithAZ,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.#", "1"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "vpc_zone_identifier.#", "1"),
				),
			},
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfigWithVPCIdent,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(&group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.#", "2"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.221770259", "us-west-2b"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "vpc_zone_identifier.#", "2"),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_WithLoadBalancer creates a group attached
// to an ELB and verifies the load balancer association.
func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
	var group autoscaling.Group
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				// FIX: dropped a no-op fmt.Sprintf wrapper (no format
				// verbs, no arguments) that `go vet` flags.
				Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_withPlacementGroup verifies that a group
// can be created inside a cluster placement group.
func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) {
	var group autoscaling.Group
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "placement_group", "test"),
				),
			},
		},
	})
}
// testAccCheckAWSAutoScalingGroupDestroy verifies every auto scaling
// group recorded in the state has really been deleted from AWS.
func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_autoscaling_group" {
			continue
		}
		// Try to find the Group
		describeGroups, err := conn.DescribeAutoScalingGroups(
			&autoscaling.DescribeAutoScalingGroupsInput{
				AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)},
			})
		if err == nil {
			if len(describeGroups.AutoScalingGroups) != 0 &&
				*describeGroups.AutoScalingGroups[0].AutoScalingGroupName == rs.Primary.ID {
				return fmt.Errorf("AutoScaling Group still exists")
			}
			// BUG FIX: previously a nil error fell through to the error
			// type assertion below, which returned nil and silently
			// skipped checking the remaining resources.
			continue
		}
		// A "not found" API error is the expected outcome; anything else
		// is a real failure.
		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}
		if ec2err.Code() != "InvalidGroup.NotFound" {
			return err
		}
	}
	return nil
}
// testAccCheckAWSAutoScalingGroupAttributes asserts that the fetched
// group matches the values in testAccAWSAutoScalingGroupConfig,
// including the propagated "Foo" tag.
func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *group.AvailabilityZones[0] != "us-west-2a" {
			return fmt.Errorf("Bad availability_zones: %#v", group.AvailabilityZones[0])
		}
		if *group.AutoScalingGroupName != "foobar3-terraform-test" {
			return fmt.Errorf("Bad name: %s", *group.AutoScalingGroupName)
		}
		if *group.MaxSize != 5 {
			return fmt.Errorf("Bad max_size: %d", *group.MaxSize)
		}
		if *group.MinSize != 2 {
			// BUG FIX: this message previously said "max_size".
			return fmt.Errorf("Bad min_size: %d", *group.MinSize)
		}
		if *group.HealthCheckType != "ELB" {
			return fmt.Errorf("Bad health_check_type,\nexpected: %s\ngot: %s", "ELB", *group.HealthCheckType)
		}
		if *group.HealthCheckGracePeriod != 300 {
			return fmt.Errorf("Bad health_check_grace_period: %d", *group.HealthCheckGracePeriod)
		}
		if *group.DesiredCapacity != 4 {
			return fmt.Errorf("Bad desired_capacity: %d", *group.DesiredCapacity)
		}
		if *group.LaunchConfigurationName == "" {
			return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName)
		}
		t := &autoscaling.TagDescription{
			Key:               aws.String("Foo"),
			Value:             aws.String("foo-bar"),
			PropagateAtLaunch: aws.Bool(true),
			ResourceType:      aws.String("auto-scaling-group"),
			ResourceId:        group.AutoScalingGroupName,
		}
		if !reflect.DeepEqual(group.Tags[0], t) {
			return fmt.Errorf(
				"Got:\n\n%#v\n\nExpected:\n\n%#v\n",
				group.Tags[0],
				t)
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupAttributesLoadBalancer asserts that
// exactly one load balancer is attached to the group.
func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if len(group.LoadBalancerNames) != 1 {
			return fmt.Errorf("Bad load_balancers: %v", group.LoadBalancerNames)
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupExists looks up resource n in the
// state, fetches the matching group from AWS and stores it in *group
// for later checks.
func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No AutoScaling Group ID is set")
		}
		conn := testAccProvider.Meta().(*AWSClient).autoscalingconn
		describeGroups, err := conn.DescribeAutoScalingGroups(
			&autoscaling.DescribeAutoScalingGroupsInput{
				AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)},
			})
		if err != nil {
			return err
		}
		if len(describeGroups.AutoScalingGroups) != 1 ||
			*describeGroups.AutoScalingGroups[0].AutoScalingGroupName != rs.Primary.ID {
			return fmt.Errorf("AutoScaling Group not found")
		}
		*group = *describeGroups.AutoScalingGroups[0]
		return nil
	}
}
// testLaunchConfigurationName asserts that the state's
// launch_configuration attribute for resource n matches the name of lc.
func testLaunchConfigurationName(n string, lc *autoscaling.LaunchConfiguration) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if *lc.LaunchConfigurationName != rs.Primary.Attributes["launch_configuration"] {
			return fmt.Errorf("Launch configuration names do not match")
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupHealthyCapacity asserts that at least
// exp instances of g report a "Healthy" health status.
func testAccCheckAWSAutoScalingGroupHealthyCapacity(
	g *autoscaling.Group, exp int) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		healthy := 0
		for _, i := range g.Instances {
			if i.HealthStatus == nil {
				continue
			}
			if strings.EqualFold(*i.HealthStatus, "Healthy") {
				healthy++
			}
		}
		if healthy < exp {
			return fmt.Errorf("Expected at least %d healthy, got %d.", exp, healthy)
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer asserts
// that every zone in the group's comma-separated VPCZoneIdentifier
// corresponds to a subnet present in the Terraform state.
func testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Grab Subnet Ids
		var subnets []string
		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_subnet" {
				continue
			}
			subnets = append(subnets, rs.Primary.Attributes["id"])
		}
		if group.VPCZoneIdentifier == nil {
			return fmt.Errorf("Bad VPC Zone Identifier\nexpected: %s\ngot nil", subnets)
		}
		zones := strings.Split(*group.VPCZoneIdentifier, ",")
		// Every zone must be matched by some subnet in the state.
		remaining := len(zones)
		for _, z := range zones {
			for _, s := range subnets {
				if z == s {
					remaining--
				}
			}
		}
		if remaining != 0 {
			return fmt.Errorf("Bad VPC Zone Identifier match\nexpected: %s\ngot:%s", zones, subnets)
		}
		return nil
	}
}
// HCL fixture: auto scaling group with no "name" attribute, so
// Terraform must auto-generate one (checked against the tf-asg- prefix).
const testAccAWSAutoScalingGroupConfig_autoGeneratedName = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
max_size = 1
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
// HCL fixture: the baseline group used by the basic/tags tests; its
// values are what testAccCheckAWSAutoScalingGroupAttributes asserts.
const testAccAWSAutoScalingGroupConfig = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 4
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
// HCL fixture: update step — switches the group to a new launch
// configuration, raises desired_capacity and swaps the Foo tag for Bar.
const testAccAWSAutoScalingGroupConfigUpdate = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_launch_configuration" "new" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 5
force_delete = true
termination_policies = ["ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.new.name}"
tag {
key = "Bar"
value = "bar-foo"
propagate_at_launch = true
}
}
`
// HCL fixture: full VPC + ELB + group; the AMI serves HTTP on :80 at
// boot so the ELB health check can pass and capacity can be reached.
const testAccAWSAutoScalingGroupConfigWithLoadBalancer = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
tags { Name = "tf-asg-test" }
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_security_group" "foo" {
vpc_id="${aws_vpc.foo.id}"
ingress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elb" "bar" {
subnets = ["${aws_subnet.foo.id}"]
security_groups = ["${aws_security_group.foo.id}"]
listener {
instance_port = 80
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
target = "HTTP:80/"
interval = 5
timeout = 2
}
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_launch_configuration" "foobar" {
// need an AMI that listens on :80 at boot, this is:
// bitnami-nginxstack-1.6.1-0-linux-ubuntu-14.04.1-x86_64-hvm-ebs-ami-99f5b1a9-3
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
security_groups = ["${aws_security_group.foo.id}"]
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["${aws_subnet.foo.availability_zone}"]
vpc_zone_identifier = ["${aws_subnet.foo.id}"]
name = "foobar3-terraform-test"
max_size = 2
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
wait_for_elb_capacity = 2
force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}"
load_balancers = ["${aws_elb.bar.name}"]
}
`
// HCL fixture: first step of the VPC-update test — a group pinned to a
// single availability zone (us-west-2a) within a two-subnet VPC.
const testAccAWSAutoScalingGroupConfigWithAZ = `
resource "aws_vpc" "default" {
cidr_block = "10.0.0.0/16"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.1.0/24"
availability_zone = "us-west-2a"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "alt" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.2.0/24"
availability_zone = "us-west-2b"
tags {
Name = "asg-vpc-thing"
}
}
resource "aws_launch_configuration" "foobar" {
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
max_size = 2
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
`
// HCL fixture: second step of the VPC-update test — the same group
// switched to explicit vpc_zone_identifier subnets in two zones.
const testAccAWSAutoScalingGroupConfigWithVPCIdent = `
resource "aws_vpc" "default" {
cidr_block = "10.0.0.0/16"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.1.0/24"
availability_zone = "us-west-2a"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "alt" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.2.0/24"
availability_zone = "us-west-2b"
tags {
Name = "asg-vpc-thing"
}
}
resource "aws_launch_configuration" "foobar" {
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
}
resource "aws_autoscaling_group" "bar" {
vpc_zone_identifier = [
"${aws_subnet.main.id}",
"${aws_subnet.alt.id}",
]
max_size = 2
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
`
// testAccAWSAutoScalingGroupConfig_withPlacementGroup: an ASG associated with
// a cluster placement group named "test"; c3.large is used because cluster
// placement requires an instance type that supports it.
const testAccAWSAutoScalingGroupConfig_withPlacementGroup = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "c3.large"
}
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 1
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
placement_group = "${aws_placement_group.test.name}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
Clean up unrelated config to speed up AZ/VPC acc tests.
Removes overspecified config that is unrelated to the auto scaling
group's availability zone and VPC identifier acceptance tests. The
created auto scaling groups do not need to spin up any hosts since
the acceptance tests are only concerned with checking the existence
of the associated availability zones and VPC identifiers.
package aws
import (
"fmt"
"reflect"
"regexp"
"strings"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAWSAutoScalingGroup_basic creates an ASG, verifies every configured
// attribute, then applies an updated configuration and confirms the new
// launch configuration, capacity, termination policy, and tags took effect.
func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
	var group autoscaling.Group
	var lc autoscaling.LaunchConfiguration

	// Step 1: create the group and check each attribute from the config.
	createStep := resource.TestStep{
		Config: testAccAWSAutoScalingGroupConfig,
		Check: resource.ComposeTestCheckFunc(
			testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
			testAccCheckAWSAutoScalingGroupHealthyCapacity(&group, 2),
			testAccCheckAWSAutoScalingGroupAttributes(&group),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "name", "foobar3-terraform-test"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "max_size", "5"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "min_size", "2"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "health_check_grace_period", "300"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "health_check_type", "ELB"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "desired_capacity", "4"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "force_delete", "true"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
		),
	}

	// Step 2: apply the update config and verify the changed attributes.
	updateStep := resource.TestStep{
		Config: testAccAWSAutoScalingGroupConfigUpdate,
		Check: resource.ComposeTestCheckFunc(
			testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
			testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "desired_capacity", "5"),
			resource.TestCheckResourceAttr(
				"aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"),
			testLaunchConfigurationName("aws_autoscaling_group.bar", &lc),
			testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
				"value":               "bar-foo",
				"propagate_at_launch": true,
			}),
		),
	}

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps:        []resource.TestStep{createStep, updateStep},
	})
}
// TestAccAWSAutoScalingGroup_autoGeneratedName verifies that an ASG created
// without an explicit name gets one generated with the "tf-asg-" prefix.
func TestAccAWSAutoScalingGroup_autoGeneratedName(t *testing.T) {
	generatedNamePattern := regexp.MustCompile(`^tf-asg-`)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSAutoScalingGroupConfig_autoGeneratedName,
				Check: resource.ComposeTestCheckFunc(
					resource.TestMatchResourceAttr(
						"aws_autoscaling_group.bar", "name", generatedNamePattern),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_tags verifies that tags are applied on create
// and correctly replaced on update (the "Foo" tag removed, "Bar" added).
func TestAccAWSAutoScalingGroup_tags(t *testing.T) {
	var group autoscaling.Group

	checkInitialTags := resource.ComposeTestCheckFunc(
		testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
		testAccCheckAutoscalingTags(&group.Tags, "Foo", map[string]interface{}{
			"value":               "foo-bar",
			"propagate_at_launch": true,
		}),
	)

	checkUpdatedTags := resource.ComposeTestCheckFunc(
		testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
		testAccCheckAutoscalingTagNotExists(&group.Tags, "Foo"),
		testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
			"value":               "bar-foo",
			"propagate_at_launch": true,
		}),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			{Config: testAccAWSAutoScalingGroupConfig, Check: checkInitialTags},
			{Config: testAccAWSAutoScalingGroupConfigUpdate, Check: checkUpdatedTags},
		},
	})
}
// TestAccAWSAutoScalingGroup_VpcUpdates moves an ASG from availability-zone
// placement to VPC-subnet (vpc_zone_identifier) placement and checks the
// resulting attributes after each apply.
func TestAccAWSAutoScalingGroup_VpcUpdates(t *testing.T) {
	var group autoscaling.Group

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSAutoScalingGroupConfigWithAZ,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.#", "1"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "vpc_zone_identifier.#", "1"),
				),
			},
			{
				Config: testAccAWSAutoScalingGroupConfigWithVPCIdent,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(&group),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.#", "1"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"),
					resource.TestCheckResourceAttr(
						"aws_autoscaling_group.bar", "vpc_zone_identifier.#", "1"),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_WithLoadBalancer creates an ASG attached to an
// ELB and verifies exactly one load balancer is associated with the group.
func TestAccAWSAutoScalingGroup_WithLoadBalancer(t *testing.T) {
	var group autoscaling.Group
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				// Use the constant directly: the former
				// fmt.Sprintf(...) wrapper had no format arguments,
				// did nothing, and trips go vet's printf check.
				Config: testAccAWSAutoScalingGroupConfigWithLoadBalancer,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
					testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(&group),
				),
			},
		},
	})
}
// TestAccAWSAutoScalingGroup_withPlacementGroup verifies that an ASG can be
// created inside the "test" placement group and that the attribute is set.
func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) {
	var group autoscaling.Group

	checkPlacement := resource.ComposeTestCheckFunc(
		testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group),
		resource.TestCheckResourceAttr(
			"aws_autoscaling_group.bar", "placement_group", "test"),
	)

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccAWSAutoScalingGroupConfig_withPlacementGroup,
				Check:  checkPlacement,
			},
		},
	})
}
// testAccCheckAWSAutoScalingGroupDestroy verifies that every ASG recorded in
// state has actually been deleted from AWS. It returns an error if a group
// still exists or if AWS reports anything other than "InvalidGroup.NotFound".
func testAccCheckAWSAutoScalingGroupDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).autoscalingconn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_autoscaling_group" {
			continue
		}

		// Try to find the Group
		describeGroups, err := conn.DescribeAutoScalingGroups(
			&autoscaling.DescribeAutoScalingGroupsInput{
				AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)},
			})

		if err == nil {
			if len(describeGroups.AutoScalingGroups) != 0 &&
				*describeGroups.AutoScalingGroups[0].AutoScalingGroupName == rs.Primary.ID {
				return fmt.Errorf("AutoScaling Group still exists")
			}
			// BUG FIX: previously fell through to the type assertion on a
			// nil error and returned nil, skipping any remaining resources.
			continue
		}

		// A NotFound error means the group was destroyed as expected;
		// anything else (or a non-awserr error) is a real failure.
		ec2err, ok := err.(awserr.Error)
		if !ok {
			return err
		}
		if ec2err.Code() != "InvalidGroup.NotFound" {
			return err
		}
	}

	return nil
}
// testAccCheckAWSAutoScalingGroupAttributes verifies every attribute set by
// testAccAWSAutoScalingGroupConfig, including the propagated "Foo" tag.
func testAccCheckAWSAutoScalingGroupAttributes(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *group.AvailabilityZones[0] != "us-west-2a" {
			return fmt.Errorf("Bad availability_zones: %#v", group.AvailabilityZones[0])
		}

		if *group.AutoScalingGroupName != "foobar3-terraform-test" {
			return fmt.Errorf("Bad name: %s", *group.AutoScalingGroupName)
		}

		if *group.MaxSize != 5 {
			return fmt.Errorf("Bad max_size: %d", *group.MaxSize)
		}

		// BUG FIX: this message previously said "max_size".
		if *group.MinSize != 2 {
			return fmt.Errorf("Bad min_size: %d", *group.MinSize)
		}

		if *group.HealthCheckType != "ELB" {
			return fmt.Errorf("Bad health_check_type,\nexpected: %s\ngot: %s", "ELB", *group.HealthCheckType)
		}

		if *group.HealthCheckGracePeriod != 300 {
			return fmt.Errorf("Bad health_check_grace_period: %d", *group.HealthCheckGracePeriod)
		}

		if *group.DesiredCapacity != 4 {
			return fmt.Errorf("Bad desired_capacity: %d", *group.DesiredCapacity)
		}

		if *group.LaunchConfigurationName == "" {
			return fmt.Errorf("Bad launch configuration name: %s", *group.LaunchConfigurationName)
		}

		// The config attaches a single propagated "Foo" tag; it must be the
		// first (and only) tag on the group.
		t := &autoscaling.TagDescription{
			Key:               aws.String("Foo"),
			Value:             aws.String("foo-bar"),
			PropagateAtLaunch: aws.Bool(true),
			ResourceType:      aws.String("auto-scaling-group"),
			ResourceId:        group.AutoScalingGroupName,
		}

		// Guard against an empty tag list before indexing into it.
		if len(group.Tags) == 0 {
			return fmt.Errorf("Bad tags: expected %#v, got none", t)
		}

		if !reflect.DeepEqual(group.Tags[0], t) {
			return fmt.Errorf(
				"Got:\n\n%#v\n\nExpected:\n\n%#v\n",
				group.Tags[0],
				t)
		}

		return nil
	}
}
// testAccCheckAWSAutoScalingGroupAttributesLoadBalancer asserts that exactly
// one load balancer is attached to the group.
func testAccCheckAWSAutoScalingGroupAttributesLoadBalancer(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if count := len(group.LoadBalancerNames); count != 1 {
			return fmt.Errorf("Bad load_balancers: %v", group.LoadBalancerNames)
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupExists looks up the ASG recorded in state
// under resource name n, fetches it from AWS, and stores it into group.
func testAccCheckAWSAutoScalingGroupExists(n string, group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, found := s.RootModule().Resources[n]
		if !found {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No AutoScaling Group ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).autoscalingconn

		resp, err := conn.DescribeAutoScalingGroups(
			&autoscaling.DescribeAutoScalingGroupsInput{
				AutoScalingGroupNames: []*string{aws.String(rs.Primary.ID)},
			})
		if err != nil {
			return err
		}

		// Exactly one group must come back, and its name must match the ID
		// Terraform recorded for the resource.
		groups := resp.AutoScalingGroups
		if len(groups) != 1 || *groups[0].AutoScalingGroupName != rs.Primary.ID {
			return fmt.Errorf("AutoScaling Group not found")
		}

		*group = *groups[0]
		return nil
	}
}
// testLaunchConfigurationName asserts that the ASG resource n references the
// launch configuration lc via its "launch_configuration" attribute.
func testLaunchConfigurationName(n string, lc *autoscaling.LaunchConfiguration) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, found := s.RootModule().Resources[n]
		if !found {
			return fmt.Errorf("Not found: %s", n)
		}
		if want := rs.Primary.Attributes["launch_configuration"]; *lc.LaunchConfigurationName != want {
			return fmt.Errorf("Launch configuration names do not match")
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupHealthyCapacity fails unless at least exp
// instances in the group report a "Healthy" status (case-insensitive).
func testAccCheckAWSAutoScalingGroupHealthyCapacity(
	g *autoscaling.Group, exp int) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		var healthy int
		for _, inst := range g.Instances {
			// Instances without a reported status don't count either way.
			if inst.HealthStatus != nil && strings.EqualFold(*inst.HealthStatus, "Healthy") {
				healthy++
			}
		}
		if healthy < exp {
			return fmt.Errorf("Expected at least %d healthy, got %d.", exp, healthy)
		}
		return nil
	}
}
// testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer verifies that the
// group's VPCZoneIdentifier (a comma-separated subnet-ID list) matches the
// aws_subnet resources present in the Terraform state.
func testAccCheckAWSAutoScalingGroupAttributesVPCZoneIdentifer(group *autoscaling.Group) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// Grab Subnet Ids
		var subnets []string
		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_subnet" {
				continue
			}
			subnets = append(subnets, rs.Primary.Attributes["id"])
		}

		if group.VPCZoneIdentifier == nil {
			return fmt.Errorf("Bad VPC Zone Identifier\nexpected: %s\ngot nil", subnets)
		}

		zones := strings.Split(*group.VPCZoneIdentifier, ",")

		// Every zone entry on the group must match one of the state subnets.
		remaining := len(zones)
		for _, z := range zones {
			for _, s := range subnets {
				if z == s {
					remaining--
				}
			}
		}

		if remaining != 0 {
			// BUG FIX: the message previously printed the group's zones as
			// "expected" and the state subnets as "got" (and lacked a space
			// after "got:"). The subnets from state are the expectation.
			return fmt.Errorf("Bad VPC Zone Identifier match\nexpected: %s\ngot: %s", subnets, zones)
		}

		return nil
	}
}
// testAccAWSAutoScalingGroupConfig_autoGeneratedName omits the ASG "name"
// attribute; used by TestAccAWSAutoScalingGroup_autoGeneratedName to check
// the generated "tf-asg-" name prefix.
const testAccAWSAutoScalingGroupConfig_autoGeneratedName = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
max_size = 1
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
// testAccAWSAutoScalingGroupConfig: the baseline ASG used by the basic and
// tags tests — named group, max 5 / min 2 / desired 4, and a propagated
// "Foo" tag.
const testAccAWSAutoScalingGroupConfig = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 4
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
// testAccAWSAutoScalingGroupConfigUpdate: the update step for the baseline
// config — switches to the "new" launch configuration, raises desired
// capacity to 5, trims the termination policies, and replaces the "Foo" tag
// with "Bar".
const testAccAWSAutoScalingGroupConfigUpdate = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_launch_configuration" "new" {
image_id = "ami-21f78e11"
instance_type = "t1.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 5
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 5
force_delete = true
termination_policies = ["ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.new.name}"
tag {
key = "Bar"
value = "bar-foo"
propagate_at_launch = true
}
}
`
// testAccAWSAutoScalingGroupConfigWithLoadBalancer: a full VPC (subnet,
// internet gateway, open security group) with an ELB health-checking HTTP:80,
// and an ASG attached to that ELB via load_balancers with
// wait_for_elb_capacity = 2.
const testAccAWSAutoScalingGroupConfigWithLoadBalancer = `
resource "aws_vpc" "foo" {
cidr_block = "10.1.0.0/16"
tags { Name = "tf-asg-test" }
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_subnet" "foo" {
cidr_block = "10.1.1.0/24"
vpc_id = "${aws_vpc.foo.id}"
}
resource "aws_security_group" "foo" {
vpc_id="${aws_vpc.foo.id}"
ingress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
egress {
protocol = "-1"
from_port = 0
to_port = 0
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_elb" "bar" {
subnets = ["${aws_subnet.foo.id}"]
security_groups = ["${aws_security_group.foo.id}"]
listener {
instance_port = 80
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
health_check {
healthy_threshold = 2
unhealthy_threshold = 2
target = "HTTP:80/"
interval = 5
timeout = 2
}
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_launch_configuration" "foobar" {
// need an AMI that listens on :80 at boot, this is:
// bitnami-nginxstack-1.6.1-0-linux-ubuntu-14.04.1-x86_64-hvm-ebs-ami-99f5b1a9-3
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
security_groups = ["${aws_security_group.foo.id}"]
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["${aws_subnet.foo.availability_zone}"]
vpc_zone_identifier = ["${aws_subnet.foo.id}"]
name = "foobar3-terraform-test"
max_size = 2
min_size = 2
health_check_grace_period = 300
health_check_type = "ELB"
wait_for_elb_capacity = 2
force_delete = true
launch_configuration = "${aws_launch_configuration.foobar.name}"
load_balancers = ["${aws_elb.bar.name}"]
}
`
// testAccAWSAutoScalingGroupConfigWithAZ: minimal AZ-placement config — a
// single-subnet VPC and an ASG pinned to "us-west-2a" with zero capacity, so
// the test never has to wait for instances to launch.
const testAccAWSAutoScalingGroupConfigWithAZ = `
resource "aws_vpc" "default" {
cidr_block = "10.0.0.0/16"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.1.0/24"
availability_zone = "us-west-2a"
tags {
Name = "terraform-test"
}
}
resource "aws_launch_configuration" "foobar" {
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = [
"us-west-2a"
]
desired_capacity = 0
max_size = 0
min_size = 0
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
`
// testAccAWSAutoScalingGroupConfigWithVPCIdent: minimal VPC-placement config —
// the same single subnet, but the ASG is placed via vpc_zone_identifier, again
// with zero capacity so no instances need to launch.
const testAccAWSAutoScalingGroupConfigWithVPCIdent = `
resource "aws_vpc" "default" {
cidr_block = "10.0.0.0/16"
tags {
Name = "terraform-test"
}
}
resource "aws_subnet" "main" {
vpc_id = "${aws_vpc.default.id}"
cidr_block = "10.0.1.0/24"
availability_zone = "us-west-2a"
tags {
Name = "terraform-test"
}
}
resource "aws_launch_configuration" "foobar" {
image_id = "ami-b5b3fc85"
instance_type = "t2.micro"
}
resource "aws_autoscaling_group" "bar" {
vpc_zone_identifier = [
"${aws_subnet.main.id}",
]
desired_capacity = 0
max_size = 0
min_size = 0
launch_configuration = "${aws_launch_configuration.foobar.name}"
}
`
// testAccAWSAutoScalingGroupConfig_withPlacementGroup: an ASG associated with
// a cluster placement group named "test"; c3.large is used because cluster
// placement requires an instance type that supports it.
const testAccAWSAutoScalingGroupConfig_withPlacementGroup = `
resource "aws_launch_configuration" "foobar" {
image_id = "ami-21f78e11"
instance_type = "c3.large"
}
resource "aws_placement_group" "test" {
name = "test"
strategy = "cluster"
}
resource "aws_autoscaling_group" "bar" {
availability_zones = ["us-west-2a"]
name = "foobar3-terraform-test"
max_size = 1
min_size = 1
health_check_grace_period = 300
health_check_type = "ELB"
desired_capacity = 1
force_delete = true
termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
placement_group = "${aws_placement_group.test.name}"
launch_configuration = "${aws_launch_configuration.foobar.name}"
tag {
key = "Foo"
value = "foo-bar"
propagate_at_launch = true
}
}
`
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.