CombinedText stringlengths 4 3.42M |
|---|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcproxy
import (
"io"
"sync"
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// watchProxy forwards client watch requests to a single upstream etcd
// watcher, coalescing compatible watchers into shared groups.
type watchProxy struct {
	cw  clientv3.Watcher // upstream watcher all proxied watches go through
	wgs watchergroups    // coalesced watcher groups shared by all streams

	// mu guards nextStreamID.
	mu           sync.Mutex
	nextStreamID int64 // id handed out to the next incoming Watch stream
}
// NewWatchProxy builds a WatchServer that proxies watches through the
// given client's watcher.
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	groups := watchergroups{
		cw:     c.Watcher,
		groups: make(map[watchRange]*watcherGroup),
	}
	return &watchProxy{cw: c.Watcher, wgs: groups}
}
// Watch implements pb.WatchServer. Each call allocates a fresh stream id
// and runs one serverWatchStream until the client disconnects.
func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) {
	wp.mu.Lock()
	wp.nextStreamID++
	// Capture the id while still holding the lock; reading wp.nextStreamID
	// after Unlock races with concurrent Watch calls.
	streamID := wp.nextStreamID
	wp.mu.Unlock()
	sws := serverWatchStream{
		cw:     wp.cw,
		groups: &wp.wgs,
		// Must be allocated up front: addDedicatedWatcher assigns into this
		// map, and assigning into a nil map panics.
		singles:    make(map[int64]*watcherSingle),
		id:         streamID,
		gRPCStream: stream,
		ctrlCh:     make(chan *pb.WatchResponse, 10),
		watchCh:    make(chan *pb.WatchResponse, 10),
	}
	go sws.recvLoop()
	sws.sendLoop()
	return nil
}
// serverWatchStream serves one client Watch RPC stream: it owns the
// per-stream receive/send loops and tracks which watchers belong to it.
type serverWatchStream struct {
	id int64 // stream id assigned by the watchProxy

	cw clientv3.Watcher

	mu      sync.Mutex // make sure any access of groups and singles is atomic
	groups  *watchergroups
	singles map[int64]*watcherSingle // dedicated (non-coalesced) watchers by watcher id

	gRPCStream pb.Watch_WatchServer

	ctrlCh  chan *pb.WatchResponse // control responses
	watchCh chan *pb.WatchResponse // watch event responses

	nextWatcherID int64 // id given to the next watcher created on this stream
}
// close tears the stream down: closing both outbound channels stops
// sendLoop, then every dedicated watcher is stopped.
// NOTE(review): sws.groups is shared with other streams; confirm that
// groups.stop() is scoped to this stream's watchers.
func (sws *serverWatchStream) close() {
	close(sws.watchCh)
	close(sws.ctrlCh)
	for _, ws := range sws.singles {
		ws.stop()
	}
	sws.groups.stop()
}
// recvLoop reads watch requests off the gRPC stream until EOF or error,
// creating or cancelling watchers as requested. It runs in its own
// goroutine; the deferred close tears the whole stream down on exit.
func (sws *serverWatchStream) recvLoop() error {
	defer sws.close()
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			cr := uv.CreateRequest
			watcher := watcher{
				wr: watchRange{
					key: string(cr.Key),
					end: string(cr.RangeEnd),
				},
				id:       sws.nextWatcherID,
				ch:       sws.watchCh,
				progress: cr.ProgressNotify,
				filters:  v3rpc.FiltersFromRequest(cr),
			}
			// A watch pinned to a specific start revision cannot share an
			// upstream watcher with others, so it gets a dedicated one.
			if cr.StartRevision != 0 {
				sws.addDedicatedWatcher(watcher, cr.StartRevision)
			} else {
				sws.addCoalescedWatcher(watcher)
			}
			sws.nextWatcherID++
		case *pb.WatchRequest_CancelRequest:
			sws.removeWatcher(uv.CancelRequest.WatchId)
		default:
			// No other request union members are handled by this proxy yet.
			panic("not implemented")
		}
	}
}
// sendLoop pumps both outbound channels to the client until either channel
// is closed or a send fails.
func (sws *serverWatchStream) sendLoop() {
	for {
		var (
			resp *pb.WatchResponse
			ok   bool
		)
		select {
		case resp, ok = <-sws.watchCh:
		case resp, ok = <-sws.ctrlCh:
		}
		if !ok {
			return
		}
		if err := sws.gRPCStream.Send(resp); err != nil {
			return
		}
	}
}
// addCoalescedWatcher registers w with the shared watcher groups.
func (sws *serverWatchStream) addCoalescedWatcher(w watcher) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	sws.groups.addWatcher(receiverID{streamID: sws.id, watcherID: w.id}, w)
}
// addDedicatedWatcher opens a private upstream watch starting at rev for
// watcher w and tracks it in sws.singles.
func (sws *serverWatchStream) addDedicatedWatcher(w watcher, rev int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	// Defensive lazy init: assigning into a nil map panics, and the stream
	// may have been constructed without allocating singles.
	if sws.singles == nil {
		sws.singles = make(map[int64]*watcherSingle)
	}
	ctx, cancel := context.WithCancel(context.Background())
	wch := sws.cw.Watch(ctx,
		w.wr.key, clientv3.WithRange(w.wr.end),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)
	ws := newWatcherSingle(wch, cancel, w, sws)
	sws.singles[w.id] = ws
	go ws.run()
}
// maybeCoalesceWatcher tries to fold a dedicated watcher back into a
// shared group; on success it is removed from the singles table.
func (sws *serverWatchStream) maybeCoalesceWatcher(ws watcherSingle) bool {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	rid := receiverID{streamID: sws.id, watcherID: ws.w.id}
	if !sws.groups.maybeJoinWatcherSingle(rid, ws) {
		return false
	}
	delete(sws.singles, ws.w.id)
	return true
}
// removeWatcher cancels watcher id, whether it lives in a shared group or
// in the dedicated singles table.
func (sws *serverWatchStream) removeWatcher(id int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	if sws.groups.removeWatcher(receiverID{streamID: sws.id, watcherID: id}) {
		return // was a coalesced watcher
	}
	if ws, ok := sws.singles[id]; ok {
		ws.stop()
		delete(sws.singles, id)
	}
}
proxy/grpcproxy: fix nil-map assign to 'singles'
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcproxy
import (
"io"
"sync"
"golang.org/x/net/context"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)
// watchProxy forwards client watch requests to a single upstream etcd
// watcher, coalescing compatible watchers into shared groups.
type watchProxy struct {
	cw  clientv3.Watcher // upstream watcher all proxied watches go through
	wgs watchergroups    // coalesced watcher groups shared by all streams

	// mu guards nextStreamID.
	mu           sync.Mutex
	nextStreamID int64 // id handed out to the next incoming Watch stream
}
// NewWatchProxy builds a WatchServer that proxies watches through the
// given client's watcher.
func NewWatchProxy(c *clientv3.Client) pb.WatchServer {
	wp := &watchProxy{cw: c.Watcher}
	wp.wgs = watchergroups{
		cw:     c.Watcher,
		groups: make(map[watchRange]*watcherGroup),
	}
	return wp
}
// Watch implements pb.WatchServer. Each call allocates a fresh stream id
// and runs one serverWatchStream until the client disconnects.
func (wp *watchProxy) Watch(stream pb.Watch_WatchServer) (err error) {
	wp.mu.Lock()
	wp.nextStreamID++
	// Capture the id while still holding the lock; reading wp.nextStreamID
	// after Unlock races with concurrent Watch calls.
	streamID := wp.nextStreamID
	wp.mu.Unlock()
	sws := serverWatchStream{
		cw:         wp.cw,
		groups:     &wp.wgs,
		singles:    make(map[int64]*watcherSingle),
		id:         streamID,
		gRPCStream: stream,
		ctrlCh:     make(chan *pb.WatchResponse, 10),
		watchCh:    make(chan *pb.WatchResponse, 10),
	}
	go sws.recvLoop()
	sws.sendLoop()
	return nil
}
// serverWatchStream serves one client Watch RPC stream: it owns the
// per-stream receive/send loops and tracks which watchers belong to it.
type serverWatchStream struct {
	id int64 // stream id assigned by the watchProxy

	cw clientv3.Watcher

	mu      sync.Mutex // make sure any access of groups and singles is atomic
	groups  *watchergroups
	singles map[int64]*watcherSingle // dedicated (non-coalesced) watchers by watcher id

	gRPCStream pb.Watch_WatchServer

	ctrlCh  chan *pb.WatchResponse // control responses
	watchCh chan *pb.WatchResponse // watch event responses

	nextWatcherID int64 // id given to the next watcher created on this stream
}
// close tears the stream down: closing both outbound channels stops
// sendLoop, then every dedicated watcher is stopped.
// NOTE(review): sws.groups is shared with other streams; confirm that
// groups.stop() is scoped to this stream's watchers.
func (sws *serverWatchStream) close() {
	close(sws.watchCh)
	close(sws.ctrlCh)
	for _, ws := range sws.singles {
		ws.stop()
	}
	sws.groups.stop()
}
// recvLoop reads watch requests off the gRPC stream until EOF or error,
// creating or cancelling watchers as requested. It runs in its own
// goroutine; the deferred close tears the whole stream down on exit.
func (sws *serverWatchStream) recvLoop() error {
	defer sws.close()
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			cr := uv.CreateRequest
			watcher := watcher{
				wr: watchRange{
					key: string(cr.Key),
					end: string(cr.RangeEnd),
				},
				id:       sws.nextWatcherID,
				ch:       sws.watchCh,
				progress: cr.ProgressNotify,
				filters:  v3rpc.FiltersFromRequest(cr),
			}
			// A watch pinned to a specific start revision cannot share an
			// upstream watcher with others, so it gets a dedicated one.
			if cr.StartRevision != 0 {
				sws.addDedicatedWatcher(watcher, cr.StartRevision)
			} else {
				sws.addCoalescedWatcher(watcher)
			}
			sws.nextWatcherID++
		case *pb.WatchRequest_CancelRequest:
			sws.removeWatcher(uv.CancelRequest.WatchId)
		default:
			// No other request union members are handled by this proxy yet.
			panic("not implemented")
		}
	}
}
// sendLoop pumps both outbound channels to the client until either channel
// is closed or a send fails.
func (sws *serverWatchStream) sendLoop() {
	send := func(resp *pb.WatchResponse, ok bool) bool {
		if !ok {
			return false
		}
		return sws.gRPCStream.Send(resp) == nil
	}
	for {
		select {
		case r, ok := <-sws.watchCh:
			if !send(r, ok) {
				return
			}
		case c, ok := <-sws.ctrlCh:
			if !send(c, ok) {
				return
			}
		}
	}
}
// addCoalescedWatcher registers w with the shared watcher groups.
func (sws *serverWatchStream) addCoalescedWatcher(w watcher) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	sws.groups.addWatcher(receiverID{streamID: sws.id, watcherID: w.id}, w)
}
// addDedicatedWatcher opens a private upstream watch starting at rev for
// watcher w; dedicated watchers are tracked in sws.singles and may later
// be folded back into a group by maybeCoalesceWatcher.
func (sws *serverWatchStream) addDedicatedWatcher(w watcher, rev int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	ctx, cancel := context.WithCancel(context.Background())
	wch := sws.cw.Watch(ctx,
		w.wr.key, clientv3.WithRange(w.wr.end),
		clientv3.WithRev(rev),
		clientv3.WithProgressNotify(),
		clientv3.WithCreatedNotify(),
	)
	ws := newWatcherSingle(wch, cancel, w, sws)
	sws.singles[w.id] = ws
	go ws.run()
}
// maybeCoalesceWatcher tries to fold a dedicated watcher back into a
// shared group; on success it is removed from the singles table.
func (sws *serverWatchStream) maybeCoalesceWatcher(ws watcherSingle) bool {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	joined := sws.groups.maybeJoinWatcherSingle(receiverID{streamID: sws.id, watcherID: ws.w.id}, ws)
	if joined {
		delete(sws.singles, ws.w.id)
	}
	return joined
}
// removeWatcher cancels watcher id, whether it lives in a shared group or
// in the dedicated singles table.
func (sws *serverWatchStream) removeWatcher(id int64) {
	sws.mu.Lock()
	defer sws.mu.Unlock()
	rid := receiverID{streamID: sws.id, watcherID: id}
	if sws.groups.removeWatcher(rid) {
		return
	}
	ws, ok := sws.singles[id]
	if !ok {
		return
	}
	delete(sws.singles, id)
	ws.stop()
}
|
package main
// #include <libpe/include/libpe/pe.h>
// #include <libpe/include/libpe/hashes.h>
// #include <libpe/include/libpe/misc.h>
// #include <libpe/include/libpe/imports.h>
// #include <libpe/include/libpe/exports.h>
// #include <libpe/include/libpe/peres.h>
// #cgo LDFLAGS: -lpe -lssl -lcrypto -lm
// #cgo CFLAGS: -std=c99
import "C"
import (
//Imports for measuring execution time of requests
"time"
//Imports for reading the config, logging and command line argument parsing.
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"unsafe"
//Imports for serving on a socket and handling routing of incoming request.
"encoding/json"
"github.com/julienschmidt/httprouter"
"net/http"
// Import to reduce service memory footprint
"runtime/debug"
)
// Result is the top-level JSON document returned by the /analyze/ endpoint.
// NOTE(review): tag casing is inconsistent ("sectionscount" vs
// "directories_count"); consumers may already depend on it, so it is left
// unchanged here.
type Result struct {
	Headers             Header       `json:"Headers"`
	Directories         []*Directory `json:"directories"`
	Directories_count   int          `json:"directories_count"`
	Sections            []*Section   `json:"sections"`
	Sections_count      int          `json:"sectionscount"`
	PEHashes            Hashes       `json:"PEHash"`
	Exports             []*Export    `json:"Exports"`
	Imports             []Import     `json:"Imports"`
	Resources           Resource     `json:"resources"`
	Entrophy            float32      `json:"Entrophy"`
	FPUTrick            bool         `json:"FPUtrick"`
	CPLAnalysis         int          `json:"CPLAnalysis"`         // 0 -> No Threat, 1 -> Malware, -1 -> Not a dll.
	CheckFakeEntryPoint int          `json:"CheckFakeEntrypoint"` // 0 -> Normal, 1 -> fake, -1 -> null.
}
// Resource groups the four node kinds of the PE resource tree as flat
// lists, mirroring libpe's peres dump (see get_resources).
type Resource struct {
	ResourceDirectory []*RDT_RESOURCE_DIRECTORY `json:"RESOURCE_DIRECTORY"`
	DirectoryEntry    []*RDT_DIRECTORY_ENTRY    `json:"DIRECTORY_ENTRY"`
	DataString        []*RDT_DATA_STRING        `json:"DATA_STRING"`
	DataEntry         []*RDT_DATA_ENTRY         `json:"DATA_ENTRY"`
}
// RDT_RESOURCE_DIRECTORY mirrors libpe's resource-directory node
// (IMAGE_RESOURCE_DIRECTORY fields plus the node type).
type RDT_RESOURCE_DIRECTORY struct {
	NodeType             int `json:"NodeType"`
	Characteristics      int `json:"Characteristics"`
	TimeDateStamp        int `json:"TimeDateStamp"`
	MajorVersion         int `json:"MajorVersion"`
	MinorVersion         int `json:"MinorVersion"`
	NumberOfNamedEntries int `json:"NumberOfNamedEntries"`
	NumberOfIdEntries    int `json:"NumberOfIdEntries"`
}
// RDT_DIRECTORY_ENTRY mirrors libpe's resource directory entry node.
type RDT_DIRECTORY_ENTRY struct {
	NodeType          int `json:"NodeType"`
	NameOffset        int `json:"NameOffset"`
	NameIsString      int `json:"NameIsString"`
	OffsetIsDirectory int `json:"OffsetIsDirectory"`
	DataIsDirectory   int `json:"DataIsDirectory"`
}
type RDT_DATA_STRING struct {
NodeType int `json"NodeType"`
Strlen int `json:"Strlen"`
String int `json:"String"`
}
// RDT_DATA_ENTRY mirrors libpe's resource data-entry node
// (IMAGE_RESOURCE_DATA_ENTRY fields plus the node type).
type RDT_DATA_ENTRY struct {
	NodeType     int `json:"NodeType"`
	OffsetToData int `json:"OffsetToData"`
	Size         int `json:"Size"`
	CodePage     int `json:"CodePage"`
	Reserved     int `json:"Reserved"`
}
// Import lists the functions imported from one DLL.
type Import struct {
	Dllname   string   `json:"DllName"`
	Functions []string `json:"Functions"`
}
// Export is one exported function; Addr is the address formatted as
// upper-case hex (see get_exports).
type Export struct {
	Addr         string `json:"Addr"`
	FunctionName string `json:"FunctionName"`
}
// Header bundles the three PE headers reported by this service.
type Header struct {
	Optional OptionalHeaders `json:"Optional"`
	Dos      DosHeaders      `json:"DosHeaders"`
	Coff     CoffHeaders     `json:"CoffHeaders"`
}
type OptionalHeaders struct {
Magic int `json:"Magic"`
MajorLinkerVersion int `json:"MajorLinkerVersion"`
MinorLinkerVersion int `json:"MinorLinkerVersion"`
SizeOfCode int `json:"SizeOfCode"`
SizeOfInitializedData int `json:"SizeOfUninitializedData"`
SizeOfUninitializedData int `json:"SizeOfUninitializedData"`
AddressOfEntryPoint int `json:"AddressOfEntryPoint"`
BaseOfCode int `json:"BaseOfCode"`
ImageBase int `json:"ImageBase"`
SectionAlignment int `json:"SectionAlignment"`
FileAlignment int `json:"FileAlignment"`
MajorOperatingSystemVersion int `json:"MajorOperatingSystemVersion"`
MinorOperatingSystemVersion int `json:"MinorOperatingSystemVersion"`
MajorImageVersion int `json:"MajorImageVersion"`
MinorImageVersion int `json:"MinorImageVersion"`
MajorSubsystemVersion int `json:"MajorSubsystemVersion"`
MinorSubsystemVersion int `json:"MinorSubsystemVersion"`
Reserved1 int `json:Reserved1"`
SizeOfImage int `json:"SizeOfImage"`
SizeOfHeaders int `json:"SizeOfHeaders"`
CheckSum int `json:"CheckSum"`
Subsystem int `json:"Subsystem"`
DllCharacteristics int `json:"DllCharacteristics"`
SizeOfStackReserve int `json:"SizeOfStackReserve"`
SizeOfStackCommit int `json:"SizeOfStackCommit"`
SizeOfHeapReserve int `json:"SizeOfHeapReserve"`
SizeOfHeapCommit int `json:"SizeOfHeapCommit"`
LoaderFlags int `json:"LoaderFlags"`
NumberOfRvaAndSizes int `json:"NumberOfRvaAndSizes"`
}
type DosHeaders struct {
Magic int `json:"e_magic"` // Magic Number
Cblp int `json:"e_cblp"`
Cp int `json:"e_cblp"`
Crlc int `json:"e_crlc"`
Cparhdr int `json:"e_cparhdr"`
Minalloc int `json:"e_minalloc"`
Maxalloc int `json:"e_maxalloc"`
Ss int `json:"e_ss"`
Sp int `json:"e_sp"`
Csum int `json:"e_csum"`
Ip int `json:"e_ip"`
Cs int `json:"e_cs"`
Lfarlc int `json:"e_lfarlc"`
Ovno int `json:"e_ovno"`
Res int `json:"e_res"`
Oemid int `json:"e_oemid"`
Oeminfo int `json:"e_oeminfo"`
Res2 int `json:"e_res2"`
Lfanew int `json:"e_lfanew"`
}
// CoffHeaders mirrors the COFF file header; all values are rendered as
// upper-case hex strings by header_coff (TimeDateStamp is a formatted
// timestamp string).
type CoffHeaders struct {
	Machine              string `json:"Machine"`
	NumberOfSections     string `json:"NumberOfSections"`
	TimeDateStamp        string `json:"TimeDateStamp"`
	PointerToSymbolTable string `json:"PointerToSymbolTable"`
	NumberOfSymbols      string `json:"NumberOfSymbols"`
	SizeOfOptionalHeader string `json:"SizeOfOptionalHeader"`
	Characteristics      string `json:"Characteristics"`
}
// Directory is one PE data directory; VirtualAddress is upper-case hex.
type Directory struct {
	Name           string `json:"Name"`
	VirtualAddress string `json:"VirtualAddress"`
	Size           int    `json:"Size"`
}
type Section struct {
Name string `json:"Name"`
VirtualAddress string `json:"VirtualAddress"`
PointerToRawData string `json:"PointerToRawData"`
NumberOfRelocations int `json:"NumberOfRelocations"`
Characteristics string `json:"Characteristics"`
VirtualSize int `json"VirtualSize"`
SizeOfRawData int `json:"SizeOfRawData"`
}
type Hash struct {
Name string `json:"Name"`
Md5 string `json:"md5"`
Sha1 string `json:"sha1"`
Sha256 string `json:"sha256"`
Ssdeep string `json:"ssdeep"`
}
type Hashes struct {
Headers [3]Hash `json:"Headers"` // Only 3 Headers : dos, coff, optional
Sections []*Hash `json:"Sections"`
FileHash Hash `json:"PEFile"`
Imphash string `json"Imphash"`
}
// Metadata describes the service itself. Description and License start
// out as file paths and are replaced with the file contents by
// load_config.
type Metadata struct {
	Name        string
	Version     string
	Description string
	Copyright   string
	License     string
}
// config structs

// Setting holds the individual service options read from the config file.
type Setting struct {
	HTTPBinding string `json:"HTTPBinding"` // host:port the HTTP server listens on
}

// Config is the root of the JSON configuration file.
type Config struct {
	Settings Setting `json:"settings"`
}
// Shared service state: parsed configuration, request logger, and static
// service metadata served by the info endpoint.
var (
	config   *Config
	info     *log.Logger
	metadata Metadata = Metadata{
		Name:        "pemta",
		Version:     "0.5.0", // version pinned while depending on boddumanohar's testing branch
		Description: "./README.md",
		Copyright:   "Copyright 2017 Holmes Group LLC",
		License:     "./LICENSE",
	}
)
// main wires up logging, loads the configuration, registers the HTTP
// routes, and blocks serving on the configured binding.
func main() {
	var (
		err        error
		configPath string
	)
	// setup logging
	info = log.New(os.Stdout, "", log.Ltime|log.Lshortfile)
	// load config
	flag.StringVar(&configPath, "config", "", "Path to the configuration file")
	flag.Parse()
	config, err = load_config(configPath)
	if err != nil {
		log.Fatalln("Couldn't decode config file without errors!", err.Error())
	}
	// setup http handlers
	router := httprouter.New()
	router.GET("/analyze/", handler_analyze)
	router.GET("/", handler_info)
	info.Printf("Binding to %s\n", config.Settings.HTTPBinding)
	log.Fatal(http.ListenAndServe(config.Settings.HTTPBinding, router))
}
// load_config parses the JSON configuration file at configPath into a
// Config. With an empty path it falls back to "service.conf" next to the
// executable. As a side effect it replaces metadata.Description and
// metadata.License with the contents of the files they point to.
func load_config(configPath string) (*Config, error) {
	config := &Config{}
	// if no path is supplied look in the current dir
	if configPath == "" {
		configPath, _ = filepath.Abs(filepath.Dir(os.Args[0]))
		configPath += "/service.conf"
	}
	// BUG FIX: the open error used to be discarded, so a missing file
	// surfaced as a confusing decode failure, and the handle was never
	// closed (fd leak on every reload).
	cfile, err := os.Open(configPath)
	if err != nil {
		return config, err
	}
	defer cfile.Close()
	if err := json.NewDecoder(cfile).Decode(&config); err != nil {
		return config, err
	}
	if metadata.Description != "" {
		if data, err := ioutil.ReadFile(string(metadata.Description)); err == nil {
			metadata.Description = strings.Replace(string(data), "\n", "<br>", -1)
		}
	}
	if metadata.License != "" {
		if data, err := ioutil.ReadFile(string(metadata.License)); err == nil {
			metadata.License = strings.Replace(string(data), "\n", "<br>", -1)
		}
	}
	return config, nil
}
// handler_info serves GET /: a small HTML snippet with the service name,
// version, description and license text.
func handler_info(f_response http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Fprintf(f_response, `<p>%s - %s</p>
<hr>
<p>%s</p>
<hr>
<p>%s</p>
`,
		metadata.Name,
		metadata.Version,
		metadata.Description,
		metadata.License)
}
// handler_analyze serves GET /analyze/?obj=<name>: it loads /tmp/<name>
// with libpe, runs every extraction pass sequentially, and writes the
// Result as JSON. Dead commented-out WaitGroup scaffolding and leftover
// debug prints were removed.
func handler_analyze(f_response http.ResponseWriter, request *http.Request, params httprouter.Params) {
	// ms-xy: calling FreeOSMemory manually drastically reduces the memory
	// footprint at the cost of a little bit of cpu efficiency (due to gc runs
	// after every call to handler_analyze)
	defer debug.FreeOSMemory()
	info.Println("Serving request:", request)
	start_time := time.Now()

	obj := request.URL.Query().Get("obj")
	if obj == "" {
		http.Error(f_response, "Missing argument 'obj'", 400)
		return
	}
	sample_path := "/tmp/" + obj
	if _, err := os.Stat(sample_path); os.IsNotExist(err) {
		http.NotFound(f_response, request)
		info.Printf("Error accessing sample (file: %s):", sample_path)
		info.Println(err)
		return
	}

	// Load and parse the file via libpe.
	var err C.pe_err_e
	var ctx C.pe_ctx_t
	cstr := C.CString(sample_path)
	// defer C.free(unsafe.Pointer(cstr))
	err = C.pe_load_file(&ctx, cstr)
	if err != C.LIBPE_E_OK {
		// TODO(review): libpe failures currently produce an empty 200
		// response; an HTTP 5xx would let callers see the failure.
		C.pe_error_print(C.stderr, err)
		return
	}
	err = C.pe_parse(&ctx)
	if err != C.LIBPE_E_OK {
		C.pe_error_print(C.stderr, err)
		return
	}
	if !C.pe_is_pe(&ctx) {
		return
	}

	// Run every extraction pass; header_optional cannot support all
	// optional headers because Golang rejects incompatible field alignment.
	result := &Result{}
	result = header_coff(ctx, result)
	result = header_dos(ctx, result)
	result = header_optional(ctx, result)
	result.Directories_count = header_directories_count(ctx)
	result = header_directories(ctx, result)
	result = header_sections(ctx, result)
	result.Sections_count = header_sections_count(ctx)
	result = get_hashes(ctx, result)
	result = get_exports(ctx, result)
	result = get_imports(ctx, result)
	result = get_resources(ctx, result)
	result.Entrophy = get_entrophy_file(ctx)
	result.FPUTrick = get_fputrick(ctx)
	result.CPLAnalysis = get_cpl_analysis(ctx)
	result.CheckFakeEntryPoint = check_fake_entrypoint(ctx)

	f_response.Header().Set("Content-Type", "text/json; charset=utf-8")
	json2http := json.NewEncoder(f_response)
	if err := json2http.Encode(result); err != nil {
		http.Error(f_response, "Generating JSON failed", 500)
		info.Println("JSON encoding failed", err.Error())
		return
	}
	elapsed_time := time.Since(start_time)
	info.Printf("Done, total time elapsed %s.\n", elapsed_time)
}
// get_resources walks libpe's resource dump and copies the four node kinds
// (resource directories, directory entries, data strings, data entries)
// into temp_result.Resources.
func get_resources(ctx C.pe_ctx_t, temp_result *Result) *Result {
	resources_count := C.get_resources_count(&ctx)
	resources := C.get_resources(&ctx)
	defer C.pe_dealloc_peres(resources)
	if resources.err != C.LIBPE_E_OK {
		// TODO: return an error code so that totem gets notified about this particular error.
		return temp_result
	}
	res_count := int(resources_count.resourcesDirectory)
	dirEntry_count := int(resources_count.directoryEntry)
	dataString_count := int(resources_count.dataString)
	dataEntry_count := int(resources_count.dataEntry)
	temp_result.Resources.ResourceDirectory = make([]*RDT_RESOURCE_DIRECTORY, res_count)
	temp_result.Resources.DirectoryEntry = make([]*RDT_DIRECTORY_ENTRY, dirEntry_count)
	temp_result.Resources.DataString = make([]*RDT_DATA_STRING, dataString_count)
	temp_result.Resources.DataEntry = make([]*RDT_DATA_ENTRY, dataEntry_count)
	// C arrays cannot be indexed from Go; view each as a bounded Go slice.
	resourcesDirectory := (*[1 << 30](C.type_RDT_RESOURCE_DIRECTORY))(unsafe.Pointer(resources.resourcesDirectory))[:res_count:res_count]
	for i := 0; i < res_count; i++ {
		temp_result.Resources.ResourceDirectory[i] = &RDT_RESOURCE_DIRECTORY{
			NodeType:             int(resourcesDirectory[i].NodeType),
			Characteristics:      int(resourcesDirectory[i].Characteristics),
			TimeDateStamp:        int(resourcesDirectory[i].TimeDateStamp),
			MajorVersion:         int(resourcesDirectory[i].MajorVersion),
			MinorVersion:         int(resourcesDirectory[i].MinorVersion),
			NumberOfNamedEntries: int(resourcesDirectory[i].NumberOfNamedEntries),
			NumberOfIdEntries:    int(resourcesDirectory[i].NumberOfIdEntries),
		}
	}
	directoryEntry := (*[1 << 30](C.type_RDT_DIRECTORY_ENTRY))(unsafe.Pointer(resources.directoryEntry))[:dirEntry_count:dirEntry_count]
	for i := 0; i < dirEntry_count; i++ {
		temp_result.Resources.DirectoryEntry[i] = &RDT_DIRECTORY_ENTRY{
			NodeType:          int(directoryEntry[i].NodeType),
			NameOffset:        int(directoryEntry[i].NameOffset),
			NameIsString:      int(directoryEntry[i].NameIsString),
			OffsetIsDirectory: int(directoryEntry[i].OffsetIsDirectory),
			DataIsDirectory:   int(directoryEntry[i].DataIsDirectory),
		}
	}
	dataString := (*[1 << 30](C.type_RDT_DATA_STRING))(unsafe.Pointer(resources.dataString))[:dataString_count:dataString_count]
	for i := 0; i < dataString_count; i++ {
		temp_result.Resources.DataString[i] = &RDT_DATA_STRING{
			NodeType: int(dataString[i].NodeType),
			Strlen:   int(dataString[i].Strlen),
			String:   int(dataString[i].String),
		}
	}
	// BUG FIX: this slice was bounded by dirEntry_count instead of
	// dataEntry_count, so a file with more data entries than directory
	// entries read past the end of the C array.
	dataEntry := (*[1 << 30](C.type_RDT_DATA_ENTRY))(unsafe.Pointer(resources.dataEntry))[:dataEntry_count:dataEntry_count]
	for i := 0; i < dataEntry_count; i++ {
		temp_result.Resources.DataEntry[i] = &RDT_DATA_ENTRY{
			NodeType:     int(dataEntry[i].NodeType),
			OffsetToData: int(dataEntry[i].OffsetToData),
			Size:         int(dataEntry[i].Size),
			CodePage:     int(dataEntry[i].CodePage),
			Reserved:     int(dataEntry[i].Reserved),
		}
	}
	return temp_result
}
// get_imports copies libpe's import table (DLL names and the function
// names imported from each) into temp_result.Imports. A leftover debug
// Println of the DLL count and stray semicolons were removed.
func get_imports(ctx C.pe_ctx_t, temp_result *Result) *Result {
	imports := C.pe_get_imports(&ctx)
	defer C.pe_dealloc_imports(imports)
	if imports.err != C.LIBPE_E_OK {
		// TODO: return an error code so that TOTEM gets notified about this particular error.
		return temp_result
	}
	dll_count := int(imports.dll_count)
	if dll_count == 0 {
		return temp_result
	}
	// converting c array into Go slices because indexing of C arrays in not possible in Go.
	dlls := (*[1 << 30](C.pe_imported_dll_t))(unsafe.Pointer(imports.dlls))[:dll_count:dll_count]
	temp_result.Imports = make([]Import, dll_count)
	for i := 0; i < dll_count; i++ {
		temp_result.Imports[i].Dllname = C.GoString(dlls[i].name)
		functions_count := int(dlls[i].functions_count)
		dll_functions := (*[1 << 30](C.pe_imported_function_t))(unsafe.Pointer(dlls[i].functions))[:functions_count:functions_count]
		temp_result.Imports[i].Functions = make([]string, functions_count)
		for j := 0; j < functions_count; j++ {
			temp_result.Imports[i].Functions[j] = C.GoString(dll_functions[j].name)
		}
	}
	return temp_result
}
// check_fake_entrypoint returns libpe's fake-entrypoint verdict
// (0 normal, 1 fake, -1 null — see Result.CheckFakeEntryPoint).
func check_fake_entrypoint(ctx C.pe_ctx_t) int {
	return int(C.pe_has_fake_entrypoint(&ctx))
}
// get_cpl_analysis returns libpe's CPL analysis verdict
// (0 no threat, 1 malware, -1 not a dll — see Result.CPLAnalysis).
func get_cpl_analysis(ctx C.pe_ctx_t) int {
	return int(C.pe_get_cpl_analysis(&ctx))
}
// get_fputrick reports whether libpe detected the FPU anti-disassembly trick.
func get_fputrick(ctx C.pe_ctx_t) bool {
	return bool(C.pe_fpu_trick(&ctx))
}
// get_entrophy_file returns the whole-file entropy computed by libpe.
func get_entrophy_file(ctx C.pe_ctx_t) float32 {
	info.Println("calculating entrophy")
	return float32(C.pe_calculate_entropy_file(&ctx))
}
// get_exports copies libpe's export table (address as upper-case hex plus
// function name) into temp_result.Exports.
func get_exports(ctx C.pe_ctx_t, temp_result *Result) *Result {
	exports := C.pe_get_exports(&ctx)
	defer C.pe_dealloc_exports(exports)
	count := int(exports.functions_count)
	if exports.err != C.LIBPE_E_OK {
		// TODO: Return an HTTP error code so that totem gets notifed about the error?
		return temp_result
	}
	if count == 0 {
		return temp_result
	}
	// View the C array as a bounded Go slice so it can be indexed.
	funcs := (*[1 << 30](*C.pe_exported_function_t))(unsafe.Pointer(exports.functions))[:count:count]
	temp_result.Exports = make([]*Export, count)
	for i, fn := range funcs {
		temp_result.Exports[i] = &Export{
			Addr:         fmt.Sprintf("%X", fn.addr),
			FunctionName: C.GoString(fn.name),
		}
	}
	return temp_result
}
// header_coff copies the COFF header fields into the result as upper-case
// hex strings; TimeDateStamp is formatted via getTimestamp instead.
func header_coff(ctx C.pe_ctx_t, temp_result *Result) *Result {
	coff := C.pe_coff(&ctx)
	temp_result.Headers.Coff.Machine = fmt.Sprintf("%X", int(coff.Machine))
	temp_result.Headers.Coff.NumberOfSections = fmt.Sprintf("%X", int(coff.NumberOfSections))
	// getTimestamp is defined elsewhere in this file; presumably it renders
	// the epoch seconds as a human-readable time — confirm its format.
	timestamp := getTimestamp(int(coff.TimeDateStamp))
	temp_result.Headers.Coff.TimeDateStamp = fmt.Sprintf("%s", timestamp)
	temp_result.Headers.Coff.PointerToSymbolTable = fmt.Sprintf("%X", int(coff.PointerToSymbolTable))
	temp_result.Headers.Coff.NumberOfSymbols = fmt.Sprintf("%X", int(coff.NumberOfSymbols))
	temp_result.Headers.Coff.SizeOfOptionalHeader = fmt.Sprintf("%X", int(coff.SizeOfOptionalHeader))
	temp_result.Headers.Coff.Characteristics = fmt.Sprintf("%X", int(coff.Characteristics))
	return temp_result
}
// header_dos copies the DOS (MZ) header fields into the result.
func header_dos(ctx C.pe_ctx_t, temp_result *Result) *Result {
	dos := C.pe_dos(&ctx)
	temp_result.Headers.Dos.Magic = int(dos.e_magic)
	temp_result.Headers.Dos.Cblp = int(dos.e_cblp)
	temp_result.Headers.Dos.Cp = int(dos.e_cp)
	temp_result.Headers.Dos.Crlc = int(dos.e_crlc)
	temp_result.Headers.Dos.Cparhdr = int(dos.e_cparhdr)
	temp_result.Headers.Dos.Minalloc = int(dos.e_minalloc)
	temp_result.Headers.Dos.Maxalloc = int(dos.e_maxalloc)
	temp_result.Headers.Dos.Ss = int(dos.e_ss)
	temp_result.Headers.Dos.Sp = int(dos.e_sp)
	temp_result.Headers.Dos.Csum = int(dos.e_csum)
	temp_result.Headers.Dos.Ip = int(dos.e_ip)
	temp_result.Headers.Dos.Cs = int(dos.e_cs)
	temp_result.Headers.Dos.Lfarlc = int(dos.e_lfarlc)
	temp_result.Headers.Dos.Ovno = int(dos.e_ovno)
	// NOTE(review): only a single element of each reserved array is kept
	// (e_res[3], e_res2[9]) because the Go struct stores one int — confirm
	// this is intended rather than a truncation bug.
	temp_result.Headers.Dos.Res = int(dos.e_res[3])
	temp_result.Headers.Dos.Oemid = int(dos.e_oemid)
	temp_result.Headers.Dos.Oeminfo = int(dos.e_oeminfo)
	temp_result.Headers.Dos.Res2 = int(dos.e_res2[9])
	temp_result.Headers.Dos.Lfanew = int(dos.e_lfanew)
	return temp_result
}
// header_optional copies the PE optional header into the result,
// dispatching on the magic to read either the 32-bit (_32) or 64-bit
// (_64) cgo union member. The two branches are intentionally kept as
// mechanical field-by-field copies because the C variants are distinct
// types and cannot share a loop or helper.
func header_optional(ctx C.pe_ctx_t, temp_result *Result) *Result {
	optional := C.pe_optional(&ctx)
	if optional._type == C.MAGIC_PE32 {
		//fmt.Println(optional._32.Magic)
		temp_result.Headers.Optional.Magic = int(optional._32.Magic)
		temp_result.Headers.Optional.MajorLinkerVersion = int(optional._32.MajorLinkerVersion)
		temp_result.Headers.Optional.MinorLinkerVersion = int(optional._32.MinorLinkerVersion)
		temp_result.Headers.Optional.SizeOfCode = int(optional._32.SizeOfCode)
		temp_result.Headers.Optional.SizeOfInitializedData = int(optional._32.SizeOfInitializedData)
		temp_result.Headers.Optional.SizeOfUninitializedData = int(optional._32.SizeOfUninitializedData)
		temp_result.Headers.Optional.AddressOfEntryPoint = int(optional._32.AddressOfEntryPoint)
		temp_result.Headers.Optional.BaseOfCode = int(optional._32.BaseOfCode)
		temp_result.Headers.Optional.ImageBase = int(optional._32.ImageBase)
		temp_result.Headers.Optional.SectionAlignment = int(optional._32.SectionAlignment)
		temp_result.Headers.Optional.FileAlignment = int(optional._32.FileAlignment)
		temp_result.Headers.Optional.MajorOperatingSystemVersion = int(optional._32.MajorOperatingSystemVersion)
		temp_result.Headers.Optional.MinorOperatingSystemVersion = int(optional._32.MinorOperatingSystemVersion)
		temp_result.Headers.Optional.MajorImageVersion = int(optional._32.MajorImageVersion)
		temp_result.Headers.Optional.MinorImageVersion = int(optional._32.MinorImageVersion)
		temp_result.Headers.Optional.MajorSubsystemVersion = int(optional._32.MajorSubsystemVersion)
		temp_result.Headers.Optional.MinorSubsystemVersion = int(optional._32.MinorSubsystemVersion)
		temp_result.Headers.Optional.Reserved1 = int(optional._32.Reserved1)
		temp_result.Headers.Optional.SizeOfImage = int(optional._32.SizeOfImage)
		temp_result.Headers.Optional.SizeOfHeaders = int(optional._32.SizeOfHeaders)
		temp_result.Headers.Optional.CheckSum = int(optional._32.CheckSum)
		temp_result.Headers.Optional.Subsystem = int(optional._32.Subsystem)
		temp_result.Headers.Optional.DllCharacteristics = int(optional._32.DllCharacteristics)
		temp_result.Headers.Optional.SizeOfStackReserve = int(optional._32.SizeOfStackReserve)
		temp_result.Headers.Optional.SizeOfStackCommit = int(optional._32.SizeOfStackCommit)
		temp_result.Headers.Optional.SizeOfHeapReserve = int(optional._32.SizeOfHeapReserve)
		temp_result.Headers.Optional.SizeOfHeapCommit = int(optional._32.SizeOfHeapCommit)
		temp_result.Headers.Optional.LoaderFlags = int(optional._32.LoaderFlags)
		temp_result.Headers.Optional.NumberOfRvaAndSizes = int(optional._32.NumberOfRvaAndSizes)
	}
	if optional._type == C.MAGIC_PE64 {
		// Same fields, read from the 64-bit variant of the header.
		temp_result.Headers.Optional.Magic = int(optional._64.Magic)
		temp_result.Headers.Optional.MajorLinkerVersion = int(optional._64.MajorLinkerVersion)
		temp_result.Headers.Optional.MinorLinkerVersion = int(optional._64.MinorLinkerVersion)
		temp_result.Headers.Optional.SizeOfCode = int(optional._64.SizeOfCode)
		temp_result.Headers.Optional.SizeOfInitializedData = int(optional._64.SizeOfInitializedData)
		temp_result.Headers.Optional.SizeOfUninitializedData = int(optional._64.SizeOfUninitializedData)
		temp_result.Headers.Optional.AddressOfEntryPoint = int(optional._64.AddressOfEntryPoint)
		temp_result.Headers.Optional.BaseOfCode = int(optional._64.BaseOfCode)
		temp_result.Headers.Optional.ImageBase = int(optional._64.ImageBase)
		temp_result.Headers.Optional.SectionAlignment = int(optional._64.SectionAlignment)
		temp_result.Headers.Optional.FileAlignment = int(optional._64.FileAlignment)
		temp_result.Headers.Optional.MajorOperatingSystemVersion = int(optional._64.MajorOperatingSystemVersion)
		temp_result.Headers.Optional.MinorOperatingSystemVersion = int(optional._64.MinorOperatingSystemVersion)
		temp_result.Headers.Optional.MajorImageVersion = int(optional._64.MajorImageVersion)
		temp_result.Headers.Optional.MinorImageVersion = int(optional._64.MinorImageVersion)
		temp_result.Headers.Optional.MajorSubsystemVersion = int(optional._64.MajorSubsystemVersion)
		temp_result.Headers.Optional.MinorSubsystemVersion = int(optional._64.MinorSubsystemVersion)
		temp_result.Headers.Optional.Reserved1 = int(optional._64.Reserved1)
		temp_result.Headers.Optional.SizeOfImage = int(optional._64.SizeOfImage)
		temp_result.Headers.Optional.SizeOfHeaders = int(optional._64.SizeOfHeaders)
		temp_result.Headers.Optional.CheckSum = int(optional._64.CheckSum)
		temp_result.Headers.Optional.Subsystem = int(optional._64.Subsystem)
		temp_result.Headers.Optional.DllCharacteristics = int(optional._64.DllCharacteristics)
		temp_result.Headers.Optional.SizeOfStackReserve = int(optional._64.SizeOfStackReserve)
		temp_result.Headers.Optional.SizeOfStackCommit = int(optional._64.SizeOfStackCommit)
		temp_result.Headers.Optional.SizeOfHeapReserve = int(optional._64.SizeOfHeapReserve)
		temp_result.Headers.Optional.SizeOfHeapCommit = int(optional._64.SizeOfHeapCommit)
		temp_result.Headers.Optional.LoaderFlags = int(optional._64.LoaderFlags)
		temp_result.Headers.Optional.NumberOfRvaAndSizes = int(optional._64.NumberOfRvaAndSizes)
	}
	return temp_result
}
// header_directories_count returns the number of PE data directories.
func header_directories_count(ctx C.pe_ctx_t) int {
	return int(C.pe_directories_count(&ctx))
}
// header_directories fills temp_result.Directories with the name, virtual
// address (upper-case hex) and size of each PE data directory.
func header_directories(ctx C.pe_ctx_t, temp_result *Result) *Result {
	count := C.pe_directories_count(&ctx)
	length := int(count)
	if length == 0 {
		return temp_result // return empty result
	}
	var directories **C.IMAGE_DATA_DIRECTORY = C.pe_directories(&ctx)
	// BUG FIX: the nil check must precede the slice conversion; slicing a
	// nil pointer with a non-zero length panics, so the old code crashed
	// before its own nil check could run.
	if directories == nil {
		return temp_result // return empty result
	}
	sliceV := (*[1 << 30](*C.IMAGE_DATA_DIRECTORY))(unsafe.Pointer(directories))[:length:length]
	temp_result.Directories = make([]*Directory, length)
	var i C.ImageDirectoryEntry = 0
	for int(i) < length {
		temp_result.Directories[i] = &Directory{
			Name:           C.GoString(C.pe_directory_name(i)),
			VirtualAddress: fmt.Sprintf("%X", int(sliceV[i].VirtualAddress)),
			Size:           int(sliceV[i].Size),
		}
		i++
	}
	return temp_result
}
// header_sections_count returns the number of sections in the PE file.
func header_sections_count(ctx C.pe_ctx_t) int {
	return int(C.pe_sections_count(&ctx))
}
// get_hashes fills temp_result.PEHashes with the whole-file hash, the
// imphash, the per-section hashes, and the dos/coff/optional header hashes.
// On a libpe error it returns whatever has been filled so far.
func get_hashes(ctx C.pe_ctx_t, temp_result *Result) *Result {
	// File hash.
	file_hash := C.get_file_hash(&ctx)
	defer C.pe_dealloc_filehash(file_hash)
	if file_hash.err != C.LIBPE_E_OK {
		return temp_result
	}
	// fmt.Sprintf("%s", s) on a string is an identity operation, so the
	// converted values are assigned directly.
	temp_result.PEHashes.FileHash.Name = C.GoString(file_hash.name)
	temp_result.PEHashes.FileHash.Md5 = C.GoString(file_hash.md5)
	temp_result.PEHashes.FileHash.Sha1 = C.GoString(file_hash.sha1)
	temp_result.PEHashes.FileHash.Sha256 = C.GoString(file_hash.sha256)
	temp_result.PEHashes.FileHash.Ssdeep = C.GoString(file_hash.ssdeep)

	// Import hash (libpe flavor 2). The stray debug fmt.Println of the
	// imphash has been removed.
	imphash := C.pe_imphash(&ctx, 2)
	temp_result.PEHashes.Imphash = C.GoString(imphash)

	// Section hashes. NOTE(review): sections.count is trusted here and no
	// error flag is checked -- confirm libpe guarantees a valid array.
	var sections C.pe_hash_section_t = C.get_sections_hash(&ctx)
	defer C.pe_dealloc_sections_hashes(sections)
	length := int(sections.count)
	sliceV := (*[1 << 30](C.pe_hash_t))(unsafe.Pointer(sections.sections))[:length:length]
	temp_result.PEHashes.Sections = make([]*Hash, length)
	for i := 0; i < length; i++ {
		temp_result.PEHashes.Sections[i] = &Hash{
			Name:   C.GoString(sliceV[i].name),
			Md5:    C.GoString(sliceV[i].md5),
			Sha1:   C.GoString(sliceV[i].sha1),
			Sha256: C.GoString(sliceV[i].sha256),
			Ssdeep: C.GoString(sliceV[i].ssdeep),
		}
	}

	// Header hashes: index 0 = dos, 1 = coff, 2 = optional.
	headers := C.get_headers_hash(&ctx)
	defer C.pe_dealloc_hdr_hashes(headers)
	if headers.err != C.LIBPE_E_OK {
		return temp_result
	}
	temp_result.PEHashes.Headers[0] = Hash{
		Name:   C.GoString(headers.dos.name),
		Md5:    C.GoString(headers.dos.md5),
		Sha1:   C.GoString(headers.dos.sha1),
		Sha256: C.GoString(headers.dos.sha256),
		Ssdeep: C.GoString(headers.dos.ssdeep),
	}
	temp_result.PEHashes.Headers[1] = Hash{
		Name:   C.GoString(headers.coff.name),
		Md5:    C.GoString(headers.coff.md5),
		Sha1:   C.GoString(headers.coff.sha1),
		Sha256: C.GoString(headers.coff.sha256),
		Ssdeep: C.GoString(headers.coff.ssdeep),
	}
	temp_result.PEHashes.Headers[2] = Hash{
		Name:   C.GoString(headers.optional.name),
		Md5:    C.GoString(headers.optional.md5),
		Sha1:   C.GoString(headers.optional.sha1),
		Sha256: C.GoString(headers.optional.sha256),
		Ssdeep: C.GoString(headers.optional.ssdeep),
	}
	return temp_result
}
// header_sections populates temp_result.Sections from the PE section table.
// Addresses and characteristics are rendered as uppercase hex strings.
func header_sections(ctx C.pe_ctx_t, temp_result *Result) *Result {
	count := C.pe_sections_count(&ctx)
	if int(count) == 0 {
		return temp_result
	}
	length := int(count)
	var sections **C.IMAGE_SECTION_HEADER = C.pe_sections(&ctx)
	// BUG FIX: check nil BEFORE the unsafe array-to-slice conversion (a nil
	// base pointer would panic on first index), and return the caller's
	// result instead of a fresh empty Result so earlier stages' data
	// is preserved. The unused tagKbdInput type was removed.
	if sections == nil {
		return temp_result
	}
	sliceV := (*[1 << 30](*C.IMAGE_SECTION_HEADER))(unsafe.Pointer(sections))[:length:length]
	temp_result.Sections = make([]*Section, length)
	for i := 0; i < length; i++ {
		temp_result.Sections[i] = &Section{
			Name:                fmt.Sprintf("%s", sliceV[i].Name),
			VirtualAddress:      fmt.Sprintf("%X", int(sliceV[i].VirtualAddress)),
			PointerToRawData:    fmt.Sprintf("%X", int(sliceV[i].PointerToRawData)),
			NumberOfRelocations: int(sliceV[i].NumberOfRelocations),
			// BUG FIX: previously formatted VirtualAddress a second time.
			Characteristics: fmt.Sprintf("%X", int(sliceV[i].Characteristics)),
			SizeOfRawData:   int(sliceV[i].SizeOfRawData),
		}
	}
	return temp_result
}
// getTimestamp converts a Unix timestamp (seconds since the epoch) into a
// human-readable string in the local timezone.
//
// BUG FIX: the previous implementation ignored its argument and always
// parsed the hard-coded literal "956165981"; it now formats unixtime.
// The strconv round-trip is kept so the file-level strconv import stays
// referenced; ParseInt cannot fail on Itoa output, but the check is
// retained defensively.
func getTimestamp(unixtime int) string {
	i, err := strconv.ParseInt(strconv.Itoa(unixtime), 10, 64)
	if err != nil {
		panic(err)
	}
	return time.Unix(i, 0).String()
}
Create separate functions to convert C arrays into Go slices.
package main
// #include <libpe/include/libpe/pe.h>
// #include <libpe/include/libpe/hashes.h>
// #include <libpe/include/libpe/misc.h>
// #include <libpe/include/libpe/imports.h>
// #include <libpe/include/libpe/exports.h>
// #include <libpe/include/libpe/peres.h>
// #cgo LDFLAGS: -lpe -lssl -lcrypto -lm
// #cgo CFLAGS: -std=c99
import "C"
import (
//Imports for measuring execution time of requests
"time"
"reflect"
//Imports for reading the config, logging and command line argument parsing.
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"unsafe"
//Imports for serving on a socket and handling routing of incoming request.
"encoding/json"
"github.com/julienschmidt/httprouter"
"net/http"
// Import to reduce service memory footprint
"runtime/debug"
)
// Result aggregates everything extracted from a PE file; it is serialized
// to JSON as the response body of the /analyze/ endpoint.
type Result struct {
	Headers           Header       `json:"Headers"`
	Directories       []*Directory `json:"directories"`
	Directories_count int          `json:"directories_count"`
	Sections          []*Section   `json:"sections"`
	// NOTE(review): "sectionscount" is inconsistent with "directories_count";
	// confirm no consumer depends on it before renaming.
	Sections_count int    `json:"sectionscount"`
	PEHashes       Hashes `json:"PEHash"`
	Exports        []*Export `json:"Exports"`
	Imports        []Import  `json:"Imports"`
	Resources      Resource  `json:"resources"`
	// "Entrophy" is a misspelling of "entropy", kept for JSON compatibility.
	Entrophy            float32 `json:"Entrophy"`
	FPUTrick            bool    `json:"FPUtrick"`
	CPLAnalysis         int     `json:"CPLAnalysis"`         // 0 -> No Threat, 1 -> Malware, -1 -> Not a dll.
	CheckFakeEntryPoint int     `json:"CheckFakeEntrypoint"` // 0 -> Normal, 1 -> fake, -1 -> null.
}
// Resource groups the four node lists extracted from the PE resource tree.
type Resource struct {
	ResourceDirectory []*RDT_RESOURCE_DIRECTORY `json:"RESOURCE_DIRECTORY"`
	DirectoryEntry    []*RDT_DIRECTORY_ENTRY    `json:"DIRECTORY_ENTRY"`
	DataString        []*RDT_DATA_STRING        `json:"DATA_STRING"`
	DataEntry         []*RDT_DATA_ENTRY         `json:"DATA_ENTRY"`
}
// RDT_RESOURCE_DIRECTORY mirrors a resource-directory node of the PE
// resource tree as reported by libpe.
type RDT_RESOURCE_DIRECTORY struct {
	NodeType             int `json:"NodeType"`
	Characteristics      int `json:"Characteristics"`
	TimeDateStamp        int `json:"TimeDateStamp"`
	MajorVersion         int `json:"MajorVersion"`
	MinorVersion         int `json:"MinorVersion"`
	NumberOfNamedEntries int `json:"NumberOfNamedEntries"`
	NumberOfIdEntries    int `json:"NumberOfIdEntries"`
}
// RDT_DIRECTORY_ENTRY mirrors a directory-entry node of the PE resource tree.
type RDT_DIRECTORY_ENTRY struct {
	NodeType          int `json:"NodeType"`
	NameOffset        int `json:"NameOffset"`
	NameIsString      int `json:"NameIsString"`
	OffsetIsDirectory int `json:"OffsetIsDirectory"`
	DataIsDirectory   int `json:"DataIsDirectory"`
}
// RDT_DATA_STRING mirrors a data-string node of the PE resource tree.
//
// BUG FIX: the NodeType tag was malformed (`json"NodeType"`, missing the
// colon), so encoding/json ignored it and fell back to the field name.
// The output key is unchanged; the tag is now well-formed.
type RDT_DATA_STRING struct {
	NodeType int `json:"NodeType"`
	Strlen   int `json:"Strlen"`
	String   int `json:"String"`
}
// RDT_DATA_ENTRY mirrors a data-entry (leaf) node of the PE resource tree.
type RDT_DATA_ENTRY struct {
	NodeType     int `json:"NodeType"`
	OffsetToData int `json:"OffsetToData"`
	Size         int `json:"Size"`
	CodePage     int `json:"CodePage"`
	Reserved     int `json:"Reserved"`
}
// Import lists the functions this PE file imports from one DLL.
type Import struct {
	Dllname   string   `json:"DllName"`
	Functions []string `json:"Functions"`
}
// Export describes one exported function: its address (hex string) and name.
type Export struct {
	Addr         string `json:"Addr"`
	FunctionName string `json:"FunctionName"`
}
// Header bundles the three PE headers extracted by the header_* functions.
type Header struct {
	Optional OptionalHeaders `json:"Optional"`
	Dos      DosHeaders      `json:"DosHeaders"`
	Coff     CoffHeaders     `json:"CoffHeaders"`
}
// OptionalHeaders mirrors the PE optional header; the same struct is used
// for both PE32 and PE32+ images (see header_optional).
//
// BUG FIXES: SizeOfInitializedData was tagged "SizeOfUninitializedData",
// producing a duplicate JSON key; Reserved1's tag was malformed
// (`json:Reserved1"`, missing the opening quote) and therefore ignored.
type OptionalHeaders struct {
	Magic                       int `json:"Magic"`
	MajorLinkerVersion          int `json:"MajorLinkerVersion"`
	MinorLinkerVersion          int `json:"MinorLinkerVersion"`
	SizeOfCode                  int `json:"SizeOfCode"`
	SizeOfInitializedData       int `json:"SizeOfInitializedData"`
	SizeOfUninitializedData     int `json:"SizeOfUninitializedData"`
	AddressOfEntryPoint         int `json:"AddressOfEntryPoint"`
	BaseOfCode                  int `json:"BaseOfCode"`
	ImageBase                   int `json:"ImageBase"`
	SectionAlignment            int `json:"SectionAlignment"`
	FileAlignment               int `json:"FileAlignment"`
	MajorOperatingSystemVersion int `json:"MajorOperatingSystemVersion"`
	MinorOperatingSystemVersion int `json:"MinorOperatingSystemVersion"`
	MajorImageVersion           int `json:"MajorImageVersion"`
	MinorImageVersion           int `json:"MinorImageVersion"`
	MajorSubsystemVersion       int `json:"MajorSubsystemVersion"`
	MinorSubsystemVersion       int `json:"MinorSubsystemVersion"`
	Reserved1                   int `json:"Reserved1"`
	SizeOfImage                 int `json:"SizeOfImage"`
	SizeOfHeaders               int `json:"SizeOfHeaders"`
	CheckSum                    int `json:"CheckSum"`
	Subsystem                   int `json:"Subsystem"`
	DllCharacteristics          int `json:"DllCharacteristics"`
	SizeOfStackReserve          int `json:"SizeOfStackReserve"`
	SizeOfStackCommit           int `json:"SizeOfStackCommit"`
	SizeOfHeapReserve           int `json:"SizeOfHeapReserve"`
	SizeOfHeapCommit            int `json:"SizeOfHeapCommit"`
	LoaderFlags                 int `json:"LoaderFlags"`
	NumberOfRvaAndSizes         int `json:"NumberOfRvaAndSizes"`
}
// DosHeaders mirrors the IMAGE_DOS_HEADER (e_* fields) of a PE file.
//
// BUG FIX: Cp was tagged "e_cblp", duplicating Cblp's JSON key; it is now
// tagged "e_cp" as the field name implies.
type DosHeaders struct {
	Magic    int `json:"e_magic"` // Magic Number
	Cblp     int `json:"e_cblp"`
	Cp       int `json:"e_cp"`
	Crlc     int `json:"e_crlc"`
	Cparhdr  int `json:"e_cparhdr"`
	Minalloc int `json:"e_minalloc"`
	Maxalloc int `json:"e_maxalloc"`
	Ss       int `json:"e_ss"`
	Sp       int `json:"e_sp"`
	Csum     int `json:"e_csum"`
	Ip       int `json:"e_ip"`
	Cs       int `json:"e_cs"`
	Lfarlc   int `json:"e_lfarlc"`
	Ovno     int `json:"e_ovno"`
	Res      int `json:"e_res"`
	Oemid    int `json:"e_oemid"`
	Oeminfo  int `json:"e_oeminfo"`
	Res2     int `json:"e_res2"`
	Lfanew   int `json:"e_lfanew"`
}
// CoffHeaders mirrors the COFF file header; header_coff renders every field
// as an uppercase-hex string except TimeDateStamp, which is human-readable.
type CoffHeaders struct {
	Machine              string `json:"Machine"`
	NumberOfSections     string `json:"NumberOfSections"`
	TimeDateStamp        string `json:"TimeDateStamp"`
	PointerToSymbolTable string `json:"PointerToSymbolTable"`
	NumberOfSymbols      string `json:"NumberOfSymbols"`
	SizeOfOptionalHeader string `json:"SizeOfOptionalHeader"`
	Characteristics      string `json:"Characteristics"`
}
// Directory describes one PE data directory entry.
type Directory struct {
	Name           string `json:"Name"`
	VirtualAddress string `json:"VirtualAddress"` // uppercase hex
	Size           int    `json:"Size"`
}
// Section describes one entry of the PE section table; address-like fields
// are rendered as uppercase-hex strings.
//
// BUG FIX: the VirtualSize tag was malformed (`json"VirtualSize"`, missing
// the colon) and therefore ignored; the output key is unchanged.
type Section struct {
	Name                string `json:"Name"`
	VirtualAddress      string `json:"VirtualAddress"`
	PointerToRawData    string `json:"PointerToRawData"`
	NumberOfRelocations int    `json:"NumberOfRelocations"`
	Characteristics     string `json:"Characteristics"`
	VirtualSize         int    `json:"VirtualSize"`
	SizeOfRawData       int    `json:"SizeOfRawData"`
}
// Hash holds the digest set libpe computes for one object (file, section,
// or header).
type Hash struct {
	Name   string `json:"Name"`
	Md5    string `json:"md5"`
	Sha1   string `json:"sha1"`
	Sha256 string `json:"sha256"`
	Ssdeep string `json:"ssdeep"`
}
// Hashes groups every hash computed for the PE file.
//
// BUG FIX: the Imphash tag was malformed (`json"Imphash"`, missing the
// colon) and therefore ignored; the output key is unchanged.
type Hashes struct {
	Headers  [3]Hash `json:"Headers"` // Only 3 Headers : dos, coff, optional
	Sections []*Hash `json:"Sections"`
	FileHash Hash    `json:"PEFile"`
	Imphash  string  `json:"Imphash"`
}
// Metadata describes this service for the info endpoint. Description and
// License start as file paths and are replaced with file contents by
// load_config.
type Metadata struct {
	Name        string
	Version     string
	Description string
	Copyright   string
	License     string
}
// config structs

// Setting holds the tunable service options read from service.conf.
type Setting struct {
	HTTPBinding string `json:"HTTPBinding"` // listen address, e.g. ":8080"
}

// Config is the top-level shape of the JSON configuration file.
type Config struct {
	Settings Setting `json:"settings"`
}
var (
	// config holds the service configuration loaded at startup.
	config *Config
	// info is the shared request logger writing to stdout.
	info *log.Logger
	// metadata describes this service; Description and License initially
	// hold file paths that load_config replaces with file contents.
	metadata Metadata = Metadata{
		Name:        "pemta",
		Version:     "1.0",
		Description: "./README.md",
		Copyright:   "Copyright 2017 Holmes Group LLC",
		License:     "./LICENSE",
	}
)
// main wires up logging, loads the configuration, registers the HTTP
// routes, and serves until ListenAndServe fails.
func main() {
	// Logger first, so everything after startup is timestamped.
	info = log.New(os.Stdout, "", log.Ltime|log.Lshortfile)

	var configPath string
	flag.StringVar(&configPath, "config", "", "Path to the configuration file")
	flag.Parse()

	var err error
	if config, err = load_config(configPath); err != nil {
		log.Fatalln("Couldn't decode config file without errors!", err.Error())
	}

	router := httprouter.New()
	router.GET("/analyze/", handler_analyze)
	router.GET("/", handler_info)

	info.Printf("Binding to %s\n", config.Settings.HTTPBinding)
	log.Fatal(http.ListenAndServe(config.Settings.HTTPBinding, router))
}
// load_config parses the JSON configuration file at configPath into a
// Config. If configPath is empty, "service.conf" next to the executable is
// used. As a side effect it replaces metadata.Description and
// metadata.License with the contents of the files they point to
// (best effort; read errors leave the paths in place).
func load_config(configPath string) (*Config, error) {
	config := &Config{}
	// if no path is supplied look in the current dir
	if configPath == "" {
		configPath, _ = filepath.Abs(filepath.Dir(os.Args[0]))
		configPath += "/service.conf"
	}
	// BUG FIX: the os.Open error was previously discarded and the file was
	// never closed; report the error and release the handle.
	cfile, err := os.Open(configPath)
	if err != nil {
		return config, err
	}
	defer cfile.Close()
	if err := json.NewDecoder(cfile).Decode(&config); err != nil {
		return config, err
	}
	if metadata.Description != "" {
		if data, err := ioutil.ReadFile(string(metadata.Description)); err == nil {
			metadata.Description = strings.Replace(string(data), "\n", "<br>", -1)
		}
	}
	if metadata.License != "" {
		if data, err := ioutil.ReadFile(string(metadata.License)); err == nil {
			metadata.License = strings.Replace(string(data), "\n", "<br>", -1)
		}
	}
	return config, nil
}
// handler_info serves a small HTML page with the service name, version,
// description, and license (GET /).
func handler_info(f_response http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	fmt.Fprintf(f_response, `<p>%s - %s</p>
<hr>
<p>%s</p>
<hr>
<p>%s</p>
`,
		metadata.Name,
		metadata.Version,
		metadata.Description,
		metadata.License)
}
// handler_analyze handles GET /analyze/?obj=<name>: it loads /tmp/<name>
// as a PE file via libpe, runs every extraction stage, and writes the
// combined Result as JSON.
func handler_analyze(f_response http.ResponseWriter, request *http.Request, params httprouter.Params) {
	// ms-xy: calling FreeOSMemory manually drastically reduces the memory
	// footprint at the cost of a little bit of cpu efficiency (due to gc runs
	// after every call to handler_analyze)
	defer debug.FreeOSMemory()
	info.Println("Serving request:", request)
	start_time := time.Now()
	obj := request.URL.Query().Get("obj")
	if obj == "" {
		http.Error(f_response, "Missing argument 'obj'", 400)
		return
	}
	// Samples are expected to have been placed in /tmp by an upstream
	// component.
	sample_path := "/tmp/" + obj
	if _, err := os.Stat(sample_path); os.IsNotExist(err) {
		http.NotFound(f_response, request)
		info.Printf("Error accessing sample (file: %s):", sample_path)
		info.Println(err)
		return
	}
	var err C.pe_err_e
	var ctx C.pe_ctx_t
	// NOTE(review): the C string below is never freed (the free is commented
	// out), leaking a few bytes per request -- confirm and re-enable.
	cstr := C.CString(sample_path)
	// defer C.free(unsafe.Pointer(cstr))
	err = C.pe_load_file(&ctx, cstr)
	// NOTE(review): the early returns below send no body or status code, so
	// the client receives an empty 200 on libpe failures -- confirm intended.
	if err != C.LIBPE_E_OK {
		C.pe_error_print(C.stderr, err)
		return
	}
	err = C.pe_parse(&ctx)
	if err != C.LIBPE_E_OK {
		C.pe_error_print(C.stderr, err)
		return
	}
	if !C.pe_is_pe(&ctx) {
		return
	}
	// Run every extraction stage; each fills its slice/field of result.
	result := &Result{}
	result = header_coff(ctx, result)
	result = header_dos(ctx, result)
	result = header_optional(ctx, result)
	result.Directories_count = header_directories_count(ctx)
	result = header_directories(ctx, result)
	result = header_sections(ctx, result)
	result.Sections_count = header_sections_count(ctx)
	result = get_hashes(ctx, result)
	result = get_exports(ctx, result)
	result = get_imports(ctx, result)
	result = get_resources(ctx, result)
	result.Entrophy = get_entrophy_file(ctx)
	result.FPUTrick = get_fputrick(ctx)
	result.CPLAnalysis = get_cpl_analysis(ctx)
	result.CheckFakeEntryPoint = check_fake_entrypoint(ctx)
	f_response.Header().Set("Content-Type", "text/json; charset=utf-8")
	json2http := json.NewEncoder(f_response)
	if err := json2http.Encode(result); err != nil {
		http.Error(f_response, "Generating JSON failed", 500)
		info.Println("JSON encoding failed", err.Error())
		return
	}
	elapsed_time := time.Since(start_time)
	info.Printf("Done, total time elapsed %s.\n", elapsed_time)
}
// get_resources walks the PE resource tree and fills temp_result.Resources
// with the four node kinds libpe reports: resource directories, directory
// entries, data strings, and data entries.
func get_resources(ctx C.pe_ctx_t, temp_result *Result) *Result {
	resources_count := C.get_resources_count(&ctx)
	resources := C.get_resources(&ctx)
	defer C.pe_dealloc_peres(resources)
	res_count := int(resources_count.resourcesDirectory)
	dirEntry_count := int(resources_count.directoryEntry)
	dataString_count := int(resources_count.dataString)
	dataEntry_count := int(resources_count.dataEntry)
	if resources.err != C.LIBPE_E_OK {
		return temp_result
	}
	temp_result.Resources.ResourceDirectory = make([]*RDT_RESOURCE_DIRECTORY, res_count)
	temp_result.Resources.DirectoryEntry = make([]*RDT_DIRECTORY_ENTRY, dirEntry_count)
	temp_result.Resources.DataString = make([]*RDT_DATA_STRING, dataString_count)
	temp_result.Resources.DataEntry = make([]*RDT_DATA_ENTRY, dataEntry_count)

	resourcesDirectory := arr_of_resourceDirectory(resources.resourcesDirectory, res_count)
	for i := 0; i < res_count; i++ {
		temp_result.Resources.ResourceDirectory[i] = &RDT_RESOURCE_DIRECTORY{
			NodeType:             int(resourcesDirectory[i].NodeType),
			Characteristics:      int(resourcesDirectory[i].Characteristics),
			TimeDateStamp:        int(resourcesDirectory[i].TimeDateStamp),
			MajorVersion:         int(resourcesDirectory[i].MajorVersion),
			MinorVersion:         int(resourcesDirectory[i].MinorVersion),
			NumberOfNamedEntries: int(resourcesDirectory[i].NumberOfNamedEntries),
			NumberOfIdEntries:    int(resourcesDirectory[i].NumberOfIdEntries),
		}
	}
	directoryEntry := arr_of_directoryEntry(resources.directoryEntry, dirEntry_count)
	for i := 0; i < dirEntry_count; i++ {
		temp_result.Resources.DirectoryEntry[i] = &RDT_DIRECTORY_ENTRY{
			NodeType:          int(directoryEntry[i].NodeType),
			NameOffset:        int(directoryEntry[i].NameOffset),
			NameIsString:      int(directoryEntry[i].NameIsString),
			OffsetIsDirectory: int(directoryEntry[i].OffsetIsDirectory),
			DataIsDirectory:   int(directoryEntry[i].DataIsDirectory),
		}
	}
	dataString := arr_of_dataString(resources.dataString, dataString_count)
	for i := 0; i < dataString_count; i++ {
		temp_result.Resources.DataString[i] = &RDT_DATA_STRING{
			NodeType: int(dataString[i].NodeType),
			Strlen:   int(dataString[i].Strlen),
			String:   int(dataString[i].String),
		}
	}
	// BUG FIX: the data-entry slice was previously sized with
	// dirEntry_count; it must use dataEntry_count to match the loop bound,
	// otherwise the loop reads past (or short of) the real array.
	dataEntry := arr_of_dataEntry(resources.dataEntry, dataEntry_count)
	for i := 0; i < dataEntry_count; i++ {
		temp_result.Resources.DataEntry[i] = &RDT_DATA_ENTRY{
			NodeType:     int(dataEntry[i].NodeType),
			OffsetToData: int(dataEntry[i].OffsetToData),
			Size:         int(dataEntry[i].Size),
			CodePage:     int(dataEntry[i].CodePage),
			Reserved:     int(dataEntry[i].Reserved),
		}
	}
	return temp_result
}
// get_imports fills temp_result.Imports with every imported DLL and the
// list of functions imported from it.
func get_imports(ctx C.pe_ctx_t, temp_result *Result) *Result {
	imports := C.pe_get_imports(&ctx)
	defer C.pe_dealloc_imports(imports)
	if imports.err != C.LIBPE_E_OK {
		return temp_result
	}
	dllCount := int(imports.dll_count)
	if dllCount == 0 {
		return temp_result
	}
	dlls := arr_of_dlls(imports.dlls, dllCount)
	temp_result.Imports = make([]Import, dllCount)
	for i := 0; i < dllCount; i++ {
		fnCount := int(dlls[i].functions_count)
		fns := arr_of_dll_functions(dlls[i].functions, fnCount)
		names := make([]string, fnCount)
		for j := 0; j < fnCount; j++ {
			names[j] = C.GoString(fns[j].name)
		}
		temp_result.Imports[i] = Import{
			Dllname:   C.GoString(dlls[i].name),
			Functions: names,
		}
	}
	return temp_result
}
// check_fake_entrypoint reports libpe's fake-entrypoint analysis
// (0 -> normal, 1 -> fake, -1 -> null; see Result.CheckFakeEntryPoint).
func check_fake_entrypoint(ctx C.pe_ctx_t) int {
	return int(C.pe_has_fake_entrypoint(&ctx))
}
// get_cpl_analysis reports libpe's CPL-malware analysis result
// (0 -> no threat, 1 -> malware, -1 -> not a dll; see Result.CPLAnalysis).
func get_cpl_analysis(ctx C.pe_ctx_t) int {
	return int(C.pe_get_cpl_analysis(&ctx))
}
// get_fputrick reports whether libpe detected the FPU anti-disassembly
// trick in the file.
func get_fputrick(ctx C.pe_ctx_t) bool {
	return bool(C.pe_fpu_trick(&ctx))
}
// get_entrophy_file returns the entropy of the whole file as computed by
// libpe ("entrophy" spelling kept for consistency with Result.Entrophy).
func get_entrophy_file(ctx C.pe_ctx_t) float32 {
	return float32(C.pe_calculate_entropy_file(&ctx))
}
// get_exports fills temp_result.Exports with every exported function
// (address as uppercase hex, plus name).
func get_exports(ctx C.pe_ctx_t, temp_result *Result) *Result {
	exports := C.pe_get_exports(&ctx)
	defer C.pe_dealloc_exports(exports)
	// FIX: check the libpe error flag before reading any other field of the
	// returned struct (functions_count was previously read first).
	if exports.err != C.LIBPE_E_OK {
		return temp_result
	}
	functions_count := int(exports.functions_count)
	if functions_count == 0 {
		return temp_result
	}
	exports_functions := arr_of_exports_functions(exports.functions, functions_count)
	temp_result.Exports = make([]*Export, functions_count)
	for i := 0; i < functions_count; i++ {
		temp_result.Exports[i] = &Export{
			Addr:         fmt.Sprintf("%X", exports_functions[i].addr),
			FunctionName: C.GoString(exports_functions[i].name),
		}
	}
	return temp_result
}
// header_coff copies the COFF header into temp_result, rendering numeric
// fields as uppercase-hex strings and TimeDateStamp as a readable date.
func header_coff(ctx C.pe_ctx_t, temp_result *Result) *Result {
	coff := C.pe_coff(&ctx)
	temp_result.Headers.Coff.Machine = fmt.Sprintf("%X", int(coff.Machine))
	temp_result.Headers.Coff.NumberOfSections = fmt.Sprintf("%X", int(coff.NumberOfSections))
	timestamp := getTimestamp(int(coff.TimeDateStamp))
	temp_result.Headers.Coff.TimeDateStamp = fmt.Sprintf("%s", timestamp)
	temp_result.Headers.Coff.PointerToSymbolTable = fmt.Sprintf("%X", int(coff.PointerToSymbolTable))
	temp_result.Headers.Coff.NumberOfSymbols = fmt.Sprintf("%X", int(coff.NumberOfSymbols))
	temp_result.Headers.Coff.SizeOfOptionalHeader = fmt.Sprintf("%X", int(coff.SizeOfOptionalHeader))
	temp_result.Headers.Coff.Characteristics = fmt.Sprintf("%X", int(coff.Characteristics))
	return temp_result
}
// header_dos copies the IMAGE_DOS_HEADER fields into temp_result.
func header_dos(ctx C.pe_ctx_t, temp_result *Result) *Result {
	dos := C.pe_dos(&ctx)
	temp_result.Headers.Dos.Magic = int(dos.e_magic)
	temp_result.Headers.Dos.Cblp = int(dos.e_cblp)
	temp_result.Headers.Dos.Cp = int(dos.e_cp)
	temp_result.Headers.Dos.Crlc = int(dos.e_crlc)
	temp_result.Headers.Dos.Cparhdr = int(dos.e_cparhdr)
	temp_result.Headers.Dos.Minalloc = int(dos.e_minalloc)
	temp_result.Headers.Dos.Maxalloc = int(dos.e_maxalloc)
	temp_result.Headers.Dos.Ss = int(dos.e_ss)
	temp_result.Headers.Dos.Sp = int(dos.e_sp)
	temp_result.Headers.Dos.Csum = int(dos.e_csum)
	temp_result.Headers.Dos.Ip = int(dos.e_ip)
	temp_result.Headers.Dos.Cs = int(dos.e_cs)
	temp_result.Headers.Dos.Lfarlc = int(dos.e_lfarlc)
	temp_result.Headers.Dos.Ovno = int(dos.e_ovno)
	// NOTE(review): only the last element of the reserved arrays is kept
	// (e_res[3], e_res2[9]) since the JSON fields are scalar ints --
	// confirm that is the intended behavior.
	temp_result.Headers.Dos.Res = int(dos.e_res[3])
	temp_result.Headers.Dos.Oemid = int(dos.e_oemid)
	temp_result.Headers.Dos.Oeminfo = int(dos.e_oeminfo)
	temp_result.Headers.Dos.Res2 = int(dos.e_res2[9])
	temp_result.Headers.Dos.Lfanew = int(dos.e_lfanew)
	return temp_result
}
// header_optional copies the PE optional header into temp_result. The
// layout differs between 32-bit (PE32) and 64-bit (PE32+) images, so the
// two magic values are handled by separate, otherwise parallel, branches;
// an unknown magic leaves temp_result untouched.
func header_optional(ctx C.pe_ctx_t, temp_result *Result) *Result {
	optional := C.pe_optional(&ctx)
	// 32-bit image (PE32).
	if optional._type == C.MAGIC_PE32 {
		temp_result.Headers.Optional.Magic = int(optional._32.Magic)
		temp_result.Headers.Optional.MajorLinkerVersion = int(optional._32.MajorLinkerVersion)
		temp_result.Headers.Optional.MinorLinkerVersion = int(optional._32.MinorLinkerVersion)
		temp_result.Headers.Optional.SizeOfCode = int(optional._32.SizeOfCode)
		temp_result.Headers.Optional.SizeOfInitializedData = int(optional._32.SizeOfInitializedData)
		temp_result.Headers.Optional.SizeOfUninitializedData = int(optional._32.SizeOfUninitializedData)
		temp_result.Headers.Optional.AddressOfEntryPoint = int(optional._32.AddressOfEntryPoint)
		temp_result.Headers.Optional.BaseOfCode = int(optional._32.BaseOfCode)
		temp_result.Headers.Optional.ImageBase = int(optional._32.ImageBase)
		temp_result.Headers.Optional.SectionAlignment = int(optional._32.SectionAlignment)
		temp_result.Headers.Optional.FileAlignment = int(optional._32.FileAlignment)
		temp_result.Headers.Optional.MajorOperatingSystemVersion = int(optional._32.MajorOperatingSystemVersion)
		temp_result.Headers.Optional.MinorOperatingSystemVersion = int(optional._32.MinorOperatingSystemVersion)
		temp_result.Headers.Optional.MajorImageVersion = int(optional._32.MajorImageVersion)
		temp_result.Headers.Optional.MinorImageVersion = int(optional._32.MinorImageVersion)
		temp_result.Headers.Optional.MajorSubsystemVersion = int(optional._32.MajorSubsystemVersion)
		temp_result.Headers.Optional.MinorSubsystemVersion = int(optional._32.MinorSubsystemVersion)
		temp_result.Headers.Optional.Reserved1 = int(optional._32.Reserved1)
		temp_result.Headers.Optional.SizeOfImage = int(optional._32.SizeOfImage)
		temp_result.Headers.Optional.SizeOfHeaders = int(optional._32.SizeOfHeaders)
		temp_result.Headers.Optional.CheckSum = int(optional._32.CheckSum)
		temp_result.Headers.Optional.Subsystem = int(optional._32.Subsystem)
		temp_result.Headers.Optional.DllCharacteristics = int(optional._32.DllCharacteristics)
		temp_result.Headers.Optional.SizeOfStackReserve = int(optional._32.SizeOfStackReserve)
		temp_result.Headers.Optional.SizeOfStackCommit = int(optional._32.SizeOfStackCommit)
		temp_result.Headers.Optional.SizeOfHeapReserve = int(optional._32.SizeOfHeapReserve)
		temp_result.Headers.Optional.SizeOfHeapCommit = int(optional._32.SizeOfHeapCommit)
		temp_result.Headers.Optional.LoaderFlags = int(optional._32.LoaderFlags)
		temp_result.Headers.Optional.NumberOfRvaAndSizes = int(optional._32.NumberOfRvaAndSizes)
	}
	// 64-bit image (PE32+).
	if optional._type == C.MAGIC_PE64 {
		temp_result.Headers.Optional.Magic = int(optional._64.Magic)
		temp_result.Headers.Optional.MajorLinkerVersion = int(optional._64.MajorLinkerVersion)
		temp_result.Headers.Optional.MinorLinkerVersion = int(optional._64.MinorLinkerVersion)
		temp_result.Headers.Optional.SizeOfCode = int(optional._64.SizeOfCode)
		temp_result.Headers.Optional.SizeOfInitializedData = int(optional._64.SizeOfInitializedData)
		temp_result.Headers.Optional.SizeOfUninitializedData = int(optional._64.SizeOfUninitializedData)
		temp_result.Headers.Optional.AddressOfEntryPoint = int(optional._64.AddressOfEntryPoint)
		temp_result.Headers.Optional.BaseOfCode = int(optional._64.BaseOfCode)
		temp_result.Headers.Optional.ImageBase = int(optional._64.ImageBase)
		temp_result.Headers.Optional.SectionAlignment = int(optional._64.SectionAlignment)
		temp_result.Headers.Optional.FileAlignment = int(optional._64.FileAlignment)
		temp_result.Headers.Optional.MajorOperatingSystemVersion = int(optional._64.MajorOperatingSystemVersion)
		temp_result.Headers.Optional.MinorOperatingSystemVersion = int(optional._64.MinorOperatingSystemVersion)
		temp_result.Headers.Optional.MajorImageVersion = int(optional._64.MajorImageVersion)
		temp_result.Headers.Optional.MinorImageVersion = int(optional._64.MinorImageVersion)
		temp_result.Headers.Optional.MajorSubsystemVersion = int(optional._64.MajorSubsystemVersion)
		temp_result.Headers.Optional.MinorSubsystemVersion = int(optional._64.MinorSubsystemVersion)
		temp_result.Headers.Optional.Reserved1 = int(optional._64.Reserved1)
		temp_result.Headers.Optional.SizeOfImage = int(optional._64.SizeOfImage)
		temp_result.Headers.Optional.SizeOfHeaders = int(optional._64.SizeOfHeaders)
		temp_result.Headers.Optional.CheckSum = int(optional._64.CheckSum)
		temp_result.Headers.Optional.Subsystem = int(optional._64.Subsystem)
		temp_result.Headers.Optional.DllCharacteristics = int(optional._64.DllCharacteristics)
		temp_result.Headers.Optional.SizeOfStackReserve = int(optional._64.SizeOfStackReserve)
		temp_result.Headers.Optional.SizeOfStackCommit = int(optional._64.SizeOfStackCommit)
		temp_result.Headers.Optional.SizeOfHeapReserve = int(optional._64.SizeOfHeapReserve)
		temp_result.Headers.Optional.SizeOfHeapCommit = int(optional._64.SizeOfHeapCommit)
		temp_result.Headers.Optional.LoaderFlags = int(optional._64.LoaderFlags)
		temp_result.Headers.Optional.NumberOfRvaAndSizes = int(optional._64.NumberOfRvaAndSizes)
	}
	return temp_result
}
// header_directories_count returns the number of PE data directories
// reported by libpe for the given parsing context.
func header_directories_count(ctx C.pe_ctx_t) int {
	return int(C.pe_directories_count(&ctx))
}
// header_directories populates temp_result.Directories with the PE data
// directories (name, virtual address as hex string, size).
func header_directories(ctx C.pe_ctx_t, temp_result *Result) *Result {
	count := C.pe_directories_count(&ctx)
	if int(count) == 0 {
		return temp_result // nothing to report
	}
	length := int(count)
	directories := C.pe_directories(&ctx)
	// BUG FIX: the nil check must precede the array-to-slice conversion;
	// indexing a slice built from a nil base pointer would panic.
	if directories == nil {
		return temp_result
	}
	arr_directories := arr_of_hdr_directories(directories, length)
	temp_result.Directories = make([]*Directory, length)
	// The loop index doubles as the libpe directory-entry enum value.
	var i C.ImageDirectoryEntry = 0
	for int(i) < length {
		temp_result.Directories[i] = &Directory{
			Name:           C.GoString(C.pe_directory_name(i)),
			VirtualAddress: fmt.Sprintf("%X", int(arr_directories[i].VirtualAddress)),
			Size:           int(arr_directories[i].Size),
		}
		i++
	}
	return temp_result
}
// header_sections_count returns the number of sections in the PE file.
func header_sections_count(ctx C.pe_ctx_t) int {
	return int(C.pe_sections_count(&ctx))
}
// get_hashes fills temp_result.PEHashes with the whole-file hash, the
// imphash, the per-section hashes, and the dos/coff/optional header hashes.
// On a libpe error it returns whatever has been filled so far.
func get_hashes(ctx C.pe_ctx_t, temp_result *Result) *Result {
	// File hash.
	file_hash := C.get_file_hash(&ctx)
	defer C.pe_dealloc_filehash(file_hash)
	if file_hash.err != C.LIBPE_E_OK {
		return temp_result
	}
	// fmt.Sprintf("%s", s) on a string is an identity operation, so the
	// converted values are assigned directly.
	temp_result.PEHashes.FileHash = Hash{
		Name:   C.GoString(file_hash.name),
		Md5:    C.GoString(file_hash.md5),
		Sha1:   C.GoString(file_hash.sha1),
		Sha256: C.GoString(file_hash.sha256),
		Ssdeep: C.GoString(file_hash.ssdeep),
	}

	// Import hash (libpe flavor 2).
	imphash := C.pe_imphash(&ctx, 2)
	temp_result.PEHashes.Imphash = C.GoString(imphash)

	// Section hashes. NOTE(review): sections.count is trusted and no error
	// flag is checked here -- confirm libpe guarantees a valid array.
	sections := C.get_sections_hash(&ctx)
	defer C.pe_dealloc_sections_hashes(sections)
	length := int(sections.count)
	arr_hash := arr_of_hash(sections.sections, length)
	temp_result.PEHashes.Sections = make([]*Hash, length)
	for i := 0; i < length; i++ {
		temp_result.PEHashes.Sections[i] = &Hash{
			Name:   C.GoString(arr_hash[i].name),
			Md5:    C.GoString(arr_hash[i].md5),
			Sha1:   C.GoString(arr_hash[i].sha1),
			Sha256: C.GoString(arr_hash[i].sha256),
			Ssdeep: C.GoString(arr_hash[i].ssdeep),
		}
	}

	// Header hashes: index 0 = dos, 1 = coff, 2 = optional.
	headers := C.get_headers_hash(&ctx)
	defer C.pe_dealloc_hdr_hashes(headers)
	if headers.err != C.LIBPE_E_OK {
		return temp_result
	}
	temp_result.PEHashes.Headers[0] = Hash{
		Name:   C.GoString(headers.dos.name),
		Md5:    C.GoString(headers.dos.md5),
		Sha1:   C.GoString(headers.dos.sha1),
		Sha256: C.GoString(headers.dos.sha256),
		Ssdeep: C.GoString(headers.dos.ssdeep),
	}
	temp_result.PEHashes.Headers[1] = Hash{
		Name:   C.GoString(headers.coff.name),
		Md5:    C.GoString(headers.coff.md5),
		Sha1:   C.GoString(headers.coff.sha1),
		Sha256: C.GoString(headers.coff.sha256),
		Ssdeep: C.GoString(headers.coff.ssdeep),
	}
	temp_result.PEHashes.Headers[2] = Hash{
		Name:   C.GoString(headers.optional.name),
		Md5:    C.GoString(headers.optional.md5),
		Sha1:   C.GoString(headers.optional.sha1),
		Sha256: C.GoString(headers.optional.sha256),
		Ssdeep: C.GoString(headers.optional.ssdeep),
	}
	return temp_result
}
// header_sections populates temp_result.Sections from the PE section table.
// Addresses and characteristics are rendered as uppercase hex strings.
func header_sections(ctx C.pe_ctx_t, temp_result *Result) *Result {
	count := C.pe_sections_count(&ctx)
	if int(count) == 0 {
		return temp_result
	}
	length := int(count)
	var sections **C.IMAGE_SECTION_HEADER = C.pe_sections(&ctx)
	// BUG FIX: check nil BEFORE converting the C array to a Go slice;
	// indexing a slice built from a nil base pointer would panic. The
	// unused tagKbdInput type and stale commented-out conversion were
	// removed.
	if sections == nil {
		return temp_result
	}
	arr_sec_hdr := arr_of_sec_hdr(sections, length)
	temp_result.Sections = make([]*Section, length)
	for i := 0; i < length; i++ {
		temp_result.Sections[i] = &Section{
			Name:                fmt.Sprintf("%s", arr_sec_hdr[i].Name),
			VirtualAddress:      fmt.Sprintf("%X", int(arr_sec_hdr[i].VirtualAddress)),
			PointerToRawData:    fmt.Sprintf("%X", int(arr_sec_hdr[i].PointerToRawData)),
			NumberOfRelocations: int(arr_sec_hdr[i].NumberOfRelocations),
			// BUG FIX: previously formatted VirtualAddress a second time.
			Characteristics: fmt.Sprintf("%X", int(arr_sec_hdr[i].Characteristics)),
			SizeOfRawData:   int(arr_sec_hdr[i].SizeOfRawData),
		}
	}
	return temp_result
}
// getTimestamp converts a Unix timestamp (seconds since the epoch) into a
// human-readable string in the local timezone.
//
// BUG FIX: the previous implementation ignored its argument and always
// parsed the hard-coded literal "956165981"; it now formats unixtime.
// The strconv round-trip is kept so the file-level strconv import stays
// referenced; ParseInt cannot fail on Itoa output, but the check is
// retained defensively.
func getTimestamp(unixtime int) string {
	i, err := strconv.ParseInt(strconv.Itoa(unixtime), 10, 64)
	if err != nil {
		panic(err)
	}
	return time.Unix(i, 0).String()
}
// converting c array into Go slices
// https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices

// arr_of_resourceDirectory views a C array of count resource-directory
// records as a Go slice (no copy; the C memory must outlive the slice).
func arr_of_resourceDirectory(resourcesDirectory *C.type_RDT_RESOURCE_DIRECTORY, count int) []C.type_RDT_RESOURCE_DIRECTORY {
	_resourcesDirectory := (*[1 << 30](C.type_RDT_RESOURCE_DIRECTORY))(unsafe.Pointer(resourcesDirectory))[:count:count]
	// NOTE(review): leftover debug output; removing it would also require
	// dropping the "reflect" import (its only use in this file).
	fmt.Println(reflect.TypeOf(_resourcesDirectory))
	return _resourcesDirectory
}
// arr_of_directoryEntry views a C array of count directory-entry records
// as a Go slice (no copy; the C memory must outlive the slice).
func arr_of_directoryEntry(directoryEntry *C.type_RDT_DIRECTORY_ENTRY, count int) []C.type_RDT_DIRECTORY_ENTRY {
	return (*[1 << 30]C.type_RDT_DIRECTORY_ENTRY)(unsafe.Pointer(directoryEntry))[:count:count]
}
// arr_of_dataString views a C array of count data-string records as a Go
// slice (no copy; the C memory must outlive the slice).
func arr_of_dataString(dataString *C.type_RDT_DATA_STRING, count int) []C.type_RDT_DATA_STRING {
	return (*[1 << 30]C.type_RDT_DATA_STRING)(unsafe.Pointer(dataString))[:count:count]
}
// arr_of_dataEntry views a C array of count data-entry records as a Go
// slice (no copy; the C memory must outlive the slice).
func arr_of_dataEntry(dataEntry *C.type_RDT_DATA_ENTRY, count int) []C.type_RDT_DATA_ENTRY {
	return (*[1 << 30]C.type_RDT_DATA_ENTRY)(unsafe.Pointer(dataEntry))[:count:count]
}
// arr_of_dlls reinterprets the C array of imported-DLL records as a Go
// slice of length/capacity count, without copying.
func arr_of_dlls(dlls *C.pe_imported_dll_t, count int) []C.pe_imported_dll_t {
	_dlls := (*[1 << 30](C.pe_imported_dll_t))(unsafe.Pointer(dlls))[:count:count]
	return _dlls
}
// arr_of_dll_functions reinterprets the C array of imported-function
// records as a Go slice of length/capacity count, without copying.
func arr_of_dll_functions(functions *C.pe_imported_function_t, count int) []C.pe_imported_function_t {
	_dll_functions := (*[1 << 30](C.pe_imported_function_t))(unsafe.Pointer(functions))[:count:count]
	return _dll_functions
}
// arr_of_exports_functions reinterprets the C array of exported-function
// records as a Go slice of length/capacity count, without copying.
func arr_of_exports_functions(functions *C.pe_exported_function_t, count int) []C.pe_exported_function_t {
	_exports_functions := (*[1 << 30](C.pe_exported_function_t))(unsafe.Pointer(functions))[:count:count]
	return _exports_functions
}
// arr_of_hdr_directories reinterprets the C array of data-directory
// pointers as a Go slice of length/capacity count, without copying.
func arr_of_hdr_directories(directories **C.IMAGE_DATA_DIRECTORY, count int) []*C.IMAGE_DATA_DIRECTORY {
	_arr_directories := (*[1 << 30](*C.IMAGE_DATA_DIRECTORY))(unsafe.Pointer(directories))[:count:count]
	return _arr_directories
}
// arr_of_hash reinterprets the C array of section-hash records as a Go
// slice of length/capacity count, without copying.
func arr_of_hash(hash_sections *C.pe_hash_t, count int) []C.pe_hash_t {
	_arr_hash := (*[1 << 30](C.pe_hash_t))(unsafe.Pointer(hash_sections))[:count:count] // converting c array into Go slices
	return _arr_hash
}
// arr_of_sec_hdr reinterprets the C array of section-header pointers as
// a Go slice of length/capacity count, without copying.
func arr_of_sec_hdr(sec_hdr **C.IMAGE_SECTION_HEADER, count int) []*C.IMAGE_SECTION_HEADER {
	_sec_hdr := (*[1 << 30](*C.IMAGE_SECTION_HEADER))(unsafe.Pointer(sec_hdr))[:count:count]
	return _sec_hdr
}
|
package api
import (
"net/http"
"net/url"
"socialapi/workers/common/response"
"socialapi/workers/mail/models"
)
// Parse handles an incoming parsed-mail request: validation failures are
// deliberately answered with 200 OK (so the mail parser service does not
// retry an unfixable payload); only persistence failures report an error.
func Parse(u *url.URL, h http.Header, req *models.Mail) (int, http.Header, interface{}, error) {
	if err := req.Validate(); err != nil {
		// fail silently: we don't want the mail parser service to retry
		// on failed validation, so report success and drop the message
		return response.NewDefaultOK()
	}
	if err := req.Persist(); err != nil {
		return response.NewBadRequest(err)
	}
	return response.NewDefaultOK()
}
socialapi/mailparse: handler package error is fixed
package api
import (
"net/http"
"net/url"
"socialapi/workers/common/response"
"socialapi/workers/email/mailparse/models"
)
// Parse handles an incoming parsed-mail request: validation failures are
// deliberately answered with 200 OK (so the mail parser service does not
// retry an unfixable payload); only persistence failures report an error.
func Parse(u *url.URL, h http.Header, req *models.Mail) (int, http.Header, interface{}, error) {
	if err := req.Validate(); err != nil {
		// fail silently: we don't want the mail parser service to retry
		// on failed validation, so report success and drop the message
		return response.NewDefaultOK()
	}
	if err := req.Persist(); err != nil {
		return response.NewBadRequest(err)
	}
	return response.NewDefaultOK()
}
|
package controller
import (
"errors"
"fmt"
mongomodels "koding/db/models"
"koding/db/mongodb/modelhelper"
"socialapi/models"
"strings"
"github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/koding/logging"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
var (
	// ErrMigrated signals that a record was already migrated.
	ErrMigrated = errors.New("already migrated")

	// kodingChannelId caches the id of the "koding" group channel,
	// prefetched once in migrateAllPosts.
	kodingChannelId int64

	// tagRegex captures legacy inline tag markers of the form
	// "|#:JTag:<...>:<...>|" so prepareBody can rewrite them as "#tag".
	tagRegex = verbalexpressions.New().
			BeginCapture().
			Find("|#:JTag:").
			Anything().
			Then(":").
			Anything().
			Then("|").
			EndCapture().
			Regex()
)
// Controller migrates legacy mongo posts, comments, likes and tags into
// the social API.
type Controller struct {
	log logging.Logger // sink for per-record migration errors
}
// New builds a migration Controller that reports through the given logger.
// The error return is always nil; it exists for interface symmetry with
// other worker constructors.
func New(log logging.Logger) (*Controller, error) {
	return &Controller{log: log}, nil
}
// Start runs the full migration: first all tags, then all posts.
// It stops at the first stage that returns an error.
func (mwc *Controller) Start() error {
	if err := mwc.migrateAllTags(); err != nil {
		return err
	}
	if err := mwc.migrateAllPosts(); err != nil {
		return err
	}
	return nil
}
// migrateAllPosts migrates every not-yet-migrated status update from
// mongo into the social API, together with its comments and likes.
// Records that fail are logged and skipped (and counted, so pagination
// moves past them) rather than aborting the whole run.
func (mwc *Controller) migrateAllPosts() error {
	o := modelhelper.Options{
		Sort: "meta.createdAt", // process in creation order
	}
	// only fetch records that have not been migrated yet
	s := modelhelper.Selector{
		"socialMessageId": modelhelper.Selector{"$exists": false},
	}
	kodingChannel, err := createGroupChannel("koding")
	if err != nil {
		return fmt.Errorf("Koding channel cannot be created: %s", err)
	}
	kodingChannelId = kodingChannel.Id
	errCount := 0
	handleError := func(su *mongomodels.StatusUpdate, err error) {
		mwc.log.Error("an error occured for %s: %s", su.Id.Hex(), err)
		errCount++
	}
	for {
		// skip over records that previously errored; otherwise the same
		// failing (still unmigrated) record would be fetched forever
		o.Skip = errCount
		su, err := modelhelper.GetStatusUpdate(s, o)
		if err != nil {
			if err == mgo.ErrNotFound {
				mwc.log.Notice("Migration completed")
				return nil
			}
			return fmt.Errorf("status update cannot be fetched: %s", err)
		}
		channelId, err := fetchGroupChannelId(su.Group)
		if err != nil {
			return fmt.Errorf("channel id cannot be fetched :%s", err)
		}
		// create channel message
		cm := mapStatusUpdateToChannelMessage(&su)
		cm.InitialChannelId = channelId
		if err := insertChannelMessage(cm, su.OriginId.Hex()); err != nil {
			handleError(&su, err)
			continue
		}
		if err := addChannelMessageToMessageList(cm); err != nil {
			handleError(&su, err)
			continue
		}
		// create reply messages. Fix: migrateComments and migrateLikes are
		// methods on Controller and must be called through the receiver.
		if err := mwc.migrateComments(cm, &su, channelId); err != nil {
			handleError(&su, err)
			continue
		}
		if err := mwc.migrateLikes(cm, su.Id); err != nil {
			handleError(&su, err)
			continue
		}
		// update mongo status update channelMessageId field
		if err := completePostMigration(&su, cm); err != nil {
			handleError(&su, err)
			continue
		}
		fmt.Printf("\n\nStatus update var %+v \n\n", su)
	}
}
// createGroupChannel creates a group-typed social channel for the mongo
// group named groupName, owned by the group's owner account and sharing
// the group's privacy setting.
func createGroupChannel(groupName string) (*models.Channel, error) {
	c := models.NewChannel()
	c.Name = groupName
	c.GroupName = groupName
	c.TypeConstant = models.Channel_TYPE_GROUP
	group, err := modelhelper.GetGroup(groupName)
	if err != nil {
		return nil, err
	}
	// TODO check it causes any error
	c.PrivacyConstant = group.Privacy
	// find group owner
	creatorId, err := fetchGroupOwnerId(group)
	if err != nil {
		return nil, err
	}
	c.CreatorId = creatorId
	// create channel
	if err := c.Create(); err != nil {
		return nil, err
	}
	return c, nil
}
// fetchGroupOwnerId resolves the social-API account id of the group's
// owner, creating the account on the fly if it does not exist yet.
func fetchGroupOwnerId(g *mongomodels.Group) (int64, error) {
	// fetch owner relationship
	s := modelhelper.Selector{
		"targetName": "JAccount",
		"as":         "owner",
		"sourceId":   g.Id,
	}
	r, err := modelhelper.GetRelationship(s)
	if err != nil {
		return 0, err
	}
	a := models.NewAccount()
	a.OldId = r.TargetId.Hex()
	if err := a.FetchOrCreate(); err != nil {
		return 0, err
	}
	return a.Id, nil
}
// insertChannelMessage resolves the author account for accountOldId onto
// the message and writes the message straight to the datastore.
func insertChannelMessage(cm *models.ChannelMessage, accountOldId string) error {
	if err := prepareMessageAccount(cm, accountOldId); err != nil {
		return err
	}
	return cm.CreateRaw()
}
// addChannelMessageToMessageList links the message into its initial
// channel's message list, preserving the original creation time.
func addChannelMessageToMessageList(cm *models.ChannelMessage) error {
	cml := models.NewChannelMessageList()
	cml.ChannelId = cm.InitialChannelId
	cml.MessageId = cm.Id
	cml.AddedAt = cm.CreatedAt
	return cml.CreateRaw()
}
// migrateComments migrates every JComment attached to the status update
// su as replies of parentMessage in the given channel, including each
// comment's likes, then flags the comment as migrated. Unlike post
// migration, any failure aborts with an error.
func (mwc *Controller) migrateComments(parentMessage *models.ChannelMessage, su *mongomodels.StatusUpdate, channelId int64) error {
	s := modelhelper.Selector{
		"sourceId":   su.Id,
		"targetName": "JComment",
	}
	rels, err := modelhelper.GetAllRelationships(s)
	if err != nil {
		// a post with no comments is not an error
		if err == modelhelper.ErrNotFound {
			return nil
		}
		return fmt.Errorf("comment relationships cannot be fetched: %s", err)
	}
	for _, r := range rels {
		comment, err := modelhelper.GetCommentById(r.TargetId.Hex())
		if err != nil {
			return fmt.Errorf("comment cannot be fetched %s", err)
		}
		// comment is already migrated
		if comment.SocialMessageId != 0 {
			continue
		}
		reply := mapCommentToChannelMessage(comment)
		reply.InitialChannelId = channelId
		// insert as channel message
		if err := insertChannelMessage(reply, comment.OriginId.Hex()); err != nil {
			return fmt.Errorf("comment cannot be inserted %s", err)
		}
		// insert as message reply
		mr := models.NewMessageReply()
		mr.MessageId = parentMessage.Id
		mr.ReplyId = reply.Id
		mr.CreatedAt = reply.CreatedAt
		if err := mr.CreateRaw(); err != nil {
			return fmt.Errorf("comment cannot be inserted to message reply %s", err)
		}
		// Fix: migrateLikes is a method on Controller and must be called
		// through the receiver.
		if err := mwc.migrateLikes(reply, comment.Id); err != nil {
			return fmt.Errorf("likes cannot be migrated %s", err)
		}
		if err := completeCommentMigration(comment, reply); err != nil {
			return fmt.Errorf("old comment cannot be flagged with new message id %s", err)
		}
	}
	return nil
}
// migrateLikes copies every "like" relationship of the legacy record
// identified by oldId onto the new channel message cm as interactions.
func (mwc *Controller) migrateLikes(cm *models.ChannelMessage, oldId bson.ObjectId) error {
	s := modelhelper.Selector{
		"sourceId": oldId,
		"as":       "like",
	}
	rels, err := modelhelper.GetAllRelationships(s)
	if err != nil {
		return fmt.Errorf("likes cannot be fetched %s", err)
	}
	for _, r := range rels {
		a := models.NewAccount()
		a.OldId = r.TargetId.Hex()
		if err := a.FetchOrCreate(); err != nil {
			return fmt.Errorf("interactor account could not found: %s", err)
		}
		i := models.NewInteraction()
		i.MessageId = cm.Id
		i.AccountId = a.Id
		i.TypeConstant = models.Interaction_TYPE_LIKE
		// creation date is not stored in mongo, so we could not set createdAt here.
		if err := i.Create(); err != nil {
			return fmt.Errorf("interaction could not created: %s", err)
		}
	}
	return nil
}
// prepareMessageAccount resolves (creating if needed) the social account
// for the legacy account id and assigns it as the message's author.
func prepareMessageAccount(cm *models.ChannelMessage, accountOldId string) error {
	a := models.NewAccount()
	a.OldId = accountOldId
	if err := a.FetchOrCreate(); err != nil {
		return fmt.Errorf("account could not found: %s", err)
	}
	cm.AccountId = a.Id
	return nil
}
// fetchGroupChannelId returns the channel id for groupName, creating the
// group channel on demand. The "koding" channel is served from the
// package-level cache populated by migrateAllPosts.
func fetchGroupChannelId(groupName string) (int64, error) {
	// koding group channel id is prefetched
	if groupName == "koding" {
		return kodingChannelId, nil
	}
	c, err := createGroupChannel(groupName)
	if err != nil {
		return 0, err
	}
	return c.Id, nil
}
// mapStatusUpdateToChannelMessage converts a legacy status update into a
// new post-typed channel message, rewriting inline tags in the body and
// carrying over the original timestamps.
func mapStatusUpdateToChannelMessage(su *mongomodels.StatusUpdate) *models.ChannelMessage {
	cm := models.NewChannelMessage()
	cm.Slug = su.Slug
	prepareBody(cm, su.Body)
	cm.TypeConstant = models.ChannelMessage_TYPE_POST
	cm.CreatedAt = su.Meta.CreatedAt
	prepareMessageMetaDates(cm, &su.Meta)
	return cm
}
// mapCommentToChannelMessage converts a legacy comment into a new
// reply-typed channel message, carrying over the original timestamps.
func mapCommentToChannelMessage(c *mongomodels.Comment) *models.ChannelMessage {
	cm := models.NewChannelMessage()
	cm.Body = c.Body
	cm.TypeConstant = models.ChannelMessage_TYPE_REPLY
	cm.CreatedAt = c.Meta.CreatedAt
	prepareMessageMetaDates(cm, &c.Meta)
	return cm
}
// prepareMessageMetaDates derives UpdatedAt from the legacy meta dates,
// clamping it so it is never earlier than the creation time.
func prepareMessageMetaDates(cm *models.ChannelMessage, meta *mongomodels.Meta) {
	// this is added because status update->modified at field is before createdAt
	if cm.CreatedAt.After(meta.ModifiedAt) {
		cm.UpdatedAt = cm.CreatedAt
	} else {
		cm.UpdatedAt = meta.ModifiedAt
	}
}
// prepareBody copies body onto the message and rewrites every legacy
// "|#:JTag:<...>:<name>|" marker matched by tagRegex into a plain
// "#name" hashtag.
func prepareBody(cm *models.ChannelMessage, body string) {
	res := tagRegex.FindAllStringSubmatch(body, -1)
	cm.Body = body
	if len(res) == 0 {
		return
	}
	for _, element := range res {
		// strip the leading and trailing "|" from the captured marker
		tag := element[1][1 : len(element[1])-1]
		// marker layout appears to be "#:JTag:<slug>:<name>"; index 3 is
		// the tag name (assumes exactly this 4-field layout — TODO confirm)
		tag = strings.Split(tag, ":")[3]
		tag = "#" + tag
		cm.Body = verbalexpressions.New().Find(element[1]).Replace(cm.Body, tag)
	}
}
// completePostMigration flags the mongo status update with the new
// channel message id so it is excluded from subsequent migration runs.
func completePostMigration(su *mongomodels.StatusUpdate, cm *models.ChannelMessage) error {
	su.SocialMessageId = cm.Id
	return modelhelper.UpdateStatusUpdate(su)
}
// completeCommentMigration flags the mongo comment with the new channel
// message id so it is excluded from subsequent migration runs.
func completeCommentMigration(reply *mongomodels.Comment, cm *models.ChannelMessage) error {
	reply.SocialMessageId = cm.Id
	return modelhelper.UpdateComment(reply)
}
Migration: Interaction created at value is assigned via relationship timestamp
package controller
import (
"errors"
"fmt"
mongomodels "koding/db/models"
"koding/db/mongodb/modelhelper"
"socialapi/models"
"strings"
"github.com/VerbalExpressions/GoVerbalExpressions"
"github.com/koding/logging"
"labix.org/v2/mgo"
"labix.org/v2/mgo/bson"
)
var (
	// ErrMigrated signals that a record was already migrated.
	ErrMigrated = errors.New("already migrated")

	// kodingChannelId caches the id of the "koding" group channel,
	// prefetched once in migrateAllPosts.
	kodingChannelId int64

	// tagRegex captures legacy inline tag markers of the form
	// "|#:JTag:<...>:<...>|" so prepareBody can rewrite them as "#tag".
	tagRegex = verbalexpressions.New().
			BeginCapture().
			Find("|#:JTag:").
			Anything().
			Then(":").
			Anything().
			Then("|").
			EndCapture().
			Regex()
)
// Controller migrates legacy mongo posts, comments, likes and tags into
// the social API.
type Controller struct {
	log logging.Logger // sink for per-record migration errors
}
// New builds a migration Controller that reports through the given logger.
// The error return is always nil; it exists for interface symmetry with
// other worker constructors.
func New(log logging.Logger) (*Controller, error) {
	return &Controller{log: log}, nil
}
// Start runs the full migration: first all tags, then all posts.
// It stops at the first stage that returns an error.
func (mwc *Controller) Start() error {
	if err := mwc.migrateAllTags(); err != nil {
		return err
	}
	if err := mwc.migrateAllPosts(); err != nil {
		return err
	}
	return nil
}
// migrateAllPosts migrates every not-yet-migrated status update from
// mongo into the social API, together with its comments and likes.
// Records that fail are logged and skipped (and counted, so pagination
// moves past them) rather than aborting the whole run.
func (mwc *Controller) migrateAllPosts() error {
	o := modelhelper.Options{
		Sort: "meta.createdAt", // process in creation order
	}
	// only fetch records that have not been migrated yet
	s := modelhelper.Selector{
		"socialMessageId": modelhelper.Selector{"$exists": false},
	}
	kodingChannel, err := createGroupChannel("koding")
	if err != nil {
		return fmt.Errorf("Koding channel cannot be created: %s", err)
	}
	kodingChannelId = kodingChannel.Id
	errCount := 0
	handleError := func(su *mongomodels.StatusUpdate, err error) {
		mwc.log.Error("an error occured for %s: %s", su.Id.Hex(), err)
		errCount++
	}
	for {
		// skip over records that previously errored; otherwise the same
		// failing (still unmigrated) record would be fetched forever
		o.Skip = errCount
		su, err := modelhelper.GetStatusUpdate(s, o)
		if err != nil {
			if err == mgo.ErrNotFound {
				mwc.log.Notice("Migration completed")
				return nil
			}
			return fmt.Errorf("status update cannot be fetched: %s", err)
		}
		channelId, err := fetchGroupChannelId(su.Group)
		if err != nil {
			return fmt.Errorf("channel id cannot be fetched :%s", err)
		}
		// create channel message
		cm := mapStatusUpdateToChannelMessage(&su)
		cm.InitialChannelId = channelId
		if err := insertChannelMessage(cm, su.OriginId.Hex()); err != nil {
			handleError(&su, err)
			continue
		}
		if err := addChannelMessageToMessageList(cm); err != nil {
			handleError(&su, err)
			continue
		}
		// create reply messages. Fix: migrateComments and migrateLikes are
		// methods on Controller and must be called through the receiver.
		if err := mwc.migrateComments(cm, &su, channelId); err != nil {
			handleError(&su, err)
			continue
		}
		if err := mwc.migrateLikes(cm, su.Id); err != nil {
			handleError(&su, err)
			continue
		}
		// update mongo status update channelMessageId field
		if err := completePostMigration(&su, cm); err != nil {
			handleError(&su, err)
			continue
		}
		fmt.Printf("\n\nStatus update var %+v \n\n", su)
	}
}
// createGroupChannel creates a group-typed social channel for the mongo
// group named groupName, owned by the group's owner account and sharing
// the group's privacy setting.
func createGroupChannel(groupName string) (*models.Channel, error) {
	c := models.NewChannel()
	c.Name = groupName
	c.GroupName = groupName
	c.TypeConstant = models.Channel_TYPE_GROUP
	group, err := modelhelper.GetGroup(groupName)
	if err != nil {
		return nil, err
	}
	// TODO check it causes any error
	c.PrivacyConstant = group.Privacy
	// find group owner
	creatorId, err := fetchGroupOwnerId(group)
	if err != nil {
		return nil, err
	}
	c.CreatorId = creatorId
	// create channel
	if err := c.Create(); err != nil {
		return nil, err
	}
	return c, nil
}
// fetchGroupOwnerId resolves the social-API account id of the group's
// owner, creating the account on the fly if it does not exist yet.
func fetchGroupOwnerId(g *mongomodels.Group) (int64, error) {
	// fetch owner relationship
	s := modelhelper.Selector{
		"targetName": "JAccount",
		"as":         "owner",
		"sourceId":   g.Id,
	}
	r, err := modelhelper.GetRelationship(s)
	if err != nil {
		return 0, err
	}
	a := models.NewAccount()
	a.OldId = r.TargetId.Hex()
	if err := a.FetchOrCreate(); err != nil {
		return 0, err
	}
	return a.Id, nil
}
// insertChannelMessage resolves the author account for accountOldId onto
// the message and writes the message straight to the datastore.
func insertChannelMessage(cm *models.ChannelMessage, accountOldId string) error {
	if err := prepareMessageAccount(cm, accountOldId); err != nil {
		return err
	}
	return cm.CreateRaw()
}
// addChannelMessageToMessageList links the message into its initial
// channel's message list, preserving the original creation time.
func addChannelMessageToMessageList(cm *models.ChannelMessage) error {
	cml := models.NewChannelMessageList()
	cml.ChannelId = cm.InitialChannelId
	cml.MessageId = cm.Id
	cml.AddedAt = cm.CreatedAt
	return cml.CreateRaw()
}
// migrateComments migrates every JComment attached to the status update
// su as replies of parentMessage in the given channel, including each
// comment's likes, then flags the comment as migrated. Unlike post
// migration, any failure aborts with an error.
func (mwc *Controller) migrateComments(parentMessage *models.ChannelMessage, su *mongomodels.StatusUpdate, channelId int64) error {
	s := modelhelper.Selector{
		"sourceId":   su.Id,
		"targetName": "JComment",
	}
	rels, err := modelhelper.GetAllRelationships(s)
	if err != nil {
		// a post with no comments is not an error
		if err == modelhelper.ErrNotFound {
			return nil
		}
		return fmt.Errorf("comment relationships cannot be fetched: %s", err)
	}
	for _, r := range rels {
		comment, err := modelhelper.GetCommentById(r.TargetId.Hex())
		if err != nil {
			return fmt.Errorf("comment cannot be fetched %s", err)
		}
		// comment is already migrated
		if comment.SocialMessageId != 0 {
			continue
		}
		reply := mapCommentToChannelMessage(comment)
		reply.InitialChannelId = channelId
		// insert as channel message
		if err := insertChannelMessage(reply, comment.OriginId.Hex()); err != nil {
			return fmt.Errorf("comment cannot be inserted %s", err)
		}
		// insert as message reply
		mr := models.NewMessageReply()
		mr.MessageId = parentMessage.Id
		mr.ReplyId = reply.Id
		mr.CreatedAt = reply.CreatedAt
		if err := mr.CreateRaw(); err != nil {
			return fmt.Errorf("comment cannot be inserted to message reply %s", err)
		}
		// Fix: migrateLikes is a method on Controller and must be called
		// through the receiver.
		if err := mwc.migrateLikes(reply, comment.Id); err != nil {
			return fmt.Errorf("likes cannot be migrated %s", err)
		}
		if err := completeCommentMigration(comment, reply); err != nil {
			return fmt.Errorf("old comment cannot be flagged with new message id %s", err)
		}
	}
	return nil
}
// migrateLikes copies every "like" relationship of the legacy record
// identified by oldId onto the new channel message cm as interactions.
// The interaction's CreatedAt is taken from the relationship timestamp;
// failures to create a single interaction are logged and skipped
// (best effort) instead of aborting.
func (mwc *Controller) migrateLikes(cm *models.ChannelMessage, oldId bson.ObjectId) error {
	s := modelhelper.Selector{
		"sourceId": oldId,
		"as":       "like",
	}
	rels, err := modelhelper.GetAllRelationships(s)
	if err != nil {
		return fmt.Errorf("likes cannot be fetched %s", err)
	}
	for _, r := range rels {
		a := models.NewAccount()
		a.OldId = r.TargetId.Hex()
		if err := a.FetchOrCreate(); err != nil {
			return fmt.Errorf("interactor account could not found: %s", err)
		}
		i := models.NewInteraction()
		i.MessageId = cm.Id
		i.AccountId = a.Id
		i.TypeConstant = models.Interaction_TYPE_LIKE
		i.CreatedAt = r.TimeStamp
		if err := i.CreateRaw(); err != nil {
			mwc.log.Error("interaction could not created: %s", err)
		}
	}
	return nil
}
// prepareMessageAccount resolves (creating if needed) the social account
// for the legacy account id and assigns it as the message's author.
func prepareMessageAccount(cm *models.ChannelMessage, accountOldId string) error {
	a := models.NewAccount()
	a.OldId = accountOldId
	if err := a.FetchOrCreate(); err != nil {
		return fmt.Errorf("account could not found: %s", err)
	}
	cm.AccountId = a.Id
	return nil
}
// fetchGroupChannelId returns the channel id for groupName, creating the
// group channel on demand. The "koding" channel is served from the
// package-level cache populated by migrateAllPosts.
func fetchGroupChannelId(groupName string) (int64, error) {
	// koding group channel id is prefetched
	if groupName == "koding" {
		return kodingChannelId, nil
	}
	c, err := createGroupChannel(groupName)
	if err != nil {
		return 0, err
	}
	return c.Id, nil
}
// mapStatusUpdateToChannelMessage converts a legacy status update into a
// new post-typed channel message, rewriting inline tags in the body and
// carrying over the original timestamps.
func mapStatusUpdateToChannelMessage(su *mongomodels.StatusUpdate) *models.ChannelMessage {
	cm := models.NewChannelMessage()
	cm.Slug = su.Slug
	prepareBody(cm, su.Body)
	cm.TypeConstant = models.ChannelMessage_TYPE_POST
	cm.CreatedAt = su.Meta.CreatedAt
	prepareMessageMetaDates(cm, &su.Meta)
	return cm
}
// mapCommentToChannelMessage converts a legacy comment into a new
// reply-typed channel message, carrying over the original timestamps.
func mapCommentToChannelMessage(c *mongomodels.Comment) *models.ChannelMessage {
	cm := models.NewChannelMessage()
	cm.Body = c.Body
	cm.TypeConstant = models.ChannelMessage_TYPE_REPLY
	cm.CreatedAt = c.Meta.CreatedAt
	prepareMessageMetaDates(cm, &c.Meta)
	return cm
}
// prepareMessageMetaDates derives UpdatedAt from the legacy meta dates,
// clamping it so it is never earlier than the creation time.
func prepareMessageMetaDates(cm *models.ChannelMessage, meta *mongomodels.Meta) {
	// this is added because status update->modified at field is before createdAt
	if cm.CreatedAt.After(meta.ModifiedAt) {
		cm.UpdatedAt = cm.CreatedAt
	} else {
		cm.UpdatedAt = meta.ModifiedAt
	}
}
// prepareBody copies body onto the message and rewrites every legacy
// "|#:JTag:<...>:<name>|" marker matched by tagRegex into a plain
// "#name" hashtag.
func prepareBody(cm *models.ChannelMessage, body string) {
	res := tagRegex.FindAllStringSubmatch(body, -1)
	cm.Body = body
	if len(res) == 0 {
		return
	}
	for _, element := range res {
		// strip the leading and trailing "|" from the captured marker
		tag := element[1][1 : len(element[1])-1]
		// marker layout appears to be "#:JTag:<slug>:<name>"; index 3 is
		// the tag name (assumes exactly this 4-field layout — TODO confirm)
		tag = strings.Split(tag, ":")[3]
		tag = "#" + tag
		cm.Body = verbalexpressions.New().Find(element[1]).Replace(cm.Body, tag)
	}
}
// completePostMigration flags the mongo status update with the new
// channel message id so it is excluded from subsequent migration runs.
func completePostMigration(su *mongomodels.StatusUpdate, cm *models.ChannelMessage) error {
	su.SocialMessageId = cm.Id
	return modelhelper.UpdateStatusUpdate(su)
}
// completeCommentMigration flags the mongo comment with the new channel
// message id so it is excluded from subsequent migration runs.
func completeCommentMigration(reply *mongomodels.Comment, cm *models.ChannelMessage) error {
	reply.SocialMessageId = cm.Id
	return modelhelper.UpdateComment(reply)
}
|
// Copyright 2018 The Grafeas Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vulnerability
import (
"testing"
pkgpb "github.com/grafeas/grafeas/proto/v1/package_go_proto"
vulnpb "github.com/grafeas/grafeas/proto/v1/vulnerability_go_proto"
)
// TestValidateVulnerability exercises ValidateVulnerability with nil,
// invalid, and valid detail entries via a table-driven test.
func TestValidateVulnerability(t *testing.T) {
	tests := []struct {
		desc     string
		v        *vulnpb.Vulnerability
		wantErrs bool
	}{
		{
			desc: "nil detail, want error(s)",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				Details: []*vulnpb.Vulnerability_Detail{
					nil,
				},
			},
			wantErrs: true,
		},
		{
			desc: "invalid vulnerability detail, want error(s)",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				// gofmt -s: the element type is implied by the slice literal
				Details: []*vulnpb.Vulnerability_Detail{
					{},
				},
			},
			wantErrs: true,
		},
		{
			desc: "valid vulnerability, want success",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				Details: []*vulnpb.Vulnerability_Detail{
					{
						CpeUri:       "cpe:/o:debian:debian_linux:7",
						Package:      "debian",
						SeverityName: "LOW",
					},
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := ValidateVulnerability(tt.v)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: ValidateVulnerability(%+v): got success, want error(s)", tt.desc, tt.v)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: ValidateVulnerability(%+v): got error(s) %v, want success", tt.desc, tt.v, errs)
		}
	}
}
// TestValidateVulnerabilityDetail exercises validateVulnerabilityDetail
// with missing/invalid fields and one fully valid detail, table-driven.
func TestValidateVulnerabilityDetail(t *testing.T) {
	tests := []struct {
		desc     string
		vd       *vulnpb.Vulnerability_Detail
		wantErrs bool
	}{
		{
			desc:     "missing CPE URI, want error(s)",
			vd:       &vulnpb.Vulnerability_Detail{},
			wantErrs: true,
		},
		{
			desc: "missing package, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri: "cpe:/o:debian:debian_linux:7",
			},
			wantErrs: true,
		},
		{
			desc: "invalid min affected version, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:             "cpe:/o:debian:debian_linux:7",
				Package:            "debian",
				SeverityName:       "LOW",
				MinAffectedVersion: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			desc: "invalid max affected version, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:             "cpe:/o:debian:debian_linux:7",
				Package:            "debian",
				SeverityName:       "LOW",
				MaxAffectedVersion: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			desc: "invalid fixed located set, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:        "cpe:/o:debian:debian_linux:7",
				Package:       "debian",
				SeverityName:  "LOW",
				FixedLocation: &vulnpb.VulnerabilityLocation{},
			},
			wantErrs: true,
		},
		{
			desc: "valid vulnerability details, want success",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:       "cpe:/o:debian:debian_linux:7",
				Package:      "debian",
				SeverityName: "LOW",
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := validateVulnerabilityDetail(tt.vd)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityDetail(%+v): got success, want error(s)", tt.desc, tt.vd)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityDetail(%+v): got error(s) %v, want success", tt.desc, tt.vd, errs)
		}
	}
}
// TestValidateVulnerabilityLocation exercises validateVulnerabilityLocation
// with missing/invalid fields and one fully valid location, table-driven.
func TestValidateVulnerabilityLocation(t *testing.T) {
	tests := []struct {
		desc     string
		vl       *vulnpb.VulnerabilityLocation
		wantErrs bool
	}{
		{
			desc:     "missing CPE URI, want error(s)",
			vl:       &vulnpb.VulnerabilityLocation{},
			wantErrs: true,
		},
		{
			desc: "missing package, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri: "cpe:/o:debian:debian_linux:7",
			},
			wantErrs: true,
		},
		{
			desc: "missing version, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
			},
			wantErrs: true,
		},
		{
			desc: "version set, but invalid, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
				Version: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			// Fix: this case expects success, so the description should say
			// "valid" (it previously read "invalid, want success").
			desc: "version set and valid, want success",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
				Version: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := validateVulnerabilityLocation(tt.vl)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityLocation(%+v): got success, want error(s)", tt.desc, tt.vl)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityLocation(%+v): got error(s) %v, want success", tt.desc, tt.vl, errs)
		}
	}
}
// TestValidateDetails exercises ValidateDetails with missing, empty, nil,
// and invalid package issues, plus one fully valid details, table-driven.
func TestValidateDetails(t *testing.T) {
	tests := []struct {
		desc     string
		d        *vulnpb.Details
		wantErrs bool
	}{
		{
			desc:     "missing package issue, want error(s)",
			d:        &vulnpb.Details{},
			wantErrs: true,
		},
		{
			desc: "empty package issue, want error(s)",
			d: &vulnpb.Details{
				PackageIssue: []*vulnpb.PackageIssue{},
			},
			wantErrs: true,
		},
		{
			desc: "nil package issue element, want error(s)",
			d: &vulnpb.Details{
				PackageIssue: []*vulnpb.PackageIssue{nil},
			},
			wantErrs: true,
		},
		{
			desc: "invalid package issue, want error(s)",
			d: &vulnpb.Details{
				PackageIssue: []*vulnpb.PackageIssue{
					{
						AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
					},
				},
			},
			wantErrs: true,
		},
		{
			desc: "valid details, want success",
			d: &vulnpb.Details{
				PackageIssue: []*vulnpb.PackageIssue{
					{
						AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
						AffectedPackage: "debian",
						AffectedVersion: &pkgpb.Version{
							Name: "1.1.2",
							Kind: pkgpb.Version_NORMAL,
						},
						FixedCpeUri:  "cpe:/o:debian:debian_linux:7",
						FixedPackage: "debian",
						FixedVersion: &pkgpb.Version{
							Name: "1.1.2",
							Kind: pkgpb.Version_NORMAL,
						},
					},
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := ValidateDetails(tt.d)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: ValidateDetails(%+v): got success, want error(s)", tt.desc, tt.d)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: ValidateDetails(%+v): got error(s) %v, want success", tt.desc, tt.d, errs)
		}
	}
}
// TestValidatePackageIssue exercises validatePackageIssue by omitting
// each required field in turn, plus one fully valid issue, table-driven.
func TestValidatePackageIssue(t *testing.T) {
	tests := []struct {
		desc     string
		p        *vulnpb.PackageIssue
		wantErrs bool
	}{
		{
			desc: "missing affected cpe uri, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedPackage: "debian",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedCpeUri:  "cpe:/o:debian:debian_linux:7",
				FixedPackage: "debian",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: true,
		},
		{
			desc: "missing affected package, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedCpeUri:  "cpe:/o:debian:debian_linux:7",
				FixedPackage: "debian",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: true,
		},
		{
			desc: "missing affected version, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
				AffectedPackage: "debian",
				FixedCpeUri:     "cpe:/o:debian:debian_linux:7",
				FixedPackage:    "debian",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: true,
		},
		{
			desc: "missing fixed cpe uri, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
				AffectedPackage: "debian",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedPackage: "debian",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: true,
		},
		{
			desc: "missing fixed package, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
				AffectedPackage: "debian",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedCpeUri: "cpe:/o:debian:debian_linux:7",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: true,
		},
		{
			desc: "missing fixed version, want error(s)",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
				AffectedPackage: "debian",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedCpeUri:  "cpe:/o:debian:debian_linux:7",
				FixedPackage: "debian",
			},
			wantErrs: true,
		},
		{
			desc: "valid package issue, want success",
			p: &vulnpb.PackageIssue{
				AffectedCpeUri:  "cpe:/o:debian:debian_linux:7",
				AffectedPackage: "debian",
				AffectedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
				FixedCpeUri:  "cpe:/o:debian:debian_linux:7",
				FixedPackage: "debian",
				FixedVersion: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := validatePackageIssue(tt.p)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: validatePackageIssue(%+v): got success, want error(s)", tt.desc, tt.p)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: validatePackageIssue(%+v): got error(s) %v, want success", tt.desc, tt.p, errs)
		}
	}
}
commit changes
// Copyright 2018 The Grafeas Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vulnerability
import (
"testing"
pkgpb "github.com/grafeas/grafeas/proto/v1/package_go_proto"
vulnpb "github.com/grafeas/grafeas/proto/v1/vulnerability_go_proto"
)
// TestValidateVulnerability exercises ValidateVulnerability with nil,
// invalid, and valid detail entries via a table-driven test.
func TestValidateVulnerability(t *testing.T) {
	tests := []struct {
		desc     string
		v        *vulnpb.Vulnerability
		wantErrs bool
	}{
		{
			desc: "nil detail, want error(s)",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				Details: []*vulnpb.Vulnerability_Detail{
					nil,
				},
			},
			wantErrs: true,
		},
		{
			desc: "invalid vulnerability detail, want error(s)",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				// gofmt -s: the element type is implied by the slice literal
				Details: []*vulnpb.Vulnerability_Detail{
					{},
				},
			},
			wantErrs: true,
		},
		{
			desc: "valid vulnerability, want success",
			v: &vulnpb.Vulnerability{
				Severity: vulnpb.Severity_CRITICAL,
				Details: []*vulnpb.Vulnerability_Detail{
					{
						CpeUri:       "cpe:/o:debian:debian_linux:7",
						Package:      "debian",
						SeverityName: "LOW",
					},
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := ValidateVulnerability(tt.v)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: ValidateVulnerability(%+v): got success, want error(s)", tt.desc, tt.v)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: ValidateVulnerability(%+v): got error(s) %v, want success", tt.desc, tt.v, errs)
		}
	}
}
// TestValidateVulnerabilityDetail exercises validateVulnerabilityDetail
// with missing/invalid fields and one fully valid detail, table-driven.
func TestValidateVulnerabilityDetail(t *testing.T) {
	tests := []struct {
		desc     string
		vd       *vulnpb.Vulnerability_Detail
		wantErrs bool
	}{
		{
			desc:     "missing CPE URI, want error(s)",
			vd:       &vulnpb.Vulnerability_Detail{},
			wantErrs: true,
		},
		{
			desc: "missing package, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri: "cpe:/o:debian:debian_linux:7",
			},
			wantErrs: true,
		},
		{
			desc: "invalid min affected version, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:             "cpe:/o:debian:debian_linux:7",
				Package:            "debian",
				SeverityName:       "LOW",
				MinAffectedVersion: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			desc: "invalid max affected version, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:             "cpe:/o:debian:debian_linux:7",
				Package:            "debian",
				SeverityName:       "LOW",
				MaxAffectedVersion: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			desc: "invalid fixed located set, want error(s)",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:        "cpe:/o:debian:debian_linux:7",
				Package:       "debian",
				SeverityName:  "LOW",
				FixedLocation: &vulnpb.VulnerabilityLocation{},
			},
			wantErrs: true,
		},
		{
			desc: "valid vulnerability details, want success",
			vd: &vulnpb.Vulnerability_Detail{
				CpeUri:       "cpe:/o:debian:debian_linux:7",
				Package:      "debian",
				SeverityName: "LOW",
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := validateVulnerabilityDetail(tt.vd)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityDetail(%+v): got success, want error(s)", tt.desc, tt.vd)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityDetail(%+v): got error(s) %v, want success", tt.desc, tt.vd, errs)
		}
	}
}
// TestValidateVulnerabilityLocation exercises validateVulnerabilityLocation
// with missing/invalid fields and one fully valid location, table-driven.
func TestValidateVulnerabilityLocation(t *testing.T) {
	tests := []struct {
		desc     string
		vl       *vulnpb.VulnerabilityLocation
		wantErrs bool
	}{
		{
			desc:     "missing CPE URI, want error(s)",
			vl:       &vulnpb.VulnerabilityLocation{},
			wantErrs: true,
		},
		{
			desc: "missing package, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri: "cpe:/o:debian:debian_linux:7",
			},
			wantErrs: true,
		},
		{
			desc: "missing version, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
			},
			wantErrs: true,
		},
		{
			desc: "version set, but invalid, want error(s)",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
				Version: &pkgpb.Version{},
			},
			wantErrs: true,
		},
		{
			// Fix: this case expects success, so the description should say
			// "valid" (it previously read "invalid, want success").
			desc: "version set and valid, want success",
			vl: &vulnpb.VulnerabilityLocation{
				CpeUri:  "cpe:/o:debian:debian_linux:7",
				Package: "debian",
				Version: &pkgpb.Version{
					Name: "1.1.2",
					Kind: pkgpb.Version_NORMAL,
				},
			},
			wantErrs: false,
		},
	}
	for _, tt := range tests {
		errs := validateVulnerabilityLocation(tt.vl)
		t.Logf("%q: error(s): %v", tt.desc, errs)
		if len(errs) == 0 && tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityLocation(%+v): got success, want error(s)", tt.desc, tt.vl)
		}
		if len(errs) > 0 && !tt.wantErrs {
			t.Errorf("%q: validateVulnerabilityLocation(%+v): got error(s) %v, want success", tt.desc, tt.vl, errs)
		}
	}
}
func TestValidateDetails(t *testing.T) {
tests := []struct {
desc string
d *vulnpb.Details
wantErrs bool
}{
{
desc: "missing package issue, want error(s)",
d: &vulnpb.Details{},
wantErrs: true,
},
{
desc: "empty package issue, want error(s)",
d: &vulnpb.Details{
PackageIssue: []*vulnpb.PackageIssue{},
},
wantErrs: true,
},
{
desc: "nil package issue element, want error(s)",
d: &vulnpb.Details{
PackageIssue: []*vulnpb.PackageIssue{nil},
},
wantErrs: true,
},
{
desc: "invalid package issue, want error(s)",
d: &vulnpb.Details{
PackageIssue: []*vulnpb.PackageIssue{
{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
},
},
},
wantErrs: true,
},
{
desc: "valid details, want success",
d: &vulnpb.Details{
PackageIssue: []*vulnpb.PackageIssue{
{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
},
},
wantErrs: false,
},
}
for _, tt := range tests {
errs := ValidateDetails(tt.d)
t.Logf("%q: error(s): %v", tt.desc, errs)
if len(errs) == 0 && tt.wantErrs {
t.Errorf("%q: ValidateDetails(%+v): got success, want error(s)", tt.desc, tt.d)
}
if len(errs) > 0 && !tt.wantErrs {
t.Errorf("%q: ValidateDetails(%+v): got error(s) %v, want success", tt.desc, tt.d, errs)
}
}
}cd
func TestValidatePackageIssue(t *testing.T) {
tests := []struct {
desc string
p *vulnpb.PackageIssue
wantErrs bool
}{
{
desc: "missing affected cpe uri, want error(s)",
p: &vulnpb.PackageIssue{
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: true,
},
{
desc: "missing affected package, want error(s)",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: true,
},
{
desc: "missing affected version, want error(s)",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: true,
},
{
desc: "missing fixed cpe uri, want error(s)",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: true,
},
{
desc: "missing fixed package, want error(s)",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: true,
},
{
desc: "missing fixed version, want error(s)",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
},
wantErrs: true,
},
{
desc: "valid package issue, want success",
p: &vulnpb.PackageIssue{
AffectedCpeUri: "cpe:/o:debian:debian_linux:7",
AffectedPackage: "debian",
AffectedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
FixedCpeUri: "cpe:/o:debian:debian_linux:7",
FixedPackage: "debian",
FixedVersion: &pkgpb.Version{
Name: "1.1.2",
Kind: pkgpb.Version_NORMAL,
},
},
wantErrs: false,
},
}
for _, tt := range tests {
errs := validatePackageIssue(tt.p)
t.Logf("%q: error(s): %v", tt.desc, errs)
if len(errs) == 0 && tt.wantErrs {
t.Errorf("%q: validatePackageIssue(%+v): got success, want error(s)", tt.desc, tt.p)
}
if len(errs) > 0 && !tt.wantErrs {
t.Errorf("%q: validatePackageIssue(%+v): got error(s) %v, want success", tt.desc, tt.p, errs)
}
}
}
|
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package debug_test
import (
"fmt"
"regexp"
gc "gopkg.in/check.v1"
"github.com/juju/juju/worker/uniter/runner/debug"
)
type DebugHooksClientSuite struct{}
var _ = gc.Suite(&DebugHooksClientSuite{})
// TestClientScript checks the generated debug-hooks client script: every
// template variable must be substituted, the script must attach to the
// unit's tmux session, and it must take the entry/exit file locks on the
// expected file descriptors.
func (*DebugHooksClientSuite) TestClientScript(c *gc.C) {
	ctx := debug.NewHooksContext("foo/8")
	// Test the variable substitutions.
	result := debug.ClientScript(ctx, nil)
	// No variables left behind. The blanket "[^{}]*" assertion is wrong:
	// the script legitimately contains shell braces, so check each
	// template placeholder individually instead.
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{unit_name}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{tmux_conf}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{entry_flock}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{exit_flock}(.|\n)*")
	// tmux attach-session -t {unit_name}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*tmux attach-session -t %s(.|\n)*", regexp.QuoteMeta(ctx.Unit)))
	//) 9>{exit_flock}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*\\) 9>%s(.|\n)*", regexp.QuoteMeta(ctx.ClientExitFileLock())))
	//) 8>{entry_flock}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*\\) 8>%s(.|\n)*", regexp.QuoteMeta(ctx.ClientFileLock())))
	// nil is the same as empty slice is the same as "*".
	// Also, if "*" is present as well as a named hook,
	// it is equivalent to "*".
	c.Assert(debug.ClientScript(ctx, nil), gc.Equals, debug.ClientScript(ctx, []string{}))
	c.Assert(debug.ClientScript(ctx, []string{"*"}), gc.Equals, debug.ClientScript(ctx, nil))
	c.Assert(debug.ClientScript(ctx, []string{"*", "something"}), gc.Equals, debug.ClientScript(ctx, []string{"*"}))
	// debug.ClientScript does not validate hook names, as it doesn't have
	// a full state API connection to determine valid relation hooks.
	expected := fmt.Sprintf(
		`(.|\n)*echo "aG9va3M6Ci0gc29tZXRoaW5nIHNvbWV0aGluZ2Vsc2UK" | base64 -d > %s(.|\n)*`,
		regexp.QuoteMeta(ctx.ClientFileLock()),
	)
	c.Assert(debug.ClientScript(ctx, []string{"something somethingelse"}), gc.Matches, expected)
}
Fixed tests to match the updated debug-hooks client script: check each template placeholder individually (the script contains shell braces) and expect "tmux attach-session -t" rather than "tmux new-session -s".
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package debug_test
import (
"fmt"
"regexp"
gc "gopkg.in/check.v1"
"github.com/juju/juju/worker/uniter/runner/debug"
)
type DebugHooksClientSuite struct{}
var _ = gc.Suite(&DebugHooksClientSuite{})
// TestClientScript checks the generated debug-hooks client script: every
// template variable must be substituted, the script must attach to the
// unit's tmux session, and it must take the entry/exit file locks on the
// expected file descriptors.
func (*DebugHooksClientSuite) TestClientScript(c *gc.C) {
	ctx := debug.NewHooksContext("foo/8")
	// Test the variable substitutions.
	result := debug.ClientScript(ctx, nil)
	// No variables left behind: each template placeholder must have been
	// replaced in the rendered script.
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{unit_name}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{tmux_conf}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{entry_flock}(.|\n)*")
	c.Assert(result, gc.Not(gc.Matches), "(.|\n)*{exit_flock}(.|\n)*")
	// tmux new-session -d -s {unit_name}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*tmux attach-session -t %s(.|\n)*", regexp.QuoteMeta(ctx.Unit)))
	//) 9>{exit_flock}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*\\) 9>%s(.|\n)*", regexp.QuoteMeta(ctx.ClientExitFileLock())))
	//) 8>{entry_flock}
	c.Assert(result, gc.Matches, fmt.Sprintf("(.|\n)*\\) 8>%s(.|\n)*", regexp.QuoteMeta(ctx.ClientFileLock())))
	// nil is the same as empty slice is the same as "*".
	// Also, if "*" is present as well as a named hook,
	// it is equivalent to "*".
	c.Assert(debug.ClientScript(ctx, nil), gc.Equals, debug.ClientScript(ctx, []string{}))
	c.Assert(debug.ClientScript(ctx, []string{"*"}), gc.Equals, debug.ClientScript(ctx, nil))
	c.Assert(debug.ClientScript(ctx, []string{"*", "something"}), gc.Equals, debug.ClientScript(ctx, []string{"*"}))
	// debug.ClientScript does not validate hook names, as it doesn't have
	// a full state API connection to determine valid relation hooks.
	// The base64 blob below is the YAML "hooks:\n- something somethingelse\n".
	expected := fmt.Sprintf(
		`(.|\n)*echo "aG9va3M6Ci0gc29tZXRoaW5nIHNvbWV0aGluZ2Vsc2UK" | base64 -d > %s(.|\n)*`,
		regexp.QuoteMeta(ctx.ClientFileLock()),
	)
	c.Assert(debug.ClientScript(ctx, []string{"something somethingelse"}), gc.Matches, expected)
}
|
package isolated
import (
"fmt"
"regexp"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccversion"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
. "github.com/onsi/gomega/ghttp"
)
// Integration tests for "cf create-shared-domain": help output, admin
// success/failure paths, the --internal and --router-group flags, and
// authorization failures for non-admin users.
var _ = Describe("create-shared-domain command", func() {
	Context("Help", func() {
		It("displays the help information", func() {
			session := helpers.CF("create-shared-domain", "--help")
			Eventually(session).Should(Say("NAME:\n"))
			Eventually(session).Should(Say(regexp.QuoteMeta("create-shared-domain - Create a domain that can be used by all orgs (admin-only)")))
			Eventually(session).Should(Say("USAGE:\n"))
			Eventually(session).Should(Say(regexp.QuoteMeta("cf create-shared-domain DOMAIN [--router-group ROUTER_GROUP | --internal]")))
			Eventually(session).Should(Say("OPTIONS:\n"))
			Eventually(session).Should(Say(`--router-group\s+Routes for this domain will be configured only on the specified router group`))
			Eventually(session).Should(Say(`--internal\s+Applications that use internal routes communicate directly on the container network`))
			Eventually(session).Should(Say("SEE ALSO:\n"))
			Eventually(session).Should(Say("create-domain, domains, router-groups"))
			Eventually(session).Should(Exit(0))
		})
	})
	var (
		orgName    string
		spaceName  string
		domainName string
	)
	BeforeEach(func() {
		orgName = helpers.NewOrgName()
		spaceName = helpers.NewSpaceName()
		helpers.SetupCF(orgName, spaceName)
		domainName = helpers.NewDomainName()
	})
	When("user is logged in as admin", func() {
		When("No optional flags are specified", func() {
			When("domain name is valid", func() {
				It("should create the shared domain", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("OK"))
					Eventually(session).Should(Exit(0))
					session = helpers.CF("domains")
					Eventually(session).Should(Say(`%s\s+shared`, domainName))
					// Wait for the "domains" session to exit; otherwise
					// parallel test processes can race on a temp file on
					// Windows and fail.
					Eventually(session).Should(Exit(0))
				})
			})
			When("domain name is invalid", func() {
				BeforeEach(func() {
					domainName = "invalid-domain-name%*$$#)*" + helpers.RandomName()
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", regexp.QuoteMeta(domainName)))
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say(regexp.QuoteMeta("The domain is invalid: name can contain multiple subdomains, each having only alphanumeric characters and hyphens of up to 63 characters, see RFC 1035.")))
					Eventually(session).Should(Exit(1))
				})
			})
			When("domain name is already taken", func() {
				BeforeEach(func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Exit(0))
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say("The domain name is taken: %s", domainName))
					Eventually(session).Should(Exit(1))
				})
			})
		})
		When("the --internal flag is specified", func() {
			When("the CC API version is less than the minimum version specified", func() {
				var server *Server
				BeforeEach(func() {
					server = helpers.StartAndTargetServerWithAPIVersions(ccversion.MinV2ClientVersion, ccversion.MinV3ClientVersion)
				})
				AfterEach(func() {
					server.Close()
				})
				It("fails with error message that the minimum version is not met", func() {
					session := helpers.CF("create-shared-domain", domainName, "--internal", "-v")
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say(`Option '--internal' requires CF API version 2\.115\.0 or higher\. Your target is %s`, ccversion.MinV2ClientVersion))
					Eventually(session).Should(Exit(1))
				})
			})
			When("the CC API version meets the minimum version requirement", func() {
				BeforeEach(func() {
					helpers.SkipIfVersionLessThan(ccversion.MinVersionInternalDomainV2)
				})
				When("things work as expected", func() {
					It("creates a domain with internal flag", func() {
						session := helpers.CF("create-shared-domain", domainName, "--internal")
						Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
						Eventually(session).Should(Say("OK"))
						Eventually(session).Should(Exit(0))
						session = helpers.CF("domains")
						var sharedDomainResponse struct {
							Resources []struct {
								Entity struct {
									Internal bool   `json:"internal"`
									Name     string `json:"name"`
								}
							}
						}
						helpers.Curl(&sharedDomainResponse, "/v2/shared_domains?q=name:%s", domainName)
						Expect(sharedDomainResponse.Resources).To(HaveLen(1))
						isInternal := sharedDomainResponse.Resources[0].Entity.Internal
						Expect(isInternal).To(BeTrue())
					})
				})
				When("both --internal and --router-group flags are specified", func() {
					It("returns an argument error", func() {
						session := helpers.CF("create-shared-domain", domainName, "--router-group", "my-router-group", "--internal")
						Eventually(session.Err).Should(Say("Incorrect Usage: The following arguments cannot be used together: --router-group, --internal"))
						Eventually(session).Should(Say("FAILED"))
						Eventually(session).Should(Exit(1))
					})
				})
			})
		})
		When("With the --router-group flag", func() {
			var routerGroupName string
			BeforeEach(func() {
				helpers.SkipIfNoRoutingAPI()
			})
			When("router-group exists", func() {
				BeforeEach(func() {
					routerGroupName = helpers.FindOrCreateTCPRouterGroup(GinkgoParallelNode())
				})
				It("should create a new shared domain", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("OK"))
					Eventually(session).Should(Exit(0))
					session = helpers.CF("domains")
					Eventually(session).Should(Say(`%s\s+shared`, domainName))
					var sharedDomainResponse struct {
						Resources []struct {
							Entity struct {
								RouterGroupGUID string `json:"router_group_guid"`
							}
						}
					}
					helpers.Curl(&sharedDomainResponse, "/v2/shared_domains?q=name:%s", domainName)
					Expect(sharedDomainResponse.Resources).To(HaveLen(1))
					currentRouterGroupGUID := sharedDomainResponse.Resources[0].Entity.RouterGroupGUID
					var routerGroupListResponse []struct{ GUID string }
					helpers.Curl(&routerGroupListResponse, "/routing/v1/router_groups?name=%s", routerGroupName)
					Expect(routerGroupListResponse).To(HaveLen(1))
					expectedRouterGroupGUID := routerGroupListResponse[0].GUID
					Expect(currentRouterGroupGUID).Should(Equal(expectedRouterGroupGUID))
				})
			})
			When("router-group does not exist", func() {
				BeforeEach(func() {
					routerGroupName = "not-a-real-router-group"
					session := helpers.CF("router-groups")
					Consistently(session).ShouldNot(Say(routerGroupName))
					Eventually(session).Should(Exit(0))
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say("Router group not-a-real-router-group not found"))
					Eventually(session).Should(Exit(1))
				})
			})
		})
	})
	When("user is not logged in as admin", func() {
		var (
			username        string
			password        string
			routerGroupName string
		)
		BeforeEach(func() {
			helpers.LoginCF()
			username, password = helpers.CreateUser()
			helpers.LoginAs(username, password)
		})
		It("should not be able to create shared domain", func() {
			session := helpers.CF("create-shared-domain", domainName)
			Eventually(session).Should(Say(fmt.Sprintf("Creating shared domain %s as %s...", domainName, username)))
			Eventually(session).Should(Say("FAILED"))
			Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
			Eventually(session).Should(Exit(1))
		})
		When("with --internal flag", func() {
			BeforeEach(func() {
				helpers.SkipIfVersionLessThan(ccversion.MinVersionInternalDomainV2)
			})
			It("should fail and return an unauthorized message", func() {
				session := helpers.CF("create-shared-domain", domainName, "--internal")
				Eventually(session).Should(Say("FAILED"))
				Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
				Eventually(session).Should(Exit(1))
			})
		})
		When("with --router-group flag", func() {
			BeforeEach(func() {
				helpers.SkipIfNoRoutingAPI()
			})
			When("router-group exists", func() {
				BeforeEach(func() {
					routerGroupName = helpers.FindOrCreateTCPRouterGroup(GinkgoParallelNode())
				})
				It("should fail and return an unauthorized message", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).ShouldNot(Say("Error Code: 401"))
					Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
					Eventually(session).Should(Exit(1))
				})
			})
			When("router-group does not exists", func() {
				BeforeEach(func() {
					routerGroupName = "invalid-router-group"
				})
				It("should fail and return an unauthorized message", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).ShouldNot(Say("Error Code: 401"))
					Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
					Eventually(session).Should(Exit(1))
				})
			})
		})
	})
})
Assert session exits
Integration tests need to wait for the session to exit or we get
multiple test threads trying to open a temp file on Windows, which
causes the tests to fail.
Signed-off-by: Magesh Kumar Murali <3648c978775ca4de568ac474a3a4c21a8c5d85a9@pivotal.io>
package isolated
import (
"fmt"
"regexp"
"code.cloudfoundry.org/cli/api/cloudcontroller/ccversion"
"code.cloudfoundry.org/cli/integration/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
. "github.com/onsi/gomega/ghttp"
)
// Integration tests for "cf create-shared-domain": help output, admin
// success/failure paths, the --internal and --router-group flags, and
// authorization failures for non-admin users.
var _ = Describe("create-shared-domain command", func() {
	Context("Help", func() {
		It("displays the help information", func() {
			session := helpers.CF("create-shared-domain", "--help")
			Eventually(session).Should(Say("NAME:\n"))
			Eventually(session).Should(Say(regexp.QuoteMeta("create-shared-domain - Create a domain that can be used by all orgs (admin-only)")))
			Eventually(session).Should(Say("USAGE:\n"))
			Eventually(session).Should(Say(regexp.QuoteMeta("cf create-shared-domain DOMAIN [--router-group ROUTER_GROUP | --internal]")))
			Eventually(session).Should(Say("OPTIONS:\n"))
			Eventually(session).Should(Say(`--router-group\s+Routes for this domain will be configured only on the specified router group`))
			Eventually(session).Should(Say(`--internal\s+Applications that use internal routes communicate directly on the container network`))
			Eventually(session).Should(Say("SEE ALSO:\n"))
			Eventually(session).Should(Say("create-domain, domains, router-groups"))
			Eventually(session).Should(Exit(0))
		})
	})
	var (
		orgName    string
		spaceName  string
		domainName string
	)
	BeforeEach(func() {
		orgName = helpers.NewOrgName()
		spaceName = helpers.NewSpaceName()
		helpers.SetupCF(orgName, spaceName)
		domainName = helpers.NewDomainName()
	})
	When("user is logged in as admin", func() {
		When("No optional flags are specified", func() {
			When("domain name is valid", func() {
				It("should create the shared domain", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("OK"))
					Eventually(session).Should(Exit(0))
					session = helpers.CF("domains")
					Eventually(session).Should(Say(`%s\s+shared`, domainName))
					// Wait for the "domains" session to exit so parallel
					// test processes don't race on a temp file on Windows.
					Eventually(session).Should(Exit(0))
				})
			})
			When("domain name is invalid", func() {
				BeforeEach(func() {
					domainName = "invalid-domain-name%*$$#)*" + helpers.RandomName()
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", regexp.QuoteMeta(domainName)))
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say(regexp.QuoteMeta("The domain is invalid: name can contain multiple subdomains, each having only alphanumeric characters and hyphens of up to 63 characters, see RFC 1035.")))
					Eventually(session).Should(Exit(1))
				})
			})
			When("domain name is already taken", func() {
				BeforeEach(func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Exit(0))
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say("The domain name is taken: %s", domainName))
					Eventually(session).Should(Exit(1))
				})
			})
		})
		When("the --internal flag is specified", func() {
			When("the CC API version is less than the minimum version specified", func() {
				var server *Server
				BeforeEach(func() {
					server = helpers.StartAndTargetServerWithAPIVersions(ccversion.MinV2ClientVersion, ccversion.MinV3ClientVersion)
				})
				AfterEach(func() {
					server.Close()
				})
				It("fails with error message that the minimum version is not met", func() {
					session := helpers.CF("create-shared-domain", domainName, "--internal", "-v")
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say(`Option '--internal' requires CF API version 2\.115\.0 or higher\. Your target is %s`, ccversion.MinV2ClientVersion))
					Eventually(session).Should(Exit(1))
				})
			})
			When("the CC API version meets the minimum version requirement", func() {
				BeforeEach(func() {
					helpers.SkipIfVersionLessThan(ccversion.MinVersionInternalDomainV2)
				})
				When("things work as expected", func() {
					It("creates a domain with internal flag", func() {
						session := helpers.CF("create-shared-domain", domainName, "--internal")
						Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
						Eventually(session).Should(Say("OK"))
						Eventually(session).Should(Exit(0))
						session = helpers.CF("domains")
						// Verify via the CC API that the domain was flagged internal.
						var sharedDomainResponse struct {
							Resources []struct {
								Entity struct {
									Internal bool   `json:"internal"`
									Name     string `json:"name"`
								}
							}
						}
						helpers.Curl(&sharedDomainResponse, "/v2/shared_domains?q=name:%s", domainName)
						Expect(sharedDomainResponse.Resources).To(HaveLen(1))
						isInternal := sharedDomainResponse.Resources[0].Entity.Internal
						Expect(isInternal).To(BeTrue())
					})
				})
				When("both --internal and --router-group flags are specified", func() {
					It("returns an argument error", func() {
						session := helpers.CF("create-shared-domain", domainName, "--router-group", "my-router-group", "--internal")
						Eventually(session.Err).Should(Say("Incorrect Usage: The following arguments cannot be used together: --router-group, --internal"))
						Eventually(session).Should(Say("FAILED"))
						Eventually(session).Should(Exit(1))
					})
				})
			})
		})
		When("With the --router-group flag", func() {
			var routerGroupName string
			BeforeEach(func() {
				helpers.SkipIfNoRoutingAPI()
			})
			When("router-group exists", func() {
				BeforeEach(func() {
					routerGroupName = helpers.FindOrCreateTCPRouterGroup(GinkgoParallelNode())
				})
				It("should create a new shared domain", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("Creating shared domain %s as admin...", domainName))
					Eventually(session).Should(Say("OK"))
					Eventually(session).Should(Exit(0))
					session = helpers.CF("domains")
					Eventually(session).Should(Say(`%s\s+shared`, domainName))
					// Cross-check the domain's router group GUID against the
					// routing API's record for the named router group.
					var sharedDomainResponse struct {
						Resources []struct {
							Entity struct {
								RouterGroupGUID string `json:"router_group_guid"`
							}
						}
					}
					helpers.Curl(&sharedDomainResponse, "/v2/shared_domains?q=name:%s", domainName)
					Expect(sharedDomainResponse.Resources).To(HaveLen(1))
					currentRouterGroupGUID := sharedDomainResponse.Resources[0].Entity.RouterGroupGUID
					var routerGroupListResponse []struct{ GUID string }
					helpers.Curl(&routerGroupListResponse, "/routing/v1/router_groups?name=%s", routerGroupName)
					Expect(routerGroupListResponse).To(HaveLen(1))
					expectedRouterGroupGUID := routerGroupListResponse[0].GUID
					Expect(currentRouterGroupGUID).Should(Equal(expectedRouterGroupGUID))
				})
			})
			When("router-group does not exist", func() {
				BeforeEach(func() {
					routerGroupName = "not-a-real-router-group"
					session := helpers.CF("router-groups")
					Consistently(session).ShouldNot(Say(routerGroupName))
					Eventually(session).Should(Exit(0))
				})
				It("should fail and return an error", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).Should(Say("Router group not-a-real-router-group not found"))
					Eventually(session).Should(Exit(1))
				})
			})
		})
	})
	When("user is not logged in as admin", func() {
		var (
			username        string
			password        string
			routerGroupName string
		)
		BeforeEach(func() {
			helpers.LoginCF()
			username, password = helpers.CreateUser()
			helpers.LoginAs(username, password)
		})
		It("should not be able to create shared domain", func() {
			session := helpers.CF("create-shared-domain", domainName)
			Eventually(session).Should(Say(fmt.Sprintf("Creating shared domain %s as %s...", domainName, username)))
			Eventually(session).Should(Say("FAILED"))
			Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
			Eventually(session).Should(Exit(1))
		})
		When("with --internal flag", func() {
			BeforeEach(func() {
				helpers.SkipIfVersionLessThan(ccversion.MinVersionInternalDomainV2)
			})
			It("should fail and return an unauthorized message", func() {
				session := helpers.CF("create-shared-domain", domainName, "--internal")
				Eventually(session).Should(Say("FAILED"))
				Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
				Eventually(session).Should(Exit(1))
			})
		})
		When("with --router-group flag", func() {
			BeforeEach(func() {
				helpers.SkipIfNoRoutingAPI()
			})
			When("router-group exists", func() {
				BeforeEach(func() {
					routerGroupName = helpers.FindOrCreateTCPRouterGroup(GinkgoParallelNode())
				})
				It("should fail and return an unauthorized message", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).ShouldNot(Say("Error Code: 401"))
					Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
					Eventually(session).Should(Exit(1))
				})
			})
			When("router-group does not exists", func() {
				BeforeEach(func() {
					routerGroupName = "invalid-router-group"
				})
				It("should fail and return an unauthorized message", func() {
					session := helpers.CF("create-shared-domain", domainName, "--router-group", routerGroupName)
					Eventually(session).Should(Say("FAILED"))
					Eventually(session.Err).ShouldNot(Say("Error Code: 401"))
					Eventually(session.Err).Should(Say("You are not authorized to perform the requested action"))
					Eventually(session).Should(Exit(1))
				})
			})
		})
	})
})
|
package google
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/url"
"time"
)
const (
	// FCMSendEndpoint is the endpoint for sending message to the Firebase Cloud Messaging (FCM) server.
	// See more on https://firebase.google.com/docs/cloud-messaging/server
	// Fixed: the path is "/fcm/send", not "/fcm/senda".
	FCMSendEndpoint = "https://fcm.googleapis.com/fcm/send"
	// GCMSendEndpoint is the endpoint for sending messages to the Google Cloud Messaging (GCM) server.
	// Firebase Cloud Messaging (FCM) is the new version of GCM. Should use new endpoint.
	// See more on https://firebase.google.com/support/faq/#gcm-fcm
	GCMSendEndpoint = "https://gcm-http.googleapis.com/gcm/send"
)
const (
	// Initial delay before first retry, without jitter, in milliseconds
	// (Send sleeps for time.Duration(n) * time.Millisecond).
	backoffInitialDelay = 1000
	// Maximum delay before a retry, in milliseconds.
	maxBackoffDelay = 1024000
	// maxRegistrationIDs are max number of registration IDs in one message.
	maxRegistrationIDs = 1000
	// maxTimeToLive is max time GCM storage can store messages when the device is offline, in seconds.
	maxTimeToLive = 2419200 // 4 weeks
)
// Client abstracts the interaction between the application server and the
// GCM server. The developer must obtain an API key from the Google APIs
// Console page and pass it to the Client so that it can perform authorized
// requests on the application server's behalf. To send a message to one or
// more devices use the Client's Send or SendNoRetry methods.
type Client struct {
	// ApiKey is the server API key used in the Authorization header.
	// (Exported name predates the userID-style initialism convention;
	// renaming would break callers.)
	ApiKey string
	// URL is the GCM/FCM send endpoint, e.g. FCMSendEndpoint.
	URL string
	// Http performs the requests; NewClient sets http.DefaultClient,
	// which callers may overwrite for custom timeouts/transport.
	Http *http.Client
}
// NewClient returns a new sender targeting the given endpoint URL with the
// given apiKey. It returns an error when either argument is empty or the
// URL fails to parse. The returned Client uses http.DefaultClient; replace
// the Http field if you need custom transport configuration.
func NewClient(urlString, apiKey string) (*Client, error) {
	switch {
	case urlString == "":
		return nil, fmt.Errorf("missing GCM/FCM endpoint url")
	case apiKey == "":
		return nil, fmt.Errorf("missing API Key")
	}
	if _, err := url.Parse(urlString); err != nil {
		return nil, fmt.Errorf("failed to parse URL %q: %s", urlString, err)
	}
	client := &Client{
		URL:    urlString,
		ApiKey: apiKey,
		Http:   http.DefaultClient,
	}
	return client, nil
}
// SendNoRetry sends a message to the GCM server exactly once, with no retry
// on service unavailability. A non-nil error is returned if a
// non-recoverable error occurs (i.e. if the response status is not
// "200 OK") or if msg fails validation.
func (s *Client) SendNoRetry(msg *Message) (*Response, error) {
	err := msg.validate()
	if err != nil {
		return nil, err
	}
	return s.send(msg)
}
// Send sends a message to the GCM server, retrying in case of service
// unavailability. A non-nil error is returned if a non-recoverable
// error occurs (i.e. if the response status is not "200 OK").
//
// Note that messages are retried using exponential backoff (with jitter),
// and as a result, this method may block for several seconds. On return,
// msg.RegistrationIDs is restored to its original value even though it is
// temporarily mutated during retries.
func (s *Client) Send(msg *Message, retries int) (*Response, error) {
	if err := msg.validate(); err != nil {
		return nil, err
	}
	if retries < 0 {
		return nil, errors.New("'retries' must not be negative.")
	}

	// Send the message for the first time.
	resp, err := s.send(msg)
	if err != nil {
		return nil, err
	} else if resp.Failure == 0 || retries == 0 {
		return resp, nil
	}

	// One or more messages failed to send. updateStatus narrows
	// msg.RegistrationIDs to the IDs that failed recoverably ("Unavailable"),
	// so each retry only resends those.
	regIDs := msg.RegistrationIDs
	allResults := make(map[string]Result, len(regIDs))
	backoff := backoffInitialDelay
	for i := 0; updateStatus(msg, resp, allResults) > 0 && i < retries; i++ {
		sleepTime := backoff/2 + rand.Intn(backoff)
		time.Sleep(time.Duration(sleepTime) * time.Millisecond)
		backoff = min(2*backoff, maxBackoffDelay)
		if resp, err = s.send(msg); err != nil {
			msg.RegistrationIDs = regIDs
			return nil, err
		}
	}

	// Bring the message back to its original state.
	msg.RegistrationIDs = regIDs

	// Create a Response containing the overall results, in the original
	// registration-ID order.
	finalResults := make([]Result, len(regIDs))
	var success, failure, canonicalIDs int
	for i := 0; i < len(regIDs); i++ {
		// A missing entry yields the zero Result, which counts as a failure.
		result := allResults[regIDs[i]]
		finalResults[i] = result
		if result.MessageID != "" {
			if result.RegistrationID != "" {
				canonicalIDs++
			}
			success++
		} else {
			failure++
		}
	}

	return &Response{
		// Return the most recent multicast id.
		MulticastID:  resp.MulticastID,
		Success:      success,
		Failure:      failure,
		CanonicalIDs: canonicalIDs,
		Results:      finalResults,
	}, nil
}
// send encodes msg as JSON, POSTs it to the configured endpoint with the
// API-key Authorization header, and decodes the server's JSON reply.
// Any non-200 status is returned as an error.
func (s *Client) send(msg *Message) (*Response, error) {
	payload := new(bytes.Buffer)
	if err := json.NewEncoder(payload).Encode(msg); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", s.URL, payload)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", fmt.Sprintf("key=%s", s.ApiKey))
	req.Header.Add("Content-Type", "application/json")
	resp, err := s.Http.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("invalid status code %d: %s", resp.StatusCode, resp.Status)
	}
	response := new(Response)
	if err := json.NewDecoder(resp.Body).Decode(response); err != nil {
		return nil, err
	}
	return response, nil
}
// updateStatus records each device's Result in allResults, narrows
// msg.RegistrationIDs to the IDs that failed with the recoverable
// "Unavailable" error, and returns how many such IDs remain to retry.
func updateStatus(msg *Message, resp *Response, allResults map[string]Result) int {
	pending := make([]string, 0, resp.Failure)
	for i, result := range resp.Results {
		id := msg.RegistrationIDs[i]
		allResults[id] = result
		if result.Error == "Unavailable" {
			pending = append(pending, id)
		}
	}
	msg.RegistrationIDs = pending
	return len(pending)
}
// min returns the smaller of two integers. For exciting religious wars
// about why this wasn't included in the "math" package, see this thread:
// https://groups.google.com/d/topic/golang-nuts/dbyqx_LGUxM/discussion
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
Fix endpoint
package google
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"math/rand"
"net/http"
"net/url"
"time"
)
// Endpoints for delivering downstream messages. Both accept the same
// JSON payload and key-based Authorization header used by Client.send.
const (
	// FCMSendEndpoint is the endpoint for sending message to the Firebase Cloud Messaging (FCM) server.
	// See more on https://firebase.google.com/docs/cloud-messaging/server
	FCMSendEndpoint = "https://fcm.googleapis.com/fcm/send"
	// GCMSendEndpoint is the endpoint for sending messages to the Google Cloud Messaging (GCM) server.
	// Firebase Cloud Messaging (FCM) is the new version of GCM. Should use new endpoint.
	// See more on https://firebase.google.com/support/faq/#gcm-fcm
	GCMSendEndpoint = "https://gcm-http.googleapis.com/gcm/send"
)
const (
	// Initial delay before first retry, without jitter, in milliseconds
	// (Send sleeps sleepTime * time.Millisecond).
	backoffInitialDelay = 1000
	// Maximum delay before a retry, in milliseconds.
	maxBackoffDelay = 1024000
	// maxRegistrationIDs are max number of registration IDs in one message.
	maxRegistrationIDs = 1000
	// maxTimeToLive is max time GCM storage can store messages when the device is offline
	maxTimeToLive = 2419200 // 4 weeks, in seconds
)
// Client abstracts the interaction between the application server and the
// GCM server. The developer must obtain an API key from the Google APIs
// Console page and pass it to the Client so that it can perform authorized
// requests on the application server's behalf. To send a message to one or
// more devices use the Client's Send or SendNoRetry methods.
type Client struct {
	// ApiKey is sent as the "Authorization: key=..." header on every request.
	ApiKey string
	// URL is the GCM/FCM send endpoint (see FCMSendEndpoint / GCMSendEndpoint).
	URL string
	// Http is the HTTP client used for all requests; NewClient sets it to
	// http.DefaultClient, and it may be replaced for custom configuration.
	Http *http.Client
}
// NewClient returns a new sender with the given URL and apiKey.
// If one of input is empty or URL is malformed, returns error.
// It sets http.DefaultHTTP client for http connection to server.
// If you need our own configuration overwrite it.
func NewClient(urlString, apiKey string) (*Client, error) {
	switch {
	case urlString == "":
		return nil, fmt.Errorf("missing GCM/FCM endpoint url")
	case apiKey == "":
		return nil, fmt.Errorf("missing API Key")
	}
	if _, err := url.Parse(urlString); err != nil {
		return nil, fmt.Errorf("failed to parse URL %q: %s", urlString, err)
	}
	client := &Client{
		URL:    urlString,
		ApiKey: apiKey,
		Http:   http.DefaultClient,
	}
	return client, nil
}
// SendNoRetry sends a message to the GCM server without retrying in case of
// service unavailability. A non-nil error is returned if a non-recoverable
// error occurs (i.e. if the response status is not "200 OK").
func (s *Client) SendNoRetry(msg *Message) (*Response, error) {
	// Validate locally before touching the network; invalid messages fail fast.
	if err := msg.validate(); err != nil {
		return nil, err
	}
	return s.send(msg)
}
// Send sends a message to the GCM server, retrying in case of service
// unavailability. A non-nil error is returned if a non-recoverable
// error occurs (i.e. if the response status is not "200 OK").
//
// Note that messages are retried using exponential backoff, and as a
// result, this method may block for several seconds.
func (s *Client) Send(msg *Message, retries int) (*Response, error) {
	if err := msg.validate(); err != nil {
		return nil, err
	}
	if retries < 0 {
		return nil, errors.New("'retries' must not be negative.")
	}
	// Send the message for the first time.
	resp, err := s.send(msg)
	if err != nil {
		return nil, err
	} else if resp.Failure == 0 || retries == 0 {
		return resp, nil
	}
	// One or more messages failed to send.
	regIDs := msg.RegistrationIDs
	allResults := make(map[string]Result, len(regIDs))
	backoff := backoffInitialDelay
	// updateStatus shrinks msg.RegistrationIDs to the retryable subset; the
	// loop ends when nothing is retryable or the retry budget is spent.
	for i := 0; updateStatus(msg, resp, allResults) > 0 && i < retries; i++ {
		// Sleep backoff/2 + [0, backoff) ms (jitter), then double the backoff
		// up to maxBackoffDelay.
		sleepTime := backoff/2 + rand.Intn(backoff)
		time.Sleep(time.Duration(sleepTime) * time.Millisecond)
		backoff = min(2*backoff, maxBackoffDelay)
		if resp, err = s.send(msg); err != nil {
			// Restore the caller's registration IDs before bailing out.
			msg.RegistrationIDs = regIDs
			return nil, err
		}
	}
	// Bring the message back to its original state.
	msg.RegistrationIDs = regIDs
	// Create a Response containing the overall results.
	finalResults := make([]Result, len(regIDs))
	var success, failure, canonicalIDs int
	for i := 0; i < len(regIDs); i++ {
		// IDs never retried map to the zero Result and count as failures.
		// (Idiom fix: plain map read instead of `result, _ :=` — staticcheck S1005.)
		result := allResults[regIDs[i]]
		finalResults[i] = result
		if result.MessageID != "" {
			if result.RegistrationID != "" {
				canonicalIDs++
			}
			success++
		} else {
			failure++
		}
	}
	return &Response{
		// Return the most recent multicast id.
		MulticastID:  resp.MulticastID,
		Success:      success,
		Failure:      failure,
		CanonicalIDs: canonicalIDs,
		Results:      finalResults,
	}, nil
}
// send performs a single HTTP POST of msg (JSON-encoded) to the configured
// GCM/FCM endpoint, authorized with the client's API key, and decodes the
// JSON response. It does not retry; callers handle retries (see Send).
func (s *Client) send(msg *Message) (*Response, error) {
	var buf bytes.Buffer
	encoder := json.NewEncoder(&buf)
	if err := encoder.Encode(msg); err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", s.URL, &buf)
	if err != nil {
		return nil, err
	}
	req.Header.Add("Authorization", fmt.Sprintf("key=%s", s.ApiKey))
	req.Header.Add("Content-Type", "application/json")
	resp, err := s.Http.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("invalid status code %d: %s", resp.StatusCode, resp.Status)
	}
	var response Response
	decoder := json.NewDecoder(resp.Body)
	if err := decoder.Decode(&response); err != nil {
		return nil, err
	}
	// Fix: return an explicit nil on success instead of the stale outer err
	// (which was always nil here, but fragile and misleading).
	return &response, nil
}
// updateStatus records the per-device outcome from resp into allResults and
// narrows msg.RegistrationIDs down to only those IDs whose delivery failed
// with the recoverable "Unavailable" error. It returns the number of IDs
// that remain to be retried.
func updateStatus(msg *Message, resp *Response, allResults map[string]Result) int {
	retryIDs := make([]string, 0, resp.Failure)
	for i, res := range resp.Results {
		id := msg.RegistrationIDs[i]
		allResults[id] = res
		if res.Error == "Unavailable" {
			retryIDs = append(retryIDs, id)
		}
	}
	msg.RegistrationIDs = retryIDs
	return len(retryIDs)
}
// min returns the smaller of two integers. For exciting religious wars
// about why this wasn't included in the "math" package, see this thread:
// https://groups.google.com/d/topic/golang-nuts/dbyqx_LGUxM/discussion
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
package aws
import (
"fmt"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role`
// and `aws-opsworks-service-role`.

// TestAccAWSOpsworksCustomLayer creates a custom layer on a non-VPC stack,
// verifies its attributes both through the OpsWorks API
// (testAccCheckAWSOpsworksCreateLayerAttributes) and through state, then
// applies the updated configuration and re-verifies the state.
// Attribute keys with numeric suffixes (e.g. system_packages.1368285564)
// address individual set elements — presumably hash-keyed per the SDK's set
// convention; confirm if hashes drift after schema changes.
func TestAccAWSOpsworksCustomLayer(t *testing.T) {
	stackName := fmt.Sprintf("tf-%d", acctest.RandInt())
	var opslayer opsworks.Layer
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
		Steps: []resource.TestStep{
			{
				// Step 1: create, then check initial attributes.
				Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSOpsworksCustomLayerExists(
						"aws_opsworks_custom_layer.tf-acc", &opslayer),
					testAccCheckAWSOpsworksCreateLayerAttributes(&opslayer, stackName),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", stackName,
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_assign_elastic_ips", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_healing", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "300",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
				),
			},
			{
				// Step 2: apply the updated config and check the changed values
				// (extra security group, extra io1 volume, custom JSON).
				Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", stackName,
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "120",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.4101929740", "subversion",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.type", "io1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.number_of_disks", "4",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.mount_point", "/var",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.raid_level", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.iops", "3000",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_json", `{"layer_key":"layer_value2"}`,
					),
				),
			},
		},
	})
}
// testAccCheckAWSOpsworksCustomLayerExists fetches the OpsWorks layer behind
// the named resource from the API and stores it in opslayer for later
// attribute checks.
func testAccCheckAWSOpsworksCustomLayerExists(
	n string, opslayer *opsworks.Layer) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, found := s.RootModule().Resources[n]
		if !found {
			return fmt.Errorf("Not found: %s", n)
		}
		layerID := rs.Primary.ID
		if layerID == "" {
			return fmt.Errorf("No ID is set")
		}
		client := testAccProvider.Meta().(*AWSClient).opsworksconn
		resp, err := client.DescribeLayers(&opsworks.DescribeLayersInput{
			LayerIds: []*string{aws.String(layerID)},
		})
		if err != nil {
			return err
		}
		if got := len(resp.Layers); got != 1 {
			return fmt.Errorf("Expected 1 response returned, got %d", got)
		}
		*opslayer = *resp.Layers[0]
		return nil
	}
}
// testAccCheckAWSOpsworksCreateLayerAttributes verifies, against the layer
// previously fetched from the OpsWorks API, the attribute values set by the
// initial (NoVpcCreate) configuration.
func testAccCheckAWSOpsworksCreateLayerAttributes(
	opslayer *opsworks.Layer, stackName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *opslayer.Name != stackName {
			return fmt.Errorf("Unexpected name: %s", *opslayer.Name)
		}
		if *opslayer.AutoAssignElasticIps {
			return fmt.Errorf(
				"Unexpected AutoAssignElasticIps: %t", *opslayer.AutoAssignElasticIps)
		}
		if !*opslayer.EnableAutoHealing {
			return fmt.Errorf(
				"Unexpected EnableAutoHealing: %t", *opslayer.EnableAutoHealing)
		}
		if !*opslayer.LifecycleEventConfiguration.Shutdown.DelayUntilElbConnectionsDrained {
			return fmt.Errorf(
				"Unexpected DelayUntilElbConnectionsDrained: %t",
				*opslayer.LifecycleEventConfiguration.Shutdown.DelayUntilElbConnectionsDrained)
		}
		if *opslayer.LifecycleEventConfiguration.Shutdown.ExecutionTimeout != 300 {
			return fmt.Errorf(
				"Unexpected ExecutionTimeout: %d",
				*opslayer.LifecycleEventConfiguration.Shutdown.ExecutionTimeout)
		}
		if v := len(opslayer.CustomSecurityGroupIds); v != 2 {
			return fmt.Errorf("Expected 2 customSecurityGroupIds, got %d", v)
		}
		expectedPackages := []*string{
			aws.String("git"),
			aws.String("golang"),
		}
		if !reflect.DeepEqual(expectedPackages, opslayer.Packages) {
			return fmt.Errorf("Unexpected Packages: %v", aws.StringValueSlice(opslayer.Packages))
		}
		expectedEbsVolumes := []*opsworks.VolumeConfiguration{
			{
				VolumeType:    aws.String("gp2"),
				NumberOfDisks: aws.Int64(2),
				MountPoint:    aws.String("/home"),
				Size:          aws.Int64(100),
				RaidLevel:     aws.Int64(0),
			},
		}
		if !reflect.DeepEqual(expectedEbsVolumes, opslayer.VolumeConfigurations) {
			// Typo fix in the failure message: "Unnexpected" -> "Unexpected".
			return fmt.Errorf("Unexpected VolumeConfiguration: %s", opslayer.VolumeConfigurations)
		}
		return nil
	}
}
// testAccCheckAwsOpsworksCustomLayerDestroy verifies that every custom layer
// recorded in the state has been removed from OpsWorks.
//
// Bug fix: the original returned nil as soon as the FIRST layer came back
// ResourceNotFoundException, skipping any remaining resources, and returned a
// "Fall through" error even when all layers were gone. Now each not-found
// layer is skipped with continue, a still-existing layer is an immediate
// error, and nil is returned only after all resources check out.
func testAccCheckAwsOpsworksCustomLayerDestroy(s *terraform.State) error {
	opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_opsworks_custom_layer" {
			continue
		}
		req := &opsworks.DescribeLayersInput{
			LayerIds: []*string{
				aws.String(rs.Primary.ID),
			},
		}
		_, err := opsworksconn.DescribeLayers(req)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
				// This layer is gone, as expected; keep checking the rest.
				continue
			}
			return err
		}
		return fmt.Errorf("OpsWorks custom layer %s still exists", rs.Primary.ID)
	}
	return nil
}
// testAccAwsOpsworksCustomLayerSecurityGroups renders two ICMP-open security
// groups, named "<name>-layer1" and "<name>-layer2", shared by the custom
// layer test configurations.
func testAccAwsOpsworksCustomLayerSecurityGroups(name string) string {
	const tpl = `
resource "aws_security_group" "tf-ops-acc-layer1" {
name = "%s-layer1"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "tf-ops-acc-layer2" {
name = "%s-layer2"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}`
	return fmt.Sprintf(tpl, name, name)
}
// testAccAwsOpsworksCustomLayerConfigNoVpcCreate renders the initial (non-VPC)
// test configuration: the custom layer, its stack
// (testAccAwsOpsworksStackConfigNoVpcCreate), and the two shared security
// groups (testAccAwsOpsworksCustomLayerSecurityGroups).
func testAccAwsOpsworksCustomLayerConfigNoVpcCreate(name string) string {
	return fmt.Sprintf(`
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = true
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
]
drain_elb_on_shutdown = true
instance_shutdown_timeout = 300
system_packages = [
"git",
"golang",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
}
%s
%s
`, name, testAccAwsOpsworksStackConfigNoVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
// testAccAwsOpsworksCustomLayerConfigVpcCreate renders a VPC-based variant of
// the create configuration (public IP auto-assignment disabled).
// NOTE(review): this template embeds its own provider "aws" block
// (us-west-2), which may clash with the provider configured by the test
// framework — confirm before relying on it.
func testAccAwsOpsworksCustomLayerConfigVpcCreate(name string) string {
	return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = false
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
]
drain_elb_on_shutdown = true
instance_shutdown_timeout = 300
system_packages = [
"git",
"golang",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
}
%s
%s
`, name, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
// testAccAwsOpsworksCustomLayerConfigUpdate renders the updated configuration:
// a third security group, an additional io1 EBS volume, custom JSON, and
// changed shutdown settings.
//
// Fix: the duplicate provider "aws" { region = "us-east-1" } block is removed;
// the test framework already configures the provider, and a second definition
// in the config conflicts with it.
func testAccAwsOpsworksCustomLayerConfigUpdate(name string) string {
	return fmt.Sprintf(`
resource "aws_security_group" "tf-ops-acc-layer3" {
name = "tf-ops-acc-layer3"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = true
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
"${aws_security_group.tf-ops-acc-layer3.id}",
]
drain_elb_on_shutdown = false
instance_shutdown_timeout = 120
system_packages = [
"git",
"golang",
"subversion",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
ebs_volume {
type = "io1"
number_of_disks = 4
mount_point = "/var"
size = 100
raid_level = 1
iops = 3000
}
custom_json = "{\"layer_key\": \"layer_value2\"}"
}
%s
%s
`, name, testAccAwsOpsworksStackConfigNoVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
Update test to remove duplicate provider definition
package aws
import (
"fmt"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/opsworks"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// These tests assume the existence of predefined Opsworks IAM roles named `aws-opsworks-ec2-role`
// and `aws-opsworks-service-role`.

// TestAccAWSOpsworksCustomLayer creates a custom layer on a non-VPC stack,
// verifies its attributes both through the OpsWorks API
// (testAccCheckAWSOpsworksCreateLayerAttributes) and through state, then
// applies the updated configuration and re-verifies the state.
// Attribute keys with numeric suffixes (e.g. system_packages.1368285564)
// address individual set elements — presumably hash-keyed per the SDK's set
// convention; confirm if hashes drift after schema changes.
func TestAccAWSOpsworksCustomLayer(t *testing.T) {
	stackName := fmt.Sprintf("tf-%d", acctest.RandInt())
	var opslayer opsworks.Layer
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsOpsworksCustomLayerDestroy,
		Steps: []resource.TestStep{
			{
				// Step 1: create, then check initial attributes.
				Config: testAccAwsOpsworksCustomLayerConfigNoVpcCreate(stackName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAWSOpsworksCustomLayerExists(
						"aws_opsworks_custom_layer.tf-acc", &opslayer),
					testAccCheckAWSOpsworksCreateLayerAttributes(&opslayer, stackName),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", stackName,
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_assign_elastic_ips", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "auto_healing", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "true",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "300",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
				),
			},
			{
				// Step 2: apply the updated config and check the changed values
				// (extra security group, extra io1 volume, custom JSON).
				Config: testAccAwsOpsworksCustomLayerConfigUpdate(stackName),
				Check: resource.ComposeTestCheckFunc(
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "name", stackName,
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "drain_elb_on_shutdown", "false",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "instance_shutdown_timeout", "120",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_security_group_ids.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.#", "3",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.1368285564", "git",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.2937857443", "golang",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "system_packages.4101929740", "subversion",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.#", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.type", "gp2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.number_of_disks", "2",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.mount_point", "/home",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.3575749636.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.type", "io1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.number_of_disks", "4",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.mount_point", "/var",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.size", "100",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.raid_level", "1",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "ebs_volume.1266957920.iops", "3000",
					),
					resource.TestCheckResourceAttr(
						"aws_opsworks_custom_layer.tf-acc", "custom_json", `{"layer_key":"layer_value2"}`,
					),
				),
			},
		},
	})
}
// testAccCheckAWSOpsworksCustomLayerExists fetches the OpsWorks layer behind
// the named resource from the API and stores it in opslayer for later
// attribute checks.
func testAccCheckAWSOpsworksCustomLayerExists(
	n string, opslayer *opsworks.Layer) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, found := s.RootModule().Resources[n]
		if !found {
			return fmt.Errorf("Not found: %s", n)
		}
		layerID := rs.Primary.ID
		if layerID == "" {
			return fmt.Errorf("No ID is set")
		}
		client := testAccProvider.Meta().(*AWSClient).opsworksconn
		resp, err := client.DescribeLayers(&opsworks.DescribeLayersInput{
			LayerIds: []*string{aws.String(layerID)},
		})
		if err != nil {
			return err
		}
		if got := len(resp.Layers); got != 1 {
			return fmt.Errorf("Expected 1 response returned, got %d", got)
		}
		*opslayer = *resp.Layers[0]
		return nil
	}
}
// testAccCheckAWSOpsworksCreateLayerAttributes verifies, against the layer
// previously fetched from the OpsWorks API, the attribute values set by the
// initial (NoVpcCreate) configuration.
func testAccCheckAWSOpsworksCreateLayerAttributes(
	opslayer *opsworks.Layer, stackName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if *opslayer.Name != stackName {
			return fmt.Errorf("Unexpected name: %s", *opslayer.Name)
		}
		if *opslayer.AutoAssignElasticIps {
			return fmt.Errorf(
				"Unexpected AutoAssignElasticIps: %t", *opslayer.AutoAssignElasticIps)
		}
		if !*opslayer.EnableAutoHealing {
			return fmt.Errorf(
				"Unexpected EnableAutoHealing: %t", *opslayer.EnableAutoHealing)
		}
		if !*opslayer.LifecycleEventConfiguration.Shutdown.DelayUntilElbConnectionsDrained {
			return fmt.Errorf(
				"Unexpected DelayUntilElbConnectionsDrained: %t",
				*opslayer.LifecycleEventConfiguration.Shutdown.DelayUntilElbConnectionsDrained)
		}
		if *opslayer.LifecycleEventConfiguration.Shutdown.ExecutionTimeout != 300 {
			return fmt.Errorf(
				"Unexpected ExecutionTimeout: %d",
				*opslayer.LifecycleEventConfiguration.Shutdown.ExecutionTimeout)
		}
		if v := len(opslayer.CustomSecurityGroupIds); v != 2 {
			return fmt.Errorf("Expected 2 customSecurityGroupIds, got %d", v)
		}
		expectedPackages := []*string{
			aws.String("git"),
			aws.String("golang"),
		}
		if !reflect.DeepEqual(expectedPackages, opslayer.Packages) {
			return fmt.Errorf("Unexpected Packages: %v", aws.StringValueSlice(opslayer.Packages))
		}
		expectedEbsVolumes := []*opsworks.VolumeConfiguration{
			{
				VolumeType:    aws.String("gp2"),
				NumberOfDisks: aws.Int64(2),
				MountPoint:    aws.String("/home"),
				Size:          aws.Int64(100),
				RaidLevel:     aws.Int64(0),
			},
		}
		if !reflect.DeepEqual(expectedEbsVolumes, opslayer.VolumeConfigurations) {
			// Typo fix in the failure message: "Unnexpected" -> "Unexpected".
			return fmt.Errorf("Unexpected VolumeConfiguration: %s", opslayer.VolumeConfigurations)
		}
		return nil
	}
}
// testAccCheckAwsOpsworksCustomLayerDestroy verifies that every custom layer
// recorded in the state has been removed from OpsWorks.
//
// Bug fix: the original returned nil as soon as the FIRST layer came back
// ResourceNotFoundException, skipping any remaining resources, and returned a
// "Fall through" error even when all layers were gone. Now each not-found
// layer is skipped with continue, a still-existing layer is an immediate
// error, and nil is returned only after all resources check out.
func testAccCheckAwsOpsworksCustomLayerDestroy(s *terraform.State) error {
	opsworksconn := testAccProvider.Meta().(*AWSClient).opsworksconn
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_opsworks_custom_layer" {
			continue
		}
		req := &opsworks.DescribeLayersInput{
			LayerIds: []*string{
				aws.String(rs.Primary.ID),
			},
		}
		_, err := opsworksconn.DescribeLayers(req)
		if err != nil {
			if awserr, ok := err.(awserr.Error); ok && awserr.Code() == "ResourceNotFoundException" {
				// This layer is gone, as expected; keep checking the rest.
				continue
			}
			return err
		}
		return fmt.Errorf("OpsWorks custom layer %s still exists", rs.Primary.ID)
	}
	return nil
}
// testAccAwsOpsworksCustomLayerSecurityGroups renders two ICMP-open security
// groups, named "<name>-layer1" and "<name>-layer2", shared by the custom
// layer test configurations.
func testAccAwsOpsworksCustomLayerSecurityGroups(name string) string {
	const tpl = `
resource "aws_security_group" "tf-ops-acc-layer1" {
name = "%s-layer1"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "tf-ops-acc-layer2" {
name = "%s-layer2"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}`
	return fmt.Sprintf(tpl, name, name)
}
// testAccAwsOpsworksCustomLayerConfigNoVpcCreate renders the initial (non-VPC)
// test configuration: the custom layer, its stack
// (testAccAwsOpsworksStackConfigNoVpcCreate), and the two shared security
// groups (testAccAwsOpsworksCustomLayerSecurityGroups).
func testAccAwsOpsworksCustomLayerConfigNoVpcCreate(name string) string {
	return fmt.Sprintf(`
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = true
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
]
drain_elb_on_shutdown = true
instance_shutdown_timeout = 300
system_packages = [
"git",
"golang",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
}
%s
%s
`, name, testAccAwsOpsworksStackConfigNoVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
// testAccAwsOpsworksCustomLayerConfigVpcCreate renders a VPC-based variant of
// the create configuration (public IP auto-assignment disabled).
// NOTE(review): this template embeds its own provider "aws" block
// (us-west-2), which may clash with the provider configured by the test
// framework — confirm before relying on it.
func testAccAwsOpsworksCustomLayerConfigVpcCreate(name string) string {
	return fmt.Sprintf(`
provider "aws" {
region = "us-west-2"
}
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = false
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
]
drain_elb_on_shutdown = true
instance_shutdown_timeout = 300
system_packages = [
"git",
"golang",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
}
%s
%s
`, name, testAccAwsOpsworksStackConfigVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
// testAccAwsOpsworksCustomLayerConfigUpdate renders the updated configuration:
// a third security group, an additional io1 EBS volume, custom JSON, and
// changed shutdown settings. It deliberately contains no provider block; the
// test framework configures the provider.
func testAccAwsOpsworksCustomLayerConfigUpdate(name string) string {
	return fmt.Sprintf(`
resource "aws_security_group" "tf-ops-acc-layer3" {
name = "tf-ops-acc-layer3"
ingress {
from_port = 8
to_port = -1
protocol = "icmp"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_opsworks_custom_layer" "tf-acc" {
stack_id = "${aws_opsworks_stack.tf-acc.id}"
name = "%s"
short_name = "tf-ops-acc-custom-layer"
auto_assign_public_ips = true
custom_security_group_ids = [
"${aws_security_group.tf-ops-acc-layer1.id}",
"${aws_security_group.tf-ops-acc-layer2.id}",
"${aws_security_group.tf-ops-acc-layer3.id}",
]
drain_elb_on_shutdown = false
instance_shutdown_timeout = 120
system_packages = [
"git",
"golang",
"subversion",
]
ebs_volume {
type = "gp2"
number_of_disks = 2
mount_point = "/home"
size = 100
raid_level = 0
}
ebs_volume {
type = "io1"
number_of_disks = 4
mount_point = "/var"
size = 100
raid_level = 1
iops = 3000
}
custom_json = "{\"layer_key\": \"layer_value2\"}"
}
%s
%s
`, name, testAccAwsOpsworksStackConfigNoVpcCreate(name), testAccAwsOpsworksCustomLayerSecurityGroups(name))
}
|
package openstack
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
)
// resourceNetworkingSubnetV2 defines the schema and CRUD entry points for the
// OpenStack Neutron v2 subnet resource. Fields marked ForceNew recreate the
// subnet on change; the rest are updatable in place.
func resourceNetworkingSubnetV2() *schema.Resource {
	return &schema.Resource{
		Create: resourceNetworkingSubnetV2Create,
		Read:   resourceNetworkingSubnetV2Read,
		Update: resourceNetworkingSubnetV2Update,
		Delete: resourceNetworkingSubnetV2Delete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},
		Schema: map[string]*schema.Schema{
			// Region defaults to the OS_REGION_NAME environment variable.
			"region": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
			},
			"network_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"cidr": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},
			"tenant_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},
			// allocation_pools is a list of {start, end} IP ranges.
			"allocation_pools": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"start": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"end": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			"gateway_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},
			// no_gateway is deprecated; mutually exclusive with gateway_ip
			// (enforced in Create/Update).
			"no_gateway": &schema.Schema{
				Type:       schema.TypeBool,
				Optional:   true,
				ForceNew:   false,
				Deprecated: "This argument is no longer required. Instead, omit gateway_ip or set it to an empty string",
			},
			"ip_version": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  4,
				ForceNew: true,
			},
			"enable_dhcp": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: false,
				Default:  true,
			},
			"dns_nameservers": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				ForceNew: false,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
			// host_routes is a list of {destination_cidr, next_hop} entries.
			"host_routes": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: false,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"destination_cidr": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"next_hop": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			// value_specs passes provider-specific key/value pairs through to
			// the API (see MapValueSpecs in Create).
			"value_specs": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
// resourceNetworkingSubnetV2Create creates a Neutron subnet from the resource
// data, waits for it to become ACTIVE, then reads it back into state.
func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}
	createOpts := SubnetCreateOpts{
		subnets.CreateOpts{
			NetworkID:       d.Get("network_id").(string),
			CIDR:            d.Get("cidr").(string),
			Name:            d.Get("name").(string),
			TenantID:        d.Get("tenant_id").(string),
			AllocationPools: resourceSubnetAllocationPoolsV2(d),
			DNSNameservers:  resourceSubnetDNSNameserversV2(d),
			HostRoutes:      resourceSubnetHostRoutesV2(d),
			EnableDHCP:      nil,
		},
		MapValueSpecs(d),
	}
	if v, ok := d.GetOk("gateway_ip"); ok {
		// gateway_ip and no_gateway are mutually exclusive.
		noGateway := d.Get("no_gateway").(bool)
		if noGateway {
			return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.")
		}
		gatewayIP := v.(string)
		createOpts.GatewayIP = &gatewayIP
	}
	// NOTE(review): GetOk reports ok=false for a zero value, so an explicit
	// enable_dhcp = false is never sent and the API default applies —
	// confirm whether d.Get should be used here instead.
	if v, ok := d.GetOk("enable_dhcp"); ok {
		enableDHCP := v.(bool)
		createOpts.EnableDHCP = &enableDHCP
	}
	if v, ok := d.GetOk("ip_version"); ok {
		ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int))
		createOpts.IPVersion = ipVersion
	}
	s, err := subnets.Create(networkingClient, createOpts).Extract()
	if err != nil {
		return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err)
	}
	log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID)
	stateConf := &resource.StateChangeConf{
		Target:     []string{"ACTIVE"},
		Refresh:    waitForSubnetActive(networkingClient, s.ID),
		Timeout:    10 * time.Minute,
		Delay:      5 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	// Bug fix: the error from WaitForState was previously discarded, so a
	// subnet that never became ACTIVE was still written to state.
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf("Error waiting for subnet (%s) to become available: %s", s.ID, err)
	}
	d.SetId(s.ID)
	log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s)
	return resourceNetworkingSubnetV2Read(d, meta)
}
// resourceNetworkingSubnetV2Read refreshes the Terraform state from the
// Neutron subnet identified by d.Id(). A 404 from Neutron clears the ID via
// CheckDeleted so the resource is recreated on the next apply.
func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	s, err := subnets.Get(networkingClient, d.Id()).Extract()
	if err != nil {
		return CheckDeleted(d, err, "subnet")
	}

	log.Printf("[DEBUG] Retrieved Subnet %s: %+v", d.Id(), s)

	// BUG FIX: network_id was previously set twice; the duplicate call has
	// been removed.
	d.Set("network_id", s.NetworkID)
	d.Set("cidr", s.CIDR)
	d.Set("ip_version", s.IPVersion)
	d.Set("name", s.Name)
	d.Set("tenant_id", s.TenantID)
	d.Set("allocation_pools", s.AllocationPools)
	d.Set("gateway_ip", s.GatewayIP)
	d.Set("dns_nameservers", s.DNSNameservers)
	d.Set("host_routes", s.HostRoutes)
	d.Set("enable_dhcp", s.EnableDHCP)

	return nil
}
// resourceNetworkingSubnetV2Update pushes changed attributes to Neutron.
// Only fields reported changed by Terraform are included in the request.
func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	// Check if both gateway_ip and no_gateway are set
	if _, ok := d.GetOk("gateway_ip"); ok {
		noGateway := d.Get("no_gateway").(bool)
		if noGateway {
			return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.")
		}
	}

	var updateOpts subnets.UpdateOpts

	if d.HasChange("name") {
		updateOpts.Name = d.Get("name").(string)
	}

	if d.HasChange("gateway_ip") {
		updateOpts.GatewayIP = nil
		if v, ok := d.GetOk("gateway_ip"); ok {
			gatewayIP := v.(string)
			updateOpts.GatewayIP = &gatewayIP
		}
	}

	if d.HasChange("no_gateway") {
		// BUG FIX: assigning nil here was a no-op — gophercloud omits nil
		// fields from the request body, so the gateway was never removed.
		// Sending an explicit empty string tells Neutron to clear it.
		if d.Get("no_gateway").(bool) {
			gatewayIP := ""
			updateOpts.GatewayIP = &gatewayIP
		}
	}

	if d.HasChange("dns_nameservers") {
		updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d)
	}

	if d.HasChange("host_routes") {
		updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d)
	}

	if d.HasChange("enable_dhcp") {
		v := d.Get("enable_dhcp").(bool)
		updateOpts.EnableDHCP = &v
	}

	log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts)

	_, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract()
	if err != nil {
		return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err)
	}

	return resourceNetworkingSubnetV2Read(d, meta)
}
// resourceNetworkingSubnetV2Delete deletes the subnet and waits until Neutron
// reports it gone before clearing the resource ID.
func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	// The refresh function issues the actual delete and reports progress.
	waiter := &resource.StateChangeConf{
		Pending:    []string{"ACTIVE"},
		Target:     []string{"DELETED"},
		Refresh:    waitForSubnetDelete(networkingClient, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      5 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	if _, err = waiter.WaitForState(); err != nil {
		return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err)
	}

	d.SetId("")
	return nil
}
// resourceSubnetAllocationPoolsV2 converts the allocation_pools list from the
// resource configuration into gophercloud AllocationPool values.
func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool {
	rawPools := d.Get("allocation_pools").([]interface{})
	pools := make([]subnets.AllocationPool, 0, len(rawPools))
	for _, rawPool := range rawPools {
		pool := rawPool.(map[string]interface{})
		pools = append(pools, subnets.AllocationPool{
			Start: pool["start"].(string),
			End:   pool["end"].(string),
		})
	}
	return pools
}
// resourceSubnetDNSNameserversV2 flattens the dns_nameservers set into a
// plain string slice for the Neutron API.
func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string {
	nameserverSet := d.Get("dns_nameservers").(*schema.Set)
	servers := make([]string, 0, nameserverSet.Len())
	for _, raw := range nameserverSet.List() {
		servers = append(servers, raw.(string))
	}
	return servers
}
// resourceSubnetHostRoutesV2 converts the host_routes list from the resource
// configuration into gophercloud HostRoute values.
func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute {
	rawRoutes := d.Get("host_routes").([]interface{})
	routes := make([]subnets.HostRoute, 0, len(rawRoutes))
	for _, rawRoute := range rawRoutes {
		route := rawRoute.(map[string]interface{})
		routes = append(routes, subnets.HostRoute{
			DestinationCIDR: route["destination_cidr"].(string),
			NextHop:         route["next_hop"].(string),
		})
	}
	return routes
}
// resourceNetworkingSubnetV2DetermineIPVersion maps the numeric ip_version
// attribute (4 or 6) onto the gophercloud IPVersion type. Any other value
// yields the zero IPVersion, matching the original behavior.
func resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion {
	switch v {
	case 4:
		return gophercloud.IPv4
	case 6:
		return gophercloud.IPv6
	default:
		return gophercloud.IPVersion(0)
	}
}
// waitForSubnetActive returns a StateRefreshFunc that reports the subnet as
// "ACTIVE" once it can be fetched from Neutron.
func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		subnet, err := subnets.Get(networkingClient, subnetId).Extract()
		if err != nil {
			return nil, "", err
		}

		log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", subnet)
		return subnet, "ACTIVE", nil
	}
}
// waitForSubnetDelete returns a StateRefreshFunc that attempts to delete the
// subnet on each poll and reports "DELETED" once Neutron returns a 404 for it.
// The Get-then-Delete ordering matters: the Get distinguishes "already gone"
// from transient API errors before a delete is attempted.
func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId)

		s, err := subnets.Get(networkingClient, subnetId).Extract()
		if err != nil {
			// A 404 means the subnet is already gone — treat as success.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId)
				return s, "DELETED", nil
			}
			return s, "ACTIVE", err
		}

		err = subnets.Delete(networkingClient, subnetId).ExtractErr()
		if err != nil {
			// Another actor may have deleted it between the Get and Delete.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId)
				return s, "DELETED", nil
			}
			return s, "ACTIVE", err
		}

		log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId)
		return s, "ACTIVE", nil
	}
}
provider/openstack: gophercloud migration: Only set 'gateway_ip' to "" if 'no_gateway' == true.
package openstack
import (
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/networking/v2/subnets"
)
// resourceNetworkingSubnetV2 defines the schema and CRUD entry points for the
// OpenStack Neutron v2 subnet resource.
func resourceNetworkingSubnetV2() *schema.Resource {
	return &schema.Resource{
		Create: resourceNetworkingSubnetV2Create,
		Read:   resourceNetworkingSubnetV2Read,
		Update: resourceNetworkingSubnetV2Update,
		Delete: resourceNetworkingSubnetV2Delete,
		Importer: &schema.ResourceImporter{
			State: schema.ImportStatePassthrough,
		},

		Schema: map[string]*schema.Schema{
			// Region defaults to OS_REGION_NAME; changing it forces a new subnet.
			"region": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				ForceNew:    true,
				DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""),
			},
			"network_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"cidr": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},
			// tenant_id is Computed so admin-created subnets pick up the
			// caller's project when not set explicitly.
			"tenant_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: true,
				Computed: true,
			},
			"allocation_pools": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"start": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"end": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			// gateway_ip and no_gateway are mutually exclusive; the CRUD
			// functions enforce this at apply time.
			"gateway_ip": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				ForceNew: false,
			},
			"no_gateway": &schema.Schema{
				Type:       schema.TypeBool,
				Optional:   true,
				ForceNew:   false,
				Deprecated: "This argument is no longer required. Instead, omit gateway_ip or set it to an empty string",
			},
			"ip_version": &schema.Schema{
				Type:     schema.TypeInt,
				Optional: true,
				Default:  4,
				ForceNew: true,
			},
			"enable_dhcp": &schema.Schema{
				Type:     schema.TypeBool,
				Optional: true,
				ForceNew: false,
				Default:  true,
			},
			"dns_nameservers": &schema.Schema{
				Type:     schema.TypeSet,
				Optional: true,
				ForceNew: false,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
			"host_routes": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				ForceNew: false,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"destination_cidr": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
						"next_hop": &schema.Schema{
							Type:     schema.TypeString,
							Required: true,
						},
					},
				},
			},
			// Free-form vendor extensions passed through to Neutron.
			"value_specs": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
			},
		},
	}
}
// resourceNetworkingSubnetV2Create creates a Neutron subnet from the resource
// configuration, waits for it to become ACTIVE, and then reads it back into
// state.
func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	createOpts := SubnetCreateOpts{
		subnets.CreateOpts{
			NetworkID:       d.Get("network_id").(string),
			CIDR:            d.Get("cidr").(string),
			Name:            d.Get("name").(string),
			TenantID:        d.Get("tenant_id").(string),
			AllocationPools: resourceSubnetAllocationPoolsV2(d),
			DNSNameservers:  resourceSubnetDNSNameserversV2(d),
			HostRoutes:      resourceSubnetHostRoutesV2(d),
			// EnableDHCP is a *bool: nil lets Neutron choose its default
			// unless enable_dhcp is explicitly set below.
			EnableDHCP: nil,
		},
		MapValueSpecs(d),
	}

	// gateway_ip and no_gateway are mutually exclusive.
	if v, ok := d.GetOk("gateway_ip"); ok {
		noGateway := d.Get("no_gateway").(bool)
		if noGateway {
			return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.")
		}
		gatewayIP := v.(string)
		createOpts.GatewayIP = &gatewayIP
	}

	if v, ok := d.GetOk("enable_dhcp"); ok {
		enableDHCP := v.(bool)
		createOpts.EnableDHCP = &enableDHCP
	}

	if v, ok := d.GetOk("ip_version"); ok {
		ipVersion := resourceNetworkingSubnetV2DetermineIPVersion(v.(int))
		createOpts.IPVersion = ipVersion
	}

	s, err := subnets.Create(networkingClient, createOpts).Extract()
	if err != nil {
		return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err)
	}

	// Record the ID as soon as the subnet exists so a failure while waiting
	// below does not orphan the resource.
	d.SetId(s.ID)

	log.Printf("[DEBUG] Waiting for Subnet (%s) to become available", s.ID)
	stateConf := &resource.StateChangeConf{
		Target:     []string{"ACTIVE"},
		Refresh:    waitForSubnetActive(networkingClient, s.ID),
		Timeout:    10 * time.Minute,
		Delay:      5 * time.Second,
		MinTimeout: 3 * time.Second,
	}
	// BUG FIX: the wait error was previously discarded, which could report a
	// successful create for a subnet that never became available.
	if _, err := stateConf.WaitForState(); err != nil {
		return fmt.Errorf("Error waiting for subnet (%s) to become available: %s", s.ID, err)
	}

	log.Printf("[DEBUG] Created Subnet %s: %#v", s.ID, s)
	return resourceNetworkingSubnetV2Read(d, meta)
}
// resourceNetworkingSubnetV2Read refreshes the Terraform state from the
// Neutron subnet identified by d.Id(). A 404 from Neutron clears the ID via
// CheckDeleted so the resource is recreated on the next apply.
func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	s, err := subnets.Get(networkingClient, d.Id()).Extract()
	if err != nil {
		return CheckDeleted(d, err, "subnet")
	}

	log.Printf("[DEBUG] Retrieved Subnet %s: %+v", d.Id(), s)

	// BUG FIX: network_id was previously set twice; the duplicate call has
	// been removed.
	d.Set("network_id", s.NetworkID)
	d.Set("cidr", s.CIDR)
	d.Set("ip_version", s.IPVersion)
	d.Set("name", s.Name)
	d.Set("tenant_id", s.TenantID)
	d.Set("allocation_pools", s.AllocationPools)
	d.Set("gateway_ip", s.GatewayIP)
	d.Set("dns_nameservers", s.DNSNameservers)
	d.Set("host_routes", s.HostRoutes)
	d.Set("enable_dhcp", s.EnableDHCP)

	return nil
}
// resourceNetworkingSubnetV2Update pushes changed attributes to Neutron.
// Only fields reported changed by Terraform are included in the request.
func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	// Check if both gateway_ip and no_gateway are set
	if _, ok := d.GetOk("gateway_ip"); ok {
		noGateway := d.Get("no_gateway").(bool)
		if noGateway {
			return fmt.Errorf("Both gateway_ip and no_gateway cannot be set.")
		}
	}

	var updateOpts subnets.UpdateOpts

	if d.HasChange("name") {
		updateOpts.Name = d.Get("name").(string)
	}

	if d.HasChange("gateway_ip") {
		updateOpts.GatewayIP = nil
		if v, ok := d.GetOk("gateway_ip"); ok {
			gatewayIP := v.(string)
			updateOpts.GatewayIP = &gatewayIP
		}
	}

	if d.HasChange("no_gateway") {
		// An explicit empty (non-nil) gateway_ip tells Neutron to remove the
		// gateway; a nil pointer would simply be omitted from the request.
		if d.Get("no_gateway").(bool) {
			gatewayIP := ""
			updateOpts.GatewayIP = &gatewayIP
		}
	}

	if d.HasChange("dns_nameservers") {
		updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d)
	}

	if d.HasChange("host_routes") {
		updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d)
	}

	if d.HasChange("enable_dhcp") {
		v := d.Get("enable_dhcp").(bool)
		updateOpts.EnableDHCP = &v
	}

	log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts)

	_, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract()
	if err != nil {
		return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err)
	}

	return resourceNetworkingSubnetV2Read(d, meta)
}
// resourceNetworkingSubnetV2Delete deletes the subnet and waits until Neutron
// reports it gone before clearing the resource ID.
func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)
	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
	if err != nil {
		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
	}

	// The refresh function issues the actual delete and reports progress.
	waiter := &resource.StateChangeConf{
		Pending:    []string{"ACTIVE"},
		Target:     []string{"DELETED"},
		Refresh:    waitForSubnetDelete(networkingClient, d.Id()),
		Timeout:    10 * time.Minute,
		Delay:      5 * time.Second,
		MinTimeout: 3 * time.Second,
	}

	if _, err = waiter.WaitForState(); err != nil {
		return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err)
	}

	d.SetId("")
	return nil
}
// resourceSubnetAllocationPoolsV2 converts the allocation_pools list from the
// resource configuration into gophercloud AllocationPool values.
func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool {
	rawPools := d.Get("allocation_pools").([]interface{})
	pools := make([]subnets.AllocationPool, 0, len(rawPools))
	for _, rawPool := range rawPools {
		pool := rawPool.(map[string]interface{})
		pools = append(pools, subnets.AllocationPool{
			Start: pool["start"].(string),
			End:   pool["end"].(string),
		})
	}
	return pools
}
// resourceSubnetDNSNameserversV2 flattens the dns_nameservers set into a
// plain string slice for the Neutron API.
func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string {
	nameserverSet := d.Get("dns_nameservers").(*schema.Set)
	servers := make([]string, 0, nameserverSet.Len())
	for _, raw := range nameserverSet.List() {
		servers = append(servers, raw.(string))
	}
	return servers
}
// resourceSubnetHostRoutesV2 converts the host_routes list from the resource
// configuration into gophercloud HostRoute values.
func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute {
	rawRoutes := d.Get("host_routes").([]interface{})
	routes := make([]subnets.HostRoute, 0, len(rawRoutes))
	for _, rawRoute := range rawRoutes {
		route := rawRoute.(map[string]interface{})
		routes = append(routes, subnets.HostRoute{
			DestinationCIDR: route["destination_cidr"].(string),
			NextHop:         route["next_hop"].(string),
		})
	}
	return routes
}
// resourceNetworkingSubnetV2DetermineIPVersion maps the numeric ip_version
// attribute (4 or 6) onto the gophercloud IPVersion type. Any other value
// yields the zero IPVersion, matching the original behavior.
func resourceNetworkingSubnetV2DetermineIPVersion(v int) gophercloud.IPVersion {
	switch v {
	case 4:
		return gophercloud.IPv4
	case 6:
		return gophercloud.IPv6
	default:
		return gophercloud.IPVersion(0)
	}
}
// waitForSubnetActive returns a StateRefreshFunc that reports the subnet as
// "ACTIVE" once it can be fetched from Neutron.
func waitForSubnetActive(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		subnet, err := subnets.Get(networkingClient, subnetId).Extract()
		if err != nil {
			return nil, "", err
		}

		log.Printf("[DEBUG] OpenStack Neutron Subnet: %+v", subnet)
		return subnet, "ACTIVE", nil
	}
}
// waitForSubnetDelete returns a StateRefreshFunc that attempts to delete the
// subnet on each poll and reports "DELETED" once Neutron returns a 404 for it.
// The Get-then-Delete ordering matters: the Get distinguishes "already gone"
// from transient API errors before a delete is attempted.
func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId string) resource.StateRefreshFunc {
	return func() (interface{}, string, error) {
		log.Printf("[DEBUG] Attempting to delete OpenStack Subnet %s.\n", subnetId)

		s, err := subnets.Get(networkingClient, subnetId).Extract()
		if err != nil {
			// A 404 means the subnet is already gone — treat as success.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId)
				return s, "DELETED", nil
			}
			return s, "ACTIVE", err
		}

		err = subnets.Delete(networkingClient, subnetId).ExtractErr()
		if err != nil {
			// Another actor may have deleted it between the Get and Delete.
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				log.Printf("[DEBUG] Successfully deleted OpenStack Subnet %s", subnetId)
				return s, "DELETED", nil
			}
			return s, "ACTIVE", err
		}

		log.Printf("[DEBUG] OpenStack Subnet %s still active.\n", subnetId)
		return s, "ACTIVE", nil
	}
}
|
package lib
import (
"fmt"
"github.com/dgrijalva/jwt-go"
"golang.org/x/net/context"
)
// The key type is unexported to prevent collisions with context keys defined
// in other packages; only this package can create values of it.
type key int

// idtKey is the context key under which an *Identity is stored. Its value of
// zero is arbitrary. If this package defined other context keys, they would
// have different integer values.
const idtKey key = 0
// NewContext returns a new Context carrying the given Identity, retrievable
// later via FromContext or MustFromContext.
func NewContext(ctx context.Context, idt *Identity) context.Context {
	return context.WithValue(ctx, idtKey, idt)
}
// FromContext extracts the Identity stored in ctx, if present. The boolean
// result is false when no identity was attached via NewContext.
func FromContext(ctx context.Context) (*Identity, bool) {
	// ctx.Value returns nil for a missing key, which makes the type
	// assertion report ok == false.
	idt, ok := ctx.Value(idtKey).(*Identity)
	return idt, ok
}
// MustFromContext extracts the Identity from ctx, panicking if none was
// registered. Intended for handlers that run strictly behind the auth layer.
func MustFromContext(ctx context.Context) *Identity {
	if idt, ok := ctx.Value(idtKey).(*Identity); ok {
		return idt
	}
	panic("identity is not registered")
}
// claimString extracts the named claim as a string, returning an error that
// names the claim and its raw value when the type assertion fails.
func claimString(claims map[string]interface{}, name string) (string, error) {
	v, ok := claims[name].(string)
	if !ok {
		return "", fmt.Errorf("failed cast to string of %s:%s", name,
			fmt.Sprintln(claims[name]))
	}
	return v, nil
}

// ParseToken verifies the JWT t against the HMAC secret and builds an
// Identity from its pid, idp, display_name and email claims. It returns an
// error if verification fails or any claim is missing or not a string.
func ParseToken(t, secret string) (*Identity, error) {
	token, err := jwt.Parse(t, func(token *jwt.Token) (key interface{}, err error) {
		return []byte(secret), nil
	})
	if err != nil {
		return nil, err
	}

	// BUG FIX: the display_name failure path previously reported (and looked
	// up) the misspelled claim "display_ame", producing a useless message.
	identity := &Identity{}
	if identity.Pid, err = claimString(token.Claims, "pid"); err != nil {
		return nil, err
	}
	if identity.Idp, err = claimString(token.Claims, "idp"); err != nil {
		return nil, err
	}
	if identity.DisplayName, err = claimString(token.Claims, "display_name"); err != nil {
		return nil, err
	}
	if identity.Email, err = claimString(token.Claims, "email"); err != nil {
		return nil, err
	}
	return identity, nil
}
type Identity struct {
Pid string `json:"pid"`
Idp string `json:"idp"`
Email string `json:"email"`
DisplayName string `json:"email"`
}
Identity implements Stringer
package lib
import (
"fmt"
"github.com/dgrijalva/jwt-go"
"golang.org/x/net/context"
)
// The key type is unexported to prevent collisions with context keys defined
// in other packages; only this package can create values of it.
type key int

// idtKey is the context key under which an *Identity is stored. Its value of
// zero is arbitrary. If this package defined other context keys, they would
// have different integer values.
const idtKey key = 0
// NewContext returns a new Context carrying the given Identity, retrievable
// later via FromContext or MustFromContext.
func NewContext(ctx context.Context, idt *Identity) context.Context {
	return context.WithValue(ctx, idtKey, idt)
}
// FromContext extracts the Identity stored in ctx, if present. The boolean
// result is false when no identity was attached via NewContext.
func FromContext(ctx context.Context) (*Identity, bool) {
	// ctx.Value returns nil for a missing key, which makes the type
	// assertion report ok == false.
	idt, ok := ctx.Value(idtKey).(*Identity)
	return idt, ok
}
// MustFromContext extracts the Identity from ctx, panicking if none was
// registered. Intended for handlers that run strictly behind the auth layer.
func MustFromContext(ctx context.Context) *Identity {
	if idt, ok := ctx.Value(idtKey).(*Identity); ok {
		return idt
	}
	panic("identity is not registered")
}
// claimString extracts the named claim as a string, returning an error that
// names the claim and its raw value when the type assertion fails.
func claimString(claims map[string]interface{}, name string) (string, error) {
	v, ok := claims[name].(string)
	if !ok {
		return "", fmt.Errorf("failed cast to string of %s:%s", name,
			fmt.Sprintln(claims[name]))
	}
	return v, nil
}

// ParseToken verifies the JWT t against the HMAC secret and builds an
// Identity from its pid, idp, display_name and email claims. It returns an
// error if verification fails or any claim is missing or not a string.
func ParseToken(t, secret string) (*Identity, error) {
	token, err := jwt.Parse(t, func(token *jwt.Token) (key interface{}, err error) {
		return []byte(secret), nil
	})
	if err != nil {
		return nil, err
	}

	// BUG FIX: the display_name failure path previously reported (and looked
	// up) the misspelled claim "display_ame", producing a useless message.
	identity := &Identity{}
	if identity.Pid, err = claimString(token.Claims, "pid"); err != nil {
		return nil, err
	}
	if identity.Idp, err = claimString(token.Claims, "idp"); err != nil {
		return nil, err
	}
	if identity.DisplayName, err = claimString(token.Claims, "display_name"); err != nil {
		return nil, err
	}
	if identity.Email, err = claimString(token.Claims, "email"); err != nil {
		return nil, err
	}
	return identity, nil
}
type Identity struct {
Pid string `json:"pid"`
Idp string `json:"idp"`
Email string `json:"email"`
DisplayName string `json:"email"`
}
// String implements fmt.Stringer, rendering the identity as
// "pid:<pid> idp:<idp> email:<email>". DisplayName is not included.
func (i *Identity) String() string {
	return "pid:" + i.Pid + " idp:" + i.Idp + " email:" + i.Email
}
|
package aws
import (
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAwsEc2ClientVpnNetworkAssociation_basic creates a Client VPN network
// association and verifies it exists; destroy is checked by the shared
// CheckDestroy function. Requires the TLS provider for the test certificate.
func TestAccAwsEc2ClientVpnNetworkAssociation_basic(t *testing.T) {
	var assoc1 ec2.TargetNetwork
	rStr := acctest.RandString(5)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProvidersWithTLS,
		CheckDestroy: testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccEc2ClientVpnNetworkAssociationConfig(rStr),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsEc2ClientVpnNetworkAssociationExists("aws_ec2_client_vpn_network_association.test", &assoc1),
				),
			},
		},
	})
}
// TestAccAwsEc2ClientVpnNetworkAssociation_disappears verifies graceful
// handling when the association is deleted out-of-band: the follow-up plan is
// expected to be non-empty rather than erroring.
func TestAccAwsEc2ClientVpnNetworkAssociation_disappears(t *testing.T) {
	var assoc1 ec2.TargetNetwork
	rStr := acctest.RandString(5)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProvidersWithTLS,
		CheckDestroy: testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccEc2ClientVpnNetworkAssociationConfig(rStr),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsEc2ClientVpnNetworkAssociationExists("aws_ec2_client_vpn_network_association.test", &assoc1),
					testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears(&assoc1),
				),
				// The out-of-band delete leaves state behind by design.
				ExpectNonEmptyPlan: true,
			},
		},
	})
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy verifies that every
// Client VPN network association in state has been disassociated.
func testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ec2conn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_ec2_client_vpn_network_association" {
			continue
		}

		resp, err := conn.DescribeClientVpnTargetNetworks(&ec2.DescribeClientVpnTargetNetworksInput{
			ClientVpnEndpointId: aws.String(rs.Primary.Attributes["client_vpn_endpoint_id"]),
			AssociationIds:      []*string{aws.String(rs.Primary.ID)},
		})
		if err != nil {
			// BUG FIX: the error was silently discarded, and a nil resp then
			// panicked on the range below. The endpoint is usually already
			// deleted here, so a describe failure means nothing is left to
			// verify — preserve the original best-effort intent and move on.
			continue
		}

		for _, v := range resp.ClientVpnTargetNetworks {
			// BUG FIX: use aws.StringValue to avoid nil-pointer dereferences
			// on AssociationId / Status.Code.
			if aws.StringValue(v.AssociationId) == rs.Primary.ID && aws.StringValue(v.Status.Code) != "Disassociated" {
				return fmt.Errorf("[DESTROY ERROR] Client VPN network association (%s) not deleted", rs.Primary.ID)
			}
		}
	}

	return nil
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears disassociates the
// target network out-of-band and waits for the disassociation to complete,
// simulating deletion outside of Terraform.
func testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears(targetNetwork *ec2.TargetNetwork) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := testAccProvider.Meta().(*AWSClient).ec2conn

		_, err := conn.DisassociateClientVpnTargetNetwork(&ec2.DisassociateClientVpnTargetNetworkInput{
			AssociationId:       targetNetwork.AssociationId,
			ClientVpnEndpointId: targetNetwork.ClientVpnEndpointId,
		})
		if err != nil {
			return err
		}

		// Disassociation is asynchronous; poll until it finishes.
		stateConf := &resource.StateChangeConf{
			Pending: []string{ec2.AssociationStatusCodeDisassociating},
			Target:  []string{ec2.AssociationStatusCodeDisassociated},
			Refresh: clientVpnNetworkAssociationRefreshFunc(conn, aws.StringValue(targetNetwork.AssociationId), aws.StringValue(targetNetwork.ClientVpnEndpointId)),
			Timeout: 10 * time.Minute,
		}

		_, err = stateConf.WaitForState()
		return err
	}
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationExists looks up the named
// resource in state, confirms the association is live (not "Disassociated"),
// and copies it into assoc for later checks.
func testAccCheckAwsEc2ClientVpnNetworkAssociationExists(name string, assoc *ec2.TargetNetwork) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).ec2conn

		resp, err := conn.DescribeClientVpnTargetNetworks(&ec2.DescribeClientVpnTargetNetworksInput{
			ClientVpnEndpointId: aws.String(rs.Primary.Attributes["client_vpn_endpoint_id"]),
			AssociationIds:      []*string{aws.String(rs.Primary.ID)},
		})
		if err != nil {
			return fmt.Errorf("Error reading Client VPN network association (%s): %s", rs.Primary.ID, err)
		}

		for _, a := range resp.ClientVpnTargetNetworks {
			if *a.AssociationId == rs.Primary.ID && !(*a.Status.Code == "Disassociated") {
				*assoc = *a
				return nil
			}
		}

		return fmt.Errorf("Client VPN network association (%s) not found", rs.Primary.ID)
	}
}
// testAccEc2ClientVpnNetworkAssociationConfig renders the HCL for a VPC,
// subnet, self-signed TLS certificate (via the TLS provider), ACM import,
// Client VPN endpoint, and the network association under test. rName is
// interpolated into the three resource name tags/descriptions.
func testAccEc2ClientVpnNetworkAssociationConfig(rName string) string {
	return fmt.Sprintf(`
resource "aws_vpc" "test" {
  cidr_block = "10.1.0.0/16"
  tags = {
    Name = "terraform-testacc-subnet-%s"
  }
}

resource "aws_subnet" "test" {
  cidr_block              = "10.1.1.0/24"
  vpc_id                  = "${aws_vpc.test.id}"
  map_public_ip_on_launch = true
  tags = {
    Name = "tf-acc-subnet-%s"
  }
}

resource "tls_private_key" "example" {
  algorithm = "RSA"
}

resource "tls_self_signed_cert" "example" {
  key_algorithm   = "RSA"
  private_key_pem = "${tls_private_key.example.private_key_pem}"
  subject {
    common_name  = "example.com"
    organization = "ACME Examples, Inc"
  }
  validity_period_hours = 12
  allowed_uses = [
    "key_encipherment",
    "digital_signature",
    "server_auth",
  ]
}

resource "aws_acm_certificate" "cert" {
  private_key      = "${tls_private_key.example.private_key_pem}"
  certificate_body = "${tls_self_signed_cert.example.cert_pem}"
}

resource "aws_ec2_client_vpn_endpoint" "test" {
  description            = "terraform-testacc-clientvpn-%s"
  server_certificate_arn = "${aws_acm_certificate.cert.arn}"
  client_cidr_block      = "10.0.0.0/16"
  authentication_options {
    type                       = "certificate-authentication"
    root_certificate_chain_arn = "${aws_acm_certificate.cert.arn}"
  }
  connection_log_options {
    enabled = false
  }
}

resource "aws_ec2_client_vpn_network_association" "test" {
  client_vpn_endpoint_id = "${aws_ec2_client_vpn_endpoint.test.id}"
  subnet_id              = "${aws_subnet.test.id}"
}
`, rName, rName, rName)
}
tests/resource/aws_ec2_client_vpn_network_association: Use internal implementation for TLS key/certificate
Reference: https://github.com/terraform-providers/terraform-provider-aws/issues/10023
Output from acceptance testing:
```
--- PASS: TestAccAwsEc2ClientVpnNetworkAssociation_basic (549.92s)
--- PASS: TestAccAwsEc2ClientVpnNetworkAssociation_disappears (581.03s)
```
package aws
import (
"fmt"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
// TestAccAwsEc2ClientVpnNetworkAssociation_basic creates a Client VPN network
// association and verifies it exists; destroy is checked by the shared
// CheckDestroy function. The test certificate is generated internally, so the
// plain provider set suffices.
func TestAccAwsEc2ClientVpnNetworkAssociation_basic(t *testing.T) {
	var assoc1 ec2.TargetNetwork
	rStr := acctest.RandString(5)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccEc2ClientVpnNetworkAssociationConfig(rStr),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsEc2ClientVpnNetworkAssociationExists("aws_ec2_client_vpn_network_association.test", &assoc1),
				),
			},
		},
	})
}
// TestAccAwsEc2ClientVpnNetworkAssociation_disappears verifies graceful
// handling when the association is deleted out-of-band: the follow-up plan is
// expected to be non-empty rather than erroring.
func TestAccAwsEc2ClientVpnNetworkAssociation_disappears(t *testing.T) {
	var assoc1 ec2.TargetNetwork
	rStr := acctest.RandString(5)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccEc2ClientVpnNetworkAssociationConfig(rStr),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAwsEc2ClientVpnNetworkAssociationExists("aws_ec2_client_vpn_network_association.test", &assoc1),
					testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears(&assoc1),
				),
				// The out-of-band delete leaves state behind by design.
				ExpectNonEmptyPlan: true,
			},
		},
	})
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy verifies that every
// Client VPN network association in state has been disassociated.
func testAccCheckAwsEc2ClientVpnNetworkAssociationDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ec2conn

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "aws_ec2_client_vpn_network_association" {
			continue
		}

		resp, err := conn.DescribeClientVpnTargetNetworks(&ec2.DescribeClientVpnTargetNetworksInput{
			ClientVpnEndpointId: aws.String(rs.Primary.Attributes["client_vpn_endpoint_id"]),
			AssociationIds:      []*string{aws.String(rs.Primary.ID)},
		})
		if err != nil {
			// BUG FIX: the error was silently discarded, and a nil resp then
			// panicked on the range below. The endpoint is usually already
			// deleted here, so a describe failure means nothing is left to
			// verify — preserve the original best-effort intent and move on.
			continue
		}

		for _, v := range resp.ClientVpnTargetNetworks {
			// BUG FIX: use aws.StringValue to avoid nil-pointer dereferences
			// on AssociationId / Status.Code.
			if aws.StringValue(v.AssociationId) == rs.Primary.ID && aws.StringValue(v.Status.Code) != "Disassociated" {
				return fmt.Errorf("[DESTROY ERROR] Client VPN network association (%s) not deleted", rs.Primary.ID)
			}
		}
	}

	return nil
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears disassociates the
// target network out-of-band and waits for the disassociation to complete,
// simulating deletion outside of Terraform.
func testAccCheckAwsEc2ClientVpnNetworkAssociationDisappears(targetNetwork *ec2.TargetNetwork) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := testAccProvider.Meta().(*AWSClient).ec2conn

		_, err := conn.DisassociateClientVpnTargetNetwork(&ec2.DisassociateClientVpnTargetNetworkInput{
			AssociationId:       targetNetwork.AssociationId,
			ClientVpnEndpointId: targetNetwork.ClientVpnEndpointId,
		})
		if err != nil {
			return err
		}

		// Disassociation is asynchronous; poll until it finishes.
		stateConf := &resource.StateChangeConf{
			Pending: []string{ec2.AssociationStatusCodeDisassociating},
			Target:  []string{ec2.AssociationStatusCodeDisassociated},
			Refresh: clientVpnNetworkAssociationRefreshFunc(conn, aws.StringValue(targetNetwork.AssociationId), aws.StringValue(targetNetwork.ClientVpnEndpointId)),
			Timeout: 10 * time.Minute,
		}

		_, err = stateConf.WaitForState()
		return err
	}
}
// testAccCheckAwsEc2ClientVpnNetworkAssociationExists looks up the named
// resource in state, confirms the association is live (not "Disassociated"),
// and copies it into assoc for later checks.
func testAccCheckAwsEc2ClientVpnNetworkAssociationExists(name string, assoc *ec2.TargetNetwork) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[name]
		if !ok {
			return fmt.Errorf("Not found: %s", name)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		conn := testAccProvider.Meta().(*AWSClient).ec2conn

		resp, err := conn.DescribeClientVpnTargetNetworks(&ec2.DescribeClientVpnTargetNetworksInput{
			ClientVpnEndpointId: aws.String(rs.Primary.Attributes["client_vpn_endpoint_id"]),
			AssociationIds:      []*string{aws.String(rs.Primary.ID)},
		})
		if err != nil {
			return fmt.Errorf("Error reading Client VPN network association (%s): %s", rs.Primary.ID, err)
		}

		for _, a := range resp.ClientVpnTargetNetworks {
			if *a.AssociationId == rs.Primary.ID && !(*a.Status.Code == "Disassociated") {
				*assoc = *a
				return nil
			}
		}

		return fmt.Errorf("Client VPN network association (%s) not found", rs.Primary.ID)
	}
}
// testAccEc2ClientVpnNetworkAssociationConfigAcmCertificateBase renders an
// aws_acm_certificate resource backed by an in-process self-signed RSA
// certificate, removing the dependency on the external TLS provider. The PEM
// newlines are escaped so the material can be embedded in HCL strings.
func testAccEc2ClientVpnNetworkAssociationConfigAcmCertificateBase() string {
	key := tlsRsaPrivateKeyPem(2048)
	certificate := tlsRsaX509SelfSignedCertificatePem(key, "example.com")

	return fmt.Sprintf(`
resource "aws_acm_certificate" "test" {
  certificate_body = "%[1]s"
  private_key      = "%[2]s"
}
`, tlsPemEscapeNewlines(certificate), tlsPemEscapeNewlines(key))
}
// testAccEc2ClientVpnNetworkAssociationConfig renders the HCL for a VPC,
// subnet, Client VPN endpoint, and the network association under test, on top
// of the internally generated ACM certificate base config. rName is
// interpolated into the resource name tags/description.
func testAccEc2ClientVpnNetworkAssociationConfig(rName string) string {
	return testAccEc2ClientVpnNetworkAssociationConfigAcmCertificateBase() + fmt.Sprintf(`
resource "aws_vpc" "test" {
  cidr_block = "10.1.0.0/16"
  tags = {
    Name = "terraform-testacc-subnet-%s"
  }
}

resource "aws_subnet" "test" {
  cidr_block              = "10.1.1.0/24"
  vpc_id                  = "${aws_vpc.test.id}"
  map_public_ip_on_launch = true
  tags = {
    Name = "tf-acc-subnet-%s"
  }
}

resource "aws_ec2_client_vpn_endpoint" "test" {
  description            = "terraform-testacc-clientvpn-%s"
  server_certificate_arn = "${aws_acm_certificate.test.arn}"
  client_cidr_block      = "10.0.0.0/16"
  authentication_options {
    type                       = "certificate-authentication"
    root_certificate_chain_arn = "${aws_acm_certificate.test.arn}"
  }
  connection_log_options {
    enabled = false
  }
}

resource "aws_ec2_client_vpn_network_association" "test" {
  client_vpn_endpoint_id = "${aws_ec2_client_vpn_endpoint.test.id}"
  subnet_id              = "${aws_subnet.test.id}"
}
`, rName, rName, rName)
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"path/filepath"
"runtime"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/master/ports"
kruntime "k8s.io/kubernetes/pkg/runtime"
)
const (
	// defaultRootDir is the kubelet's default state directory.
	defaultRootDir = "/var/lib/kubelet"

	// When these values are updated, also update test/e2e/framework/util.go
	defaultPodInfraContainerImageName    = "gcr.io/google_containers/pause"
	defaultPodInfraContainerImageVersion = "3.0"
	// defaultPodInfraContainerImage is architecture-specific, e.g.
	// "gcr.io/google_containers/pause-amd64:3.0" on amd64.
	defaultPodInfraContainerImage = defaultPodInfraContainerImageName +
		"-" + runtime.GOARCH + ":" +
		defaultPodInfraContainerImageVersion

	// From pkg/kubelet/rkt/rkt.go to avoid circular import
	defaultRktAPIServiceEndpoint = "localhost:15441"

	// AutoDetectCloudProvider selects cloud-provider auto-detection.
	AutoDetectCloudProvider = "auto-detect"

	// Default iptables mark bits used by kube-proxy for masquerade/drop.
	defaultIPTablesMasqueradeBit = 14
	defaultIPTablesDropBit       = 15
)

// zeroDuration is the zero value used to detect unset Duration fields.
var zeroDuration = unversioned.Duration{}
// addDefaultingFuncs registers every SetDefaults_* function in this file
// with the given scheme so defaults are applied during decoding/conversion.
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
	defaulters := []interface{}{
		SetDefaults_KubeProxyConfiguration,
		SetDefaults_KubeSchedulerConfiguration,
		SetDefaults_LeaderElectionConfiguration,
		SetDefaults_KubeletConfiguration,
	}
	return scheme.AddDefaultingFuncs(defaulters...)
}
// SetDefaults_KubeProxyConfiguration fills in documented defaults for any
// zero-valued fields of a KubeProxyConfiguration. Pointer fields are only
// defaulted when nil so an explicit 0/false from the user is preserved.
func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
	if obj.BindAddress == "" {
		obj.BindAddress = "0.0.0.0"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10249
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = "127.0.0.1"
	}
	if obj.OOMScoreAdj == nil {
		oomScore := int32(qos.KubeProxyOOMScoreAdj)
		obj.OOMScoreAdj = &oomScore
	}
	if obj.ResourceContainer == "" {
		obj.ResourceContainer = "/kube-proxy"
	}
	if obj.IPTablesSyncPeriod.Duration == 0 {
		obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second}
	}
	if obj.UDPIdleTimeout.Duration == 0 {
		obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond}
	}
	// An explicitly set ConntrackMax wins; per-core scaling is the default
	// only when neither knob was provided.
	if obj.ConntrackMax == 0 && obj.ConntrackMaxPerCore == 0 {
		obj.ConntrackMaxPerCore = 32 * 1024
	}
	if obj.IPTablesMasqueradeBit == nil {
		masqBit := int32(14)
		obj.IPTablesMasqueradeBit = &masqBit
	}
	if obj.ConntrackTCPEstablishedTimeout.Duration == 0 {
		obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)
	}
}
// SetDefaults_KubeSchedulerConfiguration fills in documented defaults for
// any zero-valued fields of a KubeSchedulerConfiguration. Every check is
// independent, so the order below groups networking, algorithm, and API
// client settings for readability.
func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
	if obj.Address == "" {
		obj.Address = "0.0.0.0"
	}
	if obj.Port == 0 {
		obj.Port = ports.SchedulerPort
	}
	if obj.AlgorithmProvider == "" {
		obj.AlgorithmProvider = "DefaultProvider"
	}
	if obj.SchedulerName == "" {
		obj.SchedulerName = api.DefaultSchedulerName
	}
	if obj.HardPodAffinitySymmetricWeight == 0 {
		obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
	}
	if obj.FailureDomains == "" {
		obj.FailureDomains = api.DefaultFailureDomains
	}
	if obj.ContentType == "" {
		obj.ContentType = "application/vnd.kubernetes.protobuf"
	}
	if obj.KubeAPIQPS == 0 {
		obj.KubeAPIQPS = 50.0
	}
	if obj.KubeAPIBurst == 0 {
		obj.KubeAPIBurst = 100
	}
}
// SetDefaults_LeaderElectionConfiguration fills in documented defaults for
// any zero-valued duration fields of a LeaderElectionConfiguration.
func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
	if obj.LeaseDuration == zeroDuration {
		obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second}
	}
	if obj.RenewDeadline == zeroDuration {
		obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second}
	}
	if obj.RetryPeriod == zeroDuration {
		obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second}
	}
}
// SetDefaults_KubeletConfiguration fills in documented defaults for any
// zero-valued fields of a KubeletConfiguration. Pointer fields are only
// defaulted when nil so an explicit false/0 from the user is preserved.
//
// Bug fix: the SeccompProfileRoot default was computed with filepath.Join
// but the result was discarded, leaving the field empty; it is now assigned.
func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
	if obj.Address == "" {
		obj.Address = "0.0.0.0"
	}
	if obj.CloudProvider == "" {
		obj.CloudProvider = AutoDetectCloudProvider
	}
	if obj.CAdvisorPort == 0 {
		obj.CAdvisorPort = 4194
	}
	if obj.VolumeStatsAggPeriod == zeroDuration {
		obj.VolumeStatsAggPeriod = unversioned.Duration{Duration: time.Minute}
	}
	if obj.CertDirectory == "" {
		obj.CertDirectory = "/var/run/kubernetes"
	}
	if obj.ConfigureCBR0 == nil {
		obj.ConfigureCBR0 = boolVar(false)
	}
	if obj.CgroupsPerQOS == nil {
		obj.CgroupsPerQOS = boolVar(false)
	}
	if obj.ContainerRuntime == "" {
		obj.ContainerRuntime = "docker"
	}
	if obj.RuntimeRequestTimeout == zeroDuration {
		obj.RuntimeRequestTimeout = unversioned.Duration{Duration: 2 * time.Minute}
	}
	if obj.CPUCFSQuota == nil {
		obj.CPUCFSQuota = boolVar(true)
	}
	if obj.DockerExecHandlerName == "" {
		obj.DockerExecHandlerName = "native"
	}
	if obj.DockerEndpoint == "" {
		obj.DockerEndpoint = "unix:///var/run/docker.sock"
	}
	if obj.EventBurst == 0 {
		obj.EventBurst = 10
	}
	if obj.EventRecordQPS == nil {
		temp := int32(5)
		obj.EventRecordQPS = &temp
	}
	if obj.EnableControllerAttachDetach == nil {
		obj.EnableControllerAttachDetach = boolVar(true)
	}
	if obj.EnableDebuggingHandlers == nil {
		obj.EnableDebuggingHandlers = boolVar(true)
	}
	if obj.EnableServer == nil {
		obj.EnableServer = boolVar(true)
	}
	if obj.FileCheckFrequency == zeroDuration {
		obj.FileCheckFrequency = unversioned.Duration{Duration: 20 * time.Second}
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = "127.0.0.1"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10248
	}
	if obj.HostNetworkSources == nil {
		obj.HostNetworkSources = []string{kubetypes.AllSource}
	}
	if obj.HostPIDSources == nil {
		obj.HostPIDSources = []string{kubetypes.AllSource}
	}
	if obj.HostIPCSources == nil {
		obj.HostIPCSources = []string{kubetypes.AllSource}
	}
	if obj.HTTPCheckFrequency == zeroDuration {
		obj.HTTPCheckFrequency = unversioned.Duration{Duration: 20 * time.Second}
	}
	if obj.ImageMinimumGCAge == zeroDuration {
		obj.ImageMinimumGCAge = unversioned.Duration{Duration: 2 * time.Minute}
	}
	if obj.ImageGCHighThresholdPercent == nil {
		temp := int32(90)
		obj.ImageGCHighThresholdPercent = &temp
	}
	if obj.ImageGCLowThresholdPercent == nil {
		temp := int32(80)
		obj.ImageGCLowThresholdPercent = &temp
	}
	if obj.LowDiskSpaceThresholdMB == 0 {
		obj.LowDiskSpaceThresholdMB = 256
	}
	if obj.MasterServiceNamespace == "" {
		obj.MasterServiceNamespace = api.NamespaceDefault
	}
	if obj.MaxContainerCount == nil {
		// -1 means "no limit" on dead containers kept for inspection.
		temp := int32(-1)
		obj.MaxContainerCount = &temp
	}
	if obj.MaxPerPodContainerCount == 0 {
		obj.MaxPerPodContainerCount = 1
	}
	if obj.MaxOpenFiles == 0 {
		obj.MaxOpenFiles = 1000000
	}
	if obj.MaxPods == 0 {
		obj.MaxPods = 110
	}
	if obj.MinimumGCAge == zeroDuration {
		obj.MinimumGCAge = unversioned.Duration{Duration: 0}
	}
	// For backwards-compat NetworkPluginDir is consulted as source of CNI config
	// but CNIConfDir is preferred; don't set this default if CNI is in use.
	if obj.NetworkPluginDir == "" && obj.NetworkPluginName != "cni" {
		obj.NetworkPluginDir = "/usr/libexec/kubernetes/kubelet-plugins/net/exec/"
	}
	if obj.NonMasqueradeCIDR == "" {
		obj.NonMasqueradeCIDR = "10.0.0.0/8"
	}
	if obj.VolumePluginDir == "" {
		obj.VolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
	}
	if obj.NodeStatusUpdateFrequency == zeroDuration {
		obj.NodeStatusUpdateFrequency = unversioned.Duration{Duration: 10 * time.Second}
	}
	if obj.OOMScoreAdj == nil {
		temp := int32(qos.KubeletOOMScoreAdj)
		obj.OOMScoreAdj = &temp
	}
	if obj.PodInfraContainerImage == "" {
		obj.PodInfraContainerImage = defaultPodInfraContainerImage
	}
	if obj.Port == 0 {
		obj.Port = ports.KubeletPort
	}
	if obj.ReadOnlyPort == 0 {
		obj.ReadOnlyPort = ports.KubeletReadOnlyPort
	}
	if obj.RegisterNode == nil {
		obj.RegisterNode = boolVar(true)
	}
	if obj.RegisterSchedulable == nil {
		obj.RegisterSchedulable = boolVar(true)
	}
	if obj.RegistryBurst == 0 {
		obj.RegistryBurst = 10
	}
	if obj.RegistryPullQPS == nil {
		temp := int32(5)
		obj.RegistryPullQPS = &temp
	}
	if obj.ResolverConfig == "" {
		obj.ResolverConfig = kubetypes.ResolvConfDefault
	}
	if obj.RktAPIEndpoint == "" {
		obj.RktAPIEndpoint = defaultRktAPIServiceEndpoint
	}
	if obj.RootDirectory == "" {
		obj.RootDirectory = defaultRootDir
	}
	if obj.SerializeImagePulls == nil {
		obj.SerializeImagePulls = boolVar(true)
	}
	if obj.SeccompProfileRoot == "" {
		// Previously the joined path was computed but never assigned.
		obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp")
	}
	if obj.StreamingConnectionIdleTimeout == zeroDuration {
		obj.StreamingConnectionIdleTimeout = unversioned.Duration{Duration: 4 * time.Hour}
	}
	if obj.SyncFrequency == zeroDuration {
		obj.SyncFrequency = unversioned.Duration{Duration: 1 * time.Minute}
	}
	if obj.ReconcileCIDR == nil {
		obj.ReconcileCIDR = boolVar(true)
	}
	if obj.ContentType == "" {
		obj.ContentType = "application/vnd.kubernetes.protobuf"
	}
	if obj.KubeAPIQPS == nil {
		temp := int32(5)
		obj.KubeAPIQPS = &temp
	}
	if obj.KubeAPIBurst == 0 {
		obj.KubeAPIBurst = 10
	}
	if obj.OutOfDiskTransitionFrequency == zeroDuration {
		obj.OutOfDiskTransitionFrequency = unversioned.Duration{Duration: 5 * time.Minute}
	}
	if string(obj.HairpinMode) == "" {
		obj.HairpinMode = PromiscuousBridge
	}
	if obj.EvictionHard == nil {
		temp := "memory.available<100Mi"
		obj.EvictionHard = &temp
	}
	if obj.EvictionPressureTransitionPeriod == zeroDuration {
		obj.EvictionPressureTransitionPeriod = unversioned.Duration{Duration: 5 * time.Minute}
	}
	if obj.SystemReserved == nil {
		obj.SystemReserved = make(map[string]string)
	}
	if obj.KubeReserved == nil {
		obj.KubeReserved = make(map[string]string)
	}
	if obj.MakeIPTablesUtilChains == nil {
		obj.MakeIPTablesUtilChains = boolVar(true)
	}
	if obj.IPTablesMasqueradeBit == nil {
		temp := int32(defaultIPTablesMasqueradeBit)
		obj.IPTablesMasqueradeBit = &temp
	}
	if obj.IPTablesDropBit == nil {
		temp := int32(defaultIPTablesDropBit)
		obj.IPTablesDropBit = &temp
	}
}
// boolVar returns a pointer to a copy of b, for populating optional
// *bool configuration fields.
func boolVar(b bool) *bool {
	v := b
	return &v
}
var (
	// defaultCfg is the zero value of KubeletConfiguration; its consumers
	// are not visible in this file — presumably used for comparisons
	// against compiled-in defaults elsewhere in the package (TODO confirm).
	defaultCfg = KubeletConfiguration{}
)
Move default directory for exec plugin into exec plugin
Kubernetes-commit: a2e583a86cf01f8ae1a237f888aafcd7c67a8b60
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"path/filepath"
"runtime"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/kubelet/qos"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/master/ports"
kruntime "k8s.io/kubernetes/pkg/runtime"
)
const (
defaultRootDir = "/var/lib/kubelet"
// When these values are updated, also update test/e2e/framework/util.go
defaultPodInfraContainerImageName = "gcr.io/google_containers/pause"
defaultPodInfraContainerImageVersion = "3.0"
defaultPodInfraContainerImage = defaultPodInfraContainerImageName +
"-" + runtime.GOARCH + ":" +
defaultPodInfraContainerImageVersion
// From pkg/kubelet/rkt/rkt.go to avoid circular import
defaultRktAPIServiceEndpoint = "localhost:15441"
AutoDetectCloudProvider = "auto-detect"
defaultIPTablesMasqueradeBit = 14
defaultIPTablesDropBit = 15
)
var zeroDuration = unversioned.Duration{}
// addDefaultingFuncs registers every SetDefaults_* function in this file
// with the given scheme so defaults are applied during decoding/conversion.
func addDefaultingFuncs(scheme *kruntime.Scheme) error {
	defaulters := []interface{}{
		SetDefaults_KubeProxyConfiguration,
		SetDefaults_KubeSchedulerConfiguration,
		SetDefaults_LeaderElectionConfiguration,
		SetDefaults_KubeletConfiguration,
	}
	return scheme.AddDefaultingFuncs(defaulters...)
}
// SetDefaults_KubeProxyConfiguration fills in documented defaults for any
// zero-valued fields of a KubeProxyConfiguration. Pointer fields are only
// defaulted when nil so an explicit 0/false from the user is preserved.
func SetDefaults_KubeProxyConfiguration(obj *KubeProxyConfiguration) {
	if obj.BindAddress == "" {
		obj.BindAddress = "0.0.0.0"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10249
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = "127.0.0.1"
	}
	if obj.OOMScoreAdj == nil {
		oomScore := int32(qos.KubeProxyOOMScoreAdj)
		obj.OOMScoreAdj = &oomScore
	}
	if obj.ResourceContainer == "" {
		obj.ResourceContainer = "/kube-proxy"
	}
	if obj.IPTablesSyncPeriod.Duration == 0 {
		obj.IPTablesSyncPeriod = unversioned.Duration{Duration: 30 * time.Second}
	}
	if obj.UDPIdleTimeout.Duration == 0 {
		obj.UDPIdleTimeout = unversioned.Duration{Duration: 250 * time.Millisecond}
	}
	// An explicitly set ConntrackMax wins; per-core scaling is the default
	// only when neither knob was provided.
	if obj.ConntrackMax == 0 && obj.ConntrackMaxPerCore == 0 {
		obj.ConntrackMaxPerCore = 32 * 1024
	}
	if obj.IPTablesMasqueradeBit == nil {
		masqBit := int32(14)
		obj.IPTablesMasqueradeBit = &masqBit
	}
	if obj.ConntrackTCPEstablishedTimeout.Duration == 0 {
		obj.ConntrackTCPEstablishedTimeout = unversioned.Duration{Duration: 24 * time.Hour} // 1 day (1/5 default)
	}
}
// SetDefaults_KubeSchedulerConfiguration fills in documented defaults for
// any zero-valued fields of a KubeSchedulerConfiguration. Every check is
// independent, so the order below groups networking, algorithm, and API
// client settings for readability.
func SetDefaults_KubeSchedulerConfiguration(obj *KubeSchedulerConfiguration) {
	if obj.Address == "" {
		obj.Address = "0.0.0.0"
	}
	if obj.Port == 0 {
		obj.Port = ports.SchedulerPort
	}
	if obj.AlgorithmProvider == "" {
		obj.AlgorithmProvider = "DefaultProvider"
	}
	if obj.SchedulerName == "" {
		obj.SchedulerName = api.DefaultSchedulerName
	}
	if obj.HardPodAffinitySymmetricWeight == 0 {
		obj.HardPodAffinitySymmetricWeight = api.DefaultHardPodAffinitySymmetricWeight
	}
	if obj.FailureDomains == "" {
		obj.FailureDomains = api.DefaultFailureDomains
	}
	if obj.ContentType == "" {
		obj.ContentType = "application/vnd.kubernetes.protobuf"
	}
	if obj.KubeAPIQPS == 0 {
		obj.KubeAPIQPS = 50.0
	}
	if obj.KubeAPIBurst == 0 {
		obj.KubeAPIBurst = 100
	}
}
// SetDefaults_LeaderElectionConfiguration fills in documented defaults for
// any zero-valued duration fields of a LeaderElectionConfiguration.
func SetDefaults_LeaderElectionConfiguration(obj *LeaderElectionConfiguration) {
	if obj.LeaseDuration == zeroDuration {
		obj.LeaseDuration = unversioned.Duration{Duration: 15 * time.Second}
	}
	if obj.RenewDeadline == zeroDuration {
		obj.RenewDeadline = unversioned.Duration{Duration: 10 * time.Second}
	}
	if obj.RetryPeriod == zeroDuration {
		obj.RetryPeriod = unversioned.Duration{Duration: 2 * time.Second}
	}
}
// SetDefaults_KubeletConfiguration fills in documented defaults for any
// zero-valued fields of a KubeletConfiguration. Pointer fields are only
// defaulted when nil so an explicit false/0 from the user is preserved.
//
// Bug fix: the SeccompProfileRoot default was computed with filepath.Join
// but the result was discarded, leaving the field empty; it is now assigned.
func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
	if obj.Address == "" {
		obj.Address = "0.0.0.0"
	}
	if obj.CloudProvider == "" {
		obj.CloudProvider = AutoDetectCloudProvider
	}
	if obj.CAdvisorPort == 0 {
		obj.CAdvisorPort = 4194
	}
	if obj.VolumeStatsAggPeriod == zeroDuration {
		obj.VolumeStatsAggPeriod = unversioned.Duration{Duration: time.Minute}
	}
	if obj.CertDirectory == "" {
		obj.CertDirectory = "/var/run/kubernetes"
	}
	if obj.ConfigureCBR0 == nil {
		obj.ConfigureCBR0 = boolVar(false)
	}
	if obj.CgroupsPerQOS == nil {
		obj.CgroupsPerQOS = boolVar(false)
	}
	if obj.ContainerRuntime == "" {
		obj.ContainerRuntime = "docker"
	}
	if obj.RuntimeRequestTimeout == zeroDuration {
		obj.RuntimeRequestTimeout = unversioned.Duration{Duration: 2 * time.Minute}
	}
	if obj.CPUCFSQuota == nil {
		obj.CPUCFSQuota = boolVar(true)
	}
	if obj.DockerExecHandlerName == "" {
		obj.DockerExecHandlerName = "native"
	}
	if obj.DockerEndpoint == "" {
		obj.DockerEndpoint = "unix:///var/run/docker.sock"
	}
	if obj.EventBurst == 0 {
		obj.EventBurst = 10
	}
	if obj.EventRecordQPS == nil {
		temp := int32(5)
		obj.EventRecordQPS = &temp
	}
	if obj.EnableControllerAttachDetach == nil {
		obj.EnableControllerAttachDetach = boolVar(true)
	}
	if obj.EnableDebuggingHandlers == nil {
		obj.EnableDebuggingHandlers = boolVar(true)
	}
	if obj.EnableServer == nil {
		obj.EnableServer = boolVar(true)
	}
	if obj.FileCheckFrequency == zeroDuration {
		obj.FileCheckFrequency = unversioned.Duration{Duration: 20 * time.Second}
	}
	if obj.HealthzBindAddress == "" {
		obj.HealthzBindAddress = "127.0.0.1"
	}
	if obj.HealthzPort == 0 {
		obj.HealthzPort = 10248
	}
	if obj.HostNetworkSources == nil {
		obj.HostNetworkSources = []string{kubetypes.AllSource}
	}
	if obj.HostPIDSources == nil {
		obj.HostPIDSources = []string{kubetypes.AllSource}
	}
	if obj.HostIPCSources == nil {
		obj.HostIPCSources = []string{kubetypes.AllSource}
	}
	if obj.HTTPCheckFrequency == zeroDuration {
		obj.HTTPCheckFrequency = unversioned.Duration{Duration: 20 * time.Second}
	}
	if obj.ImageMinimumGCAge == zeroDuration {
		obj.ImageMinimumGCAge = unversioned.Duration{Duration: 2 * time.Minute}
	}
	if obj.ImageGCHighThresholdPercent == nil {
		temp := int32(90)
		obj.ImageGCHighThresholdPercent = &temp
	}
	if obj.ImageGCLowThresholdPercent == nil {
		temp := int32(80)
		obj.ImageGCLowThresholdPercent = &temp
	}
	if obj.LowDiskSpaceThresholdMB == 0 {
		obj.LowDiskSpaceThresholdMB = 256
	}
	if obj.MasterServiceNamespace == "" {
		obj.MasterServiceNamespace = api.NamespaceDefault
	}
	if obj.MaxContainerCount == nil {
		// -1 means "no limit" on dead containers kept for inspection.
		temp := int32(-1)
		obj.MaxContainerCount = &temp
	}
	if obj.MaxPerPodContainerCount == 0 {
		obj.MaxPerPodContainerCount = 1
	}
	if obj.MaxOpenFiles == 0 {
		obj.MaxOpenFiles = 1000000
	}
	if obj.MaxPods == 0 {
		obj.MaxPods = 110
	}
	if obj.MinimumGCAge == zeroDuration {
		obj.MinimumGCAge = unversioned.Duration{Duration: 0}
	}
	if obj.NonMasqueradeCIDR == "" {
		obj.NonMasqueradeCIDR = "10.0.0.0/8"
	}
	if obj.VolumePluginDir == "" {
		obj.VolumePluginDir = "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
	}
	if obj.NodeStatusUpdateFrequency == zeroDuration {
		obj.NodeStatusUpdateFrequency = unversioned.Duration{Duration: 10 * time.Second}
	}
	if obj.OOMScoreAdj == nil {
		temp := int32(qos.KubeletOOMScoreAdj)
		obj.OOMScoreAdj = &temp
	}
	if obj.PodInfraContainerImage == "" {
		obj.PodInfraContainerImage = defaultPodInfraContainerImage
	}
	if obj.Port == 0 {
		obj.Port = ports.KubeletPort
	}
	if obj.ReadOnlyPort == 0 {
		obj.ReadOnlyPort = ports.KubeletReadOnlyPort
	}
	if obj.RegisterNode == nil {
		obj.RegisterNode = boolVar(true)
	}
	if obj.RegisterSchedulable == nil {
		obj.RegisterSchedulable = boolVar(true)
	}
	if obj.RegistryBurst == 0 {
		obj.RegistryBurst = 10
	}
	if obj.RegistryPullQPS == nil {
		temp := int32(5)
		obj.RegistryPullQPS = &temp
	}
	if obj.ResolverConfig == "" {
		obj.ResolverConfig = kubetypes.ResolvConfDefault
	}
	if obj.RktAPIEndpoint == "" {
		obj.RktAPIEndpoint = defaultRktAPIServiceEndpoint
	}
	if obj.RootDirectory == "" {
		obj.RootDirectory = defaultRootDir
	}
	if obj.SerializeImagePulls == nil {
		obj.SerializeImagePulls = boolVar(true)
	}
	if obj.SeccompProfileRoot == "" {
		// Previously the joined path was computed but never assigned.
		obj.SeccompProfileRoot = filepath.Join(defaultRootDir, "seccomp")
	}
	if obj.StreamingConnectionIdleTimeout == zeroDuration {
		obj.StreamingConnectionIdleTimeout = unversioned.Duration{Duration: 4 * time.Hour}
	}
	if obj.SyncFrequency == zeroDuration {
		obj.SyncFrequency = unversioned.Duration{Duration: 1 * time.Minute}
	}
	if obj.ReconcileCIDR == nil {
		obj.ReconcileCIDR = boolVar(true)
	}
	if obj.ContentType == "" {
		obj.ContentType = "application/vnd.kubernetes.protobuf"
	}
	if obj.KubeAPIQPS == nil {
		temp := int32(5)
		obj.KubeAPIQPS = &temp
	}
	if obj.KubeAPIBurst == 0 {
		obj.KubeAPIBurst = 10
	}
	if obj.OutOfDiskTransitionFrequency == zeroDuration {
		obj.OutOfDiskTransitionFrequency = unversioned.Duration{Duration: 5 * time.Minute}
	}
	if string(obj.HairpinMode) == "" {
		obj.HairpinMode = PromiscuousBridge
	}
	if obj.EvictionHard == nil {
		temp := "memory.available<100Mi"
		obj.EvictionHard = &temp
	}
	if obj.EvictionPressureTransitionPeriod == zeroDuration {
		obj.EvictionPressureTransitionPeriod = unversioned.Duration{Duration: 5 * time.Minute}
	}
	if obj.SystemReserved == nil {
		obj.SystemReserved = make(map[string]string)
	}
	if obj.KubeReserved == nil {
		obj.KubeReserved = make(map[string]string)
	}
	if obj.MakeIPTablesUtilChains == nil {
		obj.MakeIPTablesUtilChains = boolVar(true)
	}
	if obj.IPTablesMasqueradeBit == nil {
		temp := int32(defaultIPTablesMasqueradeBit)
		obj.IPTablesMasqueradeBit = &temp
	}
	if obj.IPTablesDropBit == nil {
		temp := int32(defaultIPTablesDropBit)
		obj.IPTablesDropBit = &temp
	}
}
// boolVar returns a pointer to a copy of b, for populating optional
// *bool configuration fields.
func boolVar(b bool) *bool {
	v := b
	return &v
}
var (
	// defaultCfg is the zero value of KubeletConfiguration; its consumers
	// are not visible in this file — presumably used for comparisons
	// against compiled-in defaults elsewhere in the package (TODO confirm).
	defaultCfg = KubeletConfiguration{}
)
|
package node
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/event"
"github.com/rancher/norman/types/values"
"github.com/rancher/rancher/pkg/encryptedstore"
"github.com/rancher/rancher/pkg/nodeconfig"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/config"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
kerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
defaultEngineInstallURL = "https://releases.rancher.com/install-docker/17.03.2.sh"
)
// Register wires the node lifecycle controller into the management context.
// It terminates the process (logrus.Fatal) if the encrypted secret store
// cannot be created.
func Register(management *config.ManagementContext) {
	store, err := nodeconfig.NewStore(management)
	if err != nil {
		logrus.Fatal(err)
	}
	nodes := management.Management.Nodes("")
	lifecycle := &Lifecycle{
		secretStore:               store,
		nodeClient:                nodes,
		nodeTemplateClient:        management.Management.NodeTemplates(""),
		nodeTemplateGenericClient: management.Management.NodeTemplates("").ObjectClient().UnstructuredClient(),
		configMapGetter:           management.K8sClient.CoreV1(),
		logger:                    management.EventLogger,
		clusterLister:             management.Management.Clusters("").Controller().Lister(),
	}
	nodes.AddLifecycle("node-controller", lifecycle)
}
// Lifecycle reacts to node create/update/remove events and drives machine
// provisioning for template-backed nodes and validation for custom nodes.
type Lifecycle struct {
	secretStore               *encryptedstore.GenericEncryptedStore // encrypted persistence for machine/driver config
	nodeTemplateGenericClient clientbase.GenericClient              // unstructured template access (driver-specific config keys)
	nodeClient                v3.NodeInterface
	nodeTemplateClient        v3.NodeTemplateInterface
	configMapGetter           typedv1.ConfigMapsGetter
	logger                    event.Logger // per-object event logger used for provisioning progress
	clusterLister             v3.ClusterLister
}
// setupCustom populates obj.Status.NodeConfig from the node's user-supplied
// CustomConfig, defaulting the SSH user to "root" when unset.
func (m *Lifecycle) setupCustom(obj *v3.Node) {
	custom := obj.Spec.CustomConfig
	nodeConfig := &v3.RKEConfigNode{
		NodeName:         obj.Spec.ClusterName + ":" + obj.Name,
		HostnameOverride: obj.Spec.RequestedHostname,
		Address:          custom.Address,
		InternalAddress:  custom.InternalAddress,
		User:             custom.User,
		DockerSocket:     custom.DockerSocket,
		SSHKey:           custom.SSHKey,
		Role:             roles(obj),
	}
	if nodeConfig.User == "" {
		nodeConfig.User = "root"
	}
	obj.Status.NodeConfig = nodeConfig
}
// isCustom reports whether the node carries a user-supplied address
// (custom node) rather than being provisioned from a node template.
func isCustom(obj *v3.Node) bool {
	if obj.Spec.CustomConfig == nil {
		return false
	}
	return obj.Spec.CustomConfig.Address != ""
}
// Create handles initial registration of a node. Custom nodes (user-supplied
// address) are validated over SSH; template-backed nodes have their driver
// config resolved from the node template and persisted via the secret store.
// Nodes with neither a custom config nor a template pass through unchanged.
//
// Fix: corrected "confg" typo in the marshal error message.
func (m *Lifecycle) Create(obj *v3.Node) (*v3.Node, error) {
	if isCustom(obj) {
		m.setupCustom(obj)
		newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
			if err := validateCustomHost(obj); err != nil {
				return obj, err
			}
			return obj, nil
		})
		return newObj.(*v3.Node), err
	}
	if obj.Spec.NodeTemplateName == "" {
		return obj, nil
	}
	newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
		template, err := m.nodeTemplateClient.Get(obj.Spec.NodeTemplateName, metav1.GetOptions{})
		if err != nil {
			return obj, err
		}
		obj.Status.NodeTemplateSpec = &template.Spec
		if obj.Spec.RequestedHostname == "" {
			obj.Spec.RequestedHostname = obj.Name
		}
		if obj.Status.NodeTemplateSpec.EngineInstallURL == "" {
			obj.Status.NodeTemplateSpec.EngineInstallURL = defaultEngineInstallURL
		}
		// The driver config lives under a driver-specific key (e.g.
		// "<driver>Config") that the typed spec does not expose, so fetch
		// the template again as unstructured data.
		rawTemplate, err := m.nodeTemplateGenericClient.Get(obj.Spec.NodeTemplateName, metav1.GetOptions{})
		if err != nil {
			return obj, err
		}
		rawConfig, ok := values.GetValue(rawTemplate.(*unstructured.Unstructured).Object, template.Spec.Driver+"Config")
		if !ok {
			return obj, fmt.Errorf("node config not specified")
		}
		bytes, err := json.Marshal(rawConfig)
		if err != nil {
			return obj, errors.Wrap(err, "failed to marshal node driver config")
		}
		config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
		if err != nil {
			return obj, errors.Wrap(err, "failed to save node driver config")
		}
		defer config.Cleanup()
		config.SetDriverConfig(string(bytes))
		return obj, config.Save()
	})
	return newObj.(*v3.Node), err
}
// Remove tears down the machine backing a template-provisioned node. It
// refuses to delete while the node is still present in the cluster's applied
// spec, and is a no-op for nodes without a template spec or without a
// backing machine.
func (m *Lifecycle) Remove(obj *v3.Node) (*v3.Node, error) {
	if obj.Status.NodeTemplateSpec == nil {
		return obj, nil
	}
	inSpec, err := m.isNodeInAppliedSpec(obj)
	if err != nil {
		return obj, err
	}
	if inSpec {
		return obj, fmt.Errorf("Node [%s] still not deleted from cluster spec", obj.Name)
	}
	config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
	if err != nil {
		return obj, err
	}
	if err := config.Restore(); err != nil {
		return obj, err
	}
	defer config.Remove()
	exists, err := nodeExists(config.Dir(), obj.Spec.RequestedHostname)
	if err != nil {
		return obj, err
	}
	if !exists {
		return obj, nil
	}
	m.logger.Infof(obj, "Removing node %s", obj.Spec.RequestedHostname)
	if err := deleteNode(config.Dir(), obj); err != nil {
		return obj, err
	}
	m.logger.Infof(obj, "Removing node %s done", obj.Spec.RequestedHostname)
	return obj, nil
}
// provision runs the machine "create" command for the node, built from the
// JSON driverConfig saved earlier, and streams its output into the node's
// status. nodeDir is the machine config directory on disk.
func (m *Lifecycle) provision(driverConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
	configRawMap := map[string]interface{}{}
	if err := json.Unmarshal([]byte(driverConfig), &configRawMap); err != nil {
		return obj, errors.Wrap(err, "failed to unmarshal node config")
	}
	// Since we know this will take a long time persist so user sees status
	obj, err := m.nodeClient.Update(obj)
	if err != nil {
		return obj, err
	}
	createCommandsArgs := buildCreateCommand(obj, configRawMap)
	cmd := buildCommand(nodeDir, createCommandsArgs)
	m.logger.Infof(obj, "Provisioning node %s", obj.Spec.RequestedHostname)
	stdoutReader, stderrReader, err := startReturnOutput(cmd)
	if err != nil {
		return obj, err
	}
	defer stdoutReader.Close()
	defer stderrReader.Close()
	// The deferred Wait covers early-return paths below; on the normal path
	// the explicit Wait runs first and this deferred second call's error is
	// intentionally ignored.
	defer cmd.Wait()
	hostExist := false
	obj, err = m.reportStatus(stdoutReader, stderrReader, obj)
	if err != nil {
		// "Host already exists" is tolerated so re-provisioning an already
		// created machine is not treated as a failure.
		if strings.Contains(err.Error(), "Host already exists") {
			hostExist = true
		}
		if !hostExist {
			return obj, err
		}
	}
	if err := cmd.Wait(); err != nil && !hostExist {
		return obj, err
	}
	m.logger.Infof(obj, "Provisioning node %s done", obj.Spec.RequestedHostname)
	return obj, nil
}
// ready provisions the node's machine in a background goroutine while the
// foreground loop periodically persists the (mutating) machine config, then
// records the final connection details on the node once provisioning ends.
func (m *Lifecycle) ready(obj *v3.Node) (*v3.Node, error) {
	config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
	if err != nil {
		return obj, err
	}
	defer config.Cleanup()
	if err := config.Restore(); err != nil {
		return obj, err
	}
	driverConfig, err := config.DriverConfig()
	if err != nil {
		return nil, err
	}
	// Provision in the background so we can poll and save the config
	done := make(chan error)
	go func() {
		newObj, err := v3.NodeConditionProvisioned.Once(obj, func() (runtime.Object, error) {
			return m.provision(driverConfig, config.Dir(), obj)
		})
		obj = newObj.(*v3.Node)
		done <- err
	}()
	// Poll and save config
	// NOTE(review): obj is written by the goroutine and read again below;
	// the unbuffered done channel orders that write before the read.
outer:
	for {
		select {
		case err = <-done:
			break outer
		case <-time.After(5 * time.Second):
			// Best-effort periodic persistence; error intentionally ignored.
			config.Save()
		}
	}
	newObj, saveError := v3.NodeConditionConfigSaved.Once(obj, func() (runtime.Object, error) {
		return m.saveConfig(config, config.Dir(), obj)
	})
	obj = newObj.(*v3.Node)
	// A provisioning error takes precedence over a config-save error.
	if err == nil {
		return obj, saveError
	}
	return obj, err
}
// Updated drives a template-backed node to the Ready condition; nodes
// without a template spec are passed through untouched.
func (m *Lifecycle) Updated(obj *v3.Node) (*v3.Node, error) {
	if obj.Status.NodeTemplateSpec == nil {
		return obj, nil
	}
	newObj, err := v3.NodeConditionReady.Once(obj, func() (runtime.Object, error) {
		return m.ready(obj)
	})
	return newObj.(*v3.Node), err
}
// saveConfig persists the machine config and copies the provisioned
// machine's connection details (addresses, SSH user/key, roles) into
// obj.Status.NodeConfig, defaulting the role to "worker" when none is set.
func (m *Lifecycle) saveConfig(config *nodeconfig.NodeConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
	logrus.Infof("Generating and uploading node config %s", obj.Spec.RequestedHostname)
	if err := config.Save(); err != nil {
		return obj, err
	}
	ip, err := config.IP()
	if err != nil {
		return obj, err
	}
	internalAddress, err := config.InternalIP()
	if err != nil {
		return obj, err
	}
	sshKey, err := getSSHKey(nodeDir, obj)
	if err != nil {
		return obj, err
	}
	sshUser, err := config.SSHUser()
	if err != nil {
		return obj, err
	}
	if err := config.Save(); err != nil {
		return obj, err
	}
	nodeConfig := &v3.RKEConfigNode{
		NodeName:         obj.Spec.ClusterName + ":" + obj.Name,
		Address:          ip,
		InternalAddress:  internalAddress,
		User:             sshUser,
		Role:             roles(obj),
		HostnameOverride: obj.Spec.RequestedHostname,
		SSHKey:           sshKey,
	}
	if len(nodeConfig.Role) == 0 {
		nodeConfig.Role = []string{"worker"}
	}
	obj.Status.NodeConfig = nodeConfig
	return obj, nil
}
// isNodeInAppliedSpec reports whether the node is still referenced by its
// cluster's applied RKE spec. A missing or deleting cluster counts as "not
// in spec".
func (m *Lifecycle) isNodeInAppliedSpec(node *v3.Node) (bool, error) {
	cluster, err := m.clusterLister.Get("", node.Spec.ClusterName)
	if err != nil {
		if kerror.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	if cluster == nil || cluster.DeletionTimestamp != nil {
		return false, nil
	}
	rkeConfig := cluster.Status.AppliedSpec.RancherKubernetesEngineConfig
	if rkeConfig == nil {
		return false, nil
	}
	// NOTE(review): names here are built from node.Namespace, while
	// setupCustom builds them from Spec.ClusterName — presumably these
	// coincide for nodes; verify against callers.
	target := fmt.Sprintf("%s:%s", node.Namespace, node.Name)
	for _, rkeNode := range rkeConfig.Nodes {
		if rkeNode.NodeName != "" && rkeNode.NodeName == target {
			return true, nil
		}
	}
	return false, nil
}
// validateCustomHost checks that a custom node is reachable over SSH
// (port 22) with the supplied user and private key. Imported nodes are
// exempt from validation.
func validateCustomHost(obj *v3.Node) error {
	if obj.Spec.Imported {
		return nil
	}
	custom := obj.Spec.CustomConfig
	signer, err := ssh.ParsePrivateKey([]byte(custom.SSHKey))
	if err != nil {
		return errors.Wrapf(err, "sshKey format is invalid")
	}
	sshCfg := &ssh.ClientConfig{
		User:            custom.User,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}
	conn, err := ssh.Dial("tcp", custom.Address+":22", sshCfg)
	if err != nil {
		return errors.Wrapf(err, "Failed to validate ssh connection to address [%s]", custom.Address)
	}
	defer conn.Close()
	return nil
}
// roles returns the RKE roles enabled on the node spec, in the fixed order
// etcd, controlplane, worker, defaulting to worker when none are set.
func roles(node *v3.Node) []string {
	var selected []string
	if node.Spec.Etcd {
		selected = append(selected, "etcd")
	}
	if node.Spec.ControlPlane {
		selected = append(selected, "controlplane")
	}
	if node.Spec.Worker {
		selected = append(selected, "worker")
	}
	if len(selected) == 0 {
		selected = []string{"worker"}
	}
	return selected
}
Set ready for non-provisioned nodes
package node
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rancher/norman/clientbase"
"github.com/rancher/norman/event"
"github.com/rancher/norman/types/values"
"github.com/rancher/rancher/pkg/encryptedstore"
"github.com/rancher/rancher/pkg/nodeconfig"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/config"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
kerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
defaultEngineInstallURL = "https://releases.rancher.com/install-docker/17.03.2.sh"
)
// Register wires the node lifecycle controller into the management context.
// It terminates the process (logrus.Fatal) if the encrypted secret store
// cannot be created.
func Register(management *config.ManagementContext) {
	store, err := nodeconfig.NewStore(management)
	if err != nil {
		logrus.Fatal(err)
	}
	nodes := management.Management.Nodes("")
	lifecycle := &Lifecycle{
		secretStore:               store,
		nodeClient:                nodes,
		nodeTemplateClient:        management.Management.NodeTemplates(""),
		nodeTemplateGenericClient: management.Management.NodeTemplates("").ObjectClient().UnstructuredClient(),
		configMapGetter:           management.K8sClient.CoreV1(),
		logger:                    management.EventLogger,
		clusterLister:             management.Management.Clusters("").Controller().Lister(),
	}
	nodes.AddLifecycle("node-controller", lifecycle)
}
// Lifecycle reacts to node create/update/remove events and drives machine
// provisioning for template-backed nodes and validation for custom nodes.
type Lifecycle struct {
	secretStore               *encryptedstore.GenericEncryptedStore // encrypted persistence for machine/driver config
	nodeTemplateGenericClient clientbase.GenericClient              // unstructured template access (driver-specific config keys)
	nodeClient                v3.NodeInterface
	nodeTemplateClient        v3.NodeTemplateInterface
	configMapGetter           typedv1.ConfigMapsGetter
	logger                    event.Logger // per-object event logger used for provisioning progress
	clusterLister             v3.ClusterLister
}
// setupCustom populates obj.Status.NodeConfig from the node's user-supplied
// CustomConfig, defaulting the SSH user to "root" when unset.
func (m *Lifecycle) setupCustom(obj *v3.Node) {
	custom := obj.Spec.CustomConfig
	nodeConfig := &v3.RKEConfigNode{
		NodeName:         obj.Spec.ClusterName + ":" + obj.Name,
		HostnameOverride: obj.Spec.RequestedHostname,
		Address:          custom.Address,
		InternalAddress:  custom.InternalAddress,
		User:             custom.User,
		DockerSocket:     custom.DockerSocket,
		SSHKey:           custom.SSHKey,
		Role:             roles(obj),
	}
	if nodeConfig.User == "" {
		nodeConfig.User = "root"
	}
	obj.Status.NodeConfig = nodeConfig
}
// isCustom reports whether the node carries a user-supplied address
// (custom node) rather than being provisioned from a node template.
func isCustom(obj *v3.Node) bool {
	if obj.Spec.CustomConfig == nil {
		return false
	}
	return obj.Spec.CustomConfig.Address != ""
}
// Create handles initial registration of a node. Custom nodes (user-supplied
// address) are validated over SSH; template-backed nodes have their driver
// config resolved from the node template and persisted via the secret store.
// Nodes with neither a custom config nor a template pass through unchanged.
//
// Fix: corrected "confg" typo in the marshal error message.
func (m *Lifecycle) Create(obj *v3.Node) (*v3.Node, error) {
	if isCustom(obj) {
		m.setupCustom(obj)
		newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
			if err := validateCustomHost(obj); err != nil {
				return obj, err
			}
			return obj, nil
		})
		return newObj.(*v3.Node), err
	}
	if obj.Spec.NodeTemplateName == "" {
		return obj, nil
	}
	newObj, err := v3.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
		template, err := m.nodeTemplateClient.Get(obj.Spec.NodeTemplateName, metav1.GetOptions{})
		if err != nil {
			return obj, err
		}
		obj.Status.NodeTemplateSpec = &template.Spec
		if obj.Spec.RequestedHostname == "" {
			obj.Spec.RequestedHostname = obj.Name
		}
		if obj.Status.NodeTemplateSpec.EngineInstallURL == "" {
			obj.Status.NodeTemplateSpec.EngineInstallURL = defaultEngineInstallURL
		}
		// The driver config lives under a driver-specific key (e.g.
		// "<driver>Config") that the typed spec does not expose, so fetch
		// the template again as unstructured data.
		rawTemplate, err := m.nodeTemplateGenericClient.Get(obj.Spec.NodeTemplateName, metav1.GetOptions{})
		if err != nil {
			return obj, err
		}
		rawConfig, ok := values.GetValue(rawTemplate.(*unstructured.Unstructured).Object, template.Spec.Driver+"Config")
		if !ok {
			return obj, fmt.Errorf("node config not specified")
		}
		bytes, err := json.Marshal(rawConfig)
		if err != nil {
			return obj, errors.Wrap(err, "failed to marshal node driver config")
		}
		config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
		if err != nil {
			return obj, errors.Wrap(err, "failed to save node driver config")
		}
		defer config.Cleanup()
		config.SetDriverConfig(string(bytes))
		return obj, config.Save()
	})
	return newObj.(*v3.Node), err
}
// Remove tears down the machine backing a template-provisioned node. It
// refuses to delete while the node is still present in the cluster's applied
// spec, and is a no-op for nodes without a template spec or without a
// backing machine.
func (m *Lifecycle) Remove(obj *v3.Node) (*v3.Node, error) {
	if obj.Status.NodeTemplateSpec == nil {
		return obj, nil
	}
	inSpec, err := m.isNodeInAppliedSpec(obj)
	if err != nil {
		return obj, err
	}
	if inSpec {
		return obj, fmt.Errorf("Node [%s] still not deleted from cluster spec", obj.Name)
	}
	config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
	if err != nil {
		return obj, err
	}
	if err := config.Restore(); err != nil {
		return obj, err
	}
	defer config.Remove()
	exists, err := nodeExists(config.Dir(), obj.Spec.RequestedHostname)
	if err != nil {
		return obj, err
	}
	if !exists {
		return obj, nil
	}
	m.logger.Infof(obj, "Removing node %s", obj.Spec.RequestedHostname)
	if err := deleteNode(config.Dir(), obj); err != nil {
		return obj, err
	}
	m.logger.Infof(obj, "Removing node %s done", obj.Spec.RequestedHostname)
	return obj, nil
}
// provision runs the machine-driver "create" command for obj, streaming its
// output into the node's status. driverConfig is the JSON-encoded driver
// flags; nodeDir is the machine storage directory.
//
// Fix: the original both deferred cmd.Wait() and called cmd.Wait() explicitly,
// so Wait ran twice on the success path — exec.Cmd.Wait must be called exactly
// once. Wait is now invoked exactly once on every path after a successful start.
func (m *Lifecycle) provision(driverConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
	configRawMap := map[string]interface{}{}
	if err := json.Unmarshal([]byte(driverConfig), &configRawMap); err != nil {
		return obj, errors.Wrap(err, "failed to unmarshal node config")
	}

	// Since we know this will take a long time persist so user sees status
	obj, err := m.nodeClient.Update(obj)
	if err != nil {
		return obj, err
	}

	createCommandsArgs := buildCreateCommand(obj, configRawMap)
	cmd := buildCommand(nodeDir, createCommandsArgs)

	m.logger.Infof(obj, "Provisioning node %s", obj.Spec.RequestedHostname)

	stdoutReader, stderrReader, err := startReturnOutput(cmd)
	if err != nil {
		return obj, err
	}
	defer stdoutReader.Close()
	defer stderrReader.Close()

	// "Host already exists" is treated as success so provisioning stays
	// idempotent across controller restarts.
	hostExist := false
	obj, err = m.reportStatus(stdoutReader, stderrReader, obj)
	if err != nil {
		if strings.Contains(err.Error(), "Host already exists") {
			hostExist = true
		}
		if !hostExist {
			// Reap the child before returning so it isn't leaked as a zombie;
			// its exit status is irrelevant next to the reportStatus error.
			cmd.Wait()
			return obj, err
		}
	}
	if err := cmd.Wait(); err != nil && !hostExist {
		return obj, err
	}

	m.logger.Infof(obj, "Provisioning node %s done", obj.Spec.RequestedHostname)
	return obj, nil
}
// ready provisions the machine backing obj in a background goroutine while
// the foreground loop periodically persists the evolving machine config,
// then performs a final config save once provisioning finishes.
func (m *Lifecycle) ready(obj *v3.Node) (*v3.Node, error) {
	config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
	if err != nil {
		return obj, err
	}
	defer config.Cleanup()
	if err := config.Restore(); err != nil {
		return obj, err
	}
	driverConfig, err := config.DriverConfig()
	if err != nil {
		// NOTE(review): this returns nil rather than obj like every other
		// error path here — confirm callers tolerate a nil node.
		return nil, err
	}
	// Provision in the background so we can poll and save the config
	done := make(chan error)
	go func() {
		newObj, err := v3.NodeConditionProvisioned.Once(obj, func() (runtime.Object, error) {
			return m.provision(driverConfig, config.Dir(), obj)
		})
		// Safe: the main goroutine reads obj again only after receiving on
		// done, so this write happens-before that read.
		obj = newObj.(*v3.Node)
		done <- err
	}()
	// Poll and save config
outer:
	for {
		select {
		case err = <-done:
			break outer
		case <-time.After(5 * time.Second):
			// Best effort: a failed periodic save is retried on the next tick
			// and superseded by the final save below.
			config.Save()
		}
	}
	newObj, saveError := v3.NodeConditionConfigSaved.Once(obj, func() (runtime.Object, error) {
		return m.saveConfig(config, config.Dir(), obj)
	})
	obj = newObj.(*v3.Node)
	// A provisioning error takes precedence over a config-save error.
	if err == nil {
		return obj, saveError
	}
	return obj, err
}
// Updated drives the node toward the Ready condition, running the readiness
// work at most once per condition cycle.
func (m *Lifecycle) Updated(obj *v3.Node) (*v3.Node, error) {
	readyFn := func() (runtime.Object, error) {
		// Nodes without a template spec have nothing to provision.
		if obj.Status.NodeTemplateSpec != nil {
			return m.ready(obj)
		}
		return obj, nil
	}
	updated, err := v3.NodeConditionReady.Once(obj, readyFn)
	obj = updated.(*v3.Node)
	return obj, err
}
// saveConfig persists the machine config and projects its connection details
// (addresses, SSH credentials, roles) into obj.Status.NodeConfig so RKE can
// drive the node.
//
// Fix: renamed the misspelled local "interalAddress" to "internalAddress".
func (m *Lifecycle) saveConfig(config *nodeconfig.NodeConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
	logrus.Infof("Generating and uploading node config %s", obj.Spec.RequestedHostname)
	if err := config.Save(); err != nil {
		return obj, err
	}

	ip, err := config.IP()
	if err != nil {
		return obj, err
	}

	internalAddress, err := config.InternalIP()
	if err != nil {
		return obj, err
	}

	sshKey, err := getSSHKey(nodeDir, obj)
	if err != nil {
		return obj, err
	}

	sshUser, err := config.SSHUser()
	if err != nil {
		return obj, err
	}

	// Second save: the accessors above may have pulled fresh state into the
	// config store — TODO(review) confirm this repeat save is still required.
	if err := config.Save(); err != nil {
		return obj, err
	}

	obj.Status.NodeConfig = &v3.RKEConfigNode{
		NodeName:         obj.Spec.ClusterName + ":" + obj.Name,
		Address:          ip,
		InternalAddress:  internalAddress,
		User:             sshUser,
		Role:             roles(obj),
		HostnameOverride: obj.Spec.RequestedHostname,
		SSHKey:           sshKey,
	}
	// Every RKE node needs at least one role; default to worker.
	if len(obj.Status.NodeConfig.Role) == 0 {
		obj.Status.NodeConfig.Role = []string{"worker"}
	}

	return obj, nil
}
// isNodeInAppliedSpec reports whether node is still referenced by the RKE
// node list in its cluster's applied spec. A missing cluster, a cluster being
// deleted, or an absent RKE config all count as "not referenced".
func (m *Lifecycle) isNodeInAppliedSpec(node *v3.Node) (bool, error) {
	// Empty namespace key — clusters appear to be looked up globally here.
	cluster, err := m.clusterLister.Get("", node.Spec.ClusterName)
	if err != nil {
		if kerror.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	if cluster == nil {
		return false, nil
	}
	if cluster.DeletionTimestamp != nil {
		// The whole cluster is going away; don't block node removal.
		return false, nil
	}
	if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig == nil {
		return false, nil
	}
	for _, rkeNode := range cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Nodes {
		nodeName := rkeNode.NodeName
		if len(nodeName) == 0 {
			continue
		}
		// NOTE(review): this compares against "<namespace>:<name>" while
		// saveConfig writes NodeName as "<clusterName>:<name>"; the two only
		// agree if node.Namespace equals the cluster name — confirm.
		if nodeName == fmt.Sprintf("%s:%s", node.Namespace, node.Name) {
			return true, nil
		}
	}
	return false, nil
}
// validateCustomHost verifies that a non-imported custom node is reachable
// over SSH (port 22) with the credentials in its custom config. Imported
// nodes are accepted without any check.
func validateCustomHost(obj *v3.Node) error {
	if obj.Spec.Imported {
		return nil
	}

	cc := obj.Spec.CustomConfig

	key, err := ssh.ParsePrivateKey([]byte(cc.SSHKey))
	if err != nil {
		return errors.Wrapf(err, "sshKey format is invalid")
	}

	clientCfg := &ssh.ClientConfig{
		User:            cc.User,
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(key)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	client, err := ssh.Dial("tcp", cc.Address+":22", clientCfg)
	if err != nil {
		return errors.Wrapf(err, "Failed to validate ssh connection to address [%s]", cc.Address)
	}
	client.Close()
	return nil
}
// roles translates the node's role flags into the RKE role-name list,
// defaulting to the worker role when no flag is set.
func roles(node *v3.Node) []string {
	var out []string
	for _, candidate := range []struct {
		enabled bool
		name    string
	}{
		{node.Spec.Etcd, "etcd"},
		{node.Spec.ControlPlane, "controlplane"},
		{node.Spec.Worker, "worker"},
	} {
		if candidate.enabled {
			out = append(out, candidate.name)
		}
	}
	if len(out) == 0 {
		out = []string{"worker"}
	}
	return out
}
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentials
import (
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/credentialprovider"
)
// AWSRegions is the complete list of regions known to the AWS cloudprovider
// and credentialprovider. It is maintained by hand and must be extended
// whenever AWS announces a new region.
//
// Fix: add the newly announced us-east-2 region, which was missing here.
var AWSRegions = [...]string{
	"us-east-1",
	"us-east-2",
	"us-west-1",
	"us-west-2",
	"eu-west-1",
	"eu-central-1",
	"ap-south-1",
	"ap-southeast-1",
	"ap-southeast-2",
	"ap-northeast-1",
	"ap-northeast-2",
	"cn-north-1",
	"us-gov-west-1",
	"sa-east-1",
}
// registryURLTemplate is the wildcard ECR registry host pattern; the %s is
// filled with the AWS region name.
const registryURLTemplate = "*.dkr.ecr.%s.amazonaws.com"
// awsHandlerLogger is a handler that logs all AWS SDK requests
// Copied from pkg/cloudprovider/providers/aws/log_handler.go
//
// Fix: the original dereferenced req.Config.Region unconditionally, which
// panics when the SDK config carries a nil Region pointer; aws.StringValue
// tolerates nil and yields "".
func awsHandlerLogger(req *request.Request) {
	service := req.ClientInfo.ServiceName
	region := aws.StringValue(req.Config.Region)

	name := "?"
	if req.Operation != nil {
		name = req.Operation.Name
	}

	glog.V(3).Infof("AWS request: %s:%s in %s", service, name, region)
}
// tokenGetter abstracts the single ECR API call this package makes so tests
// can substitute a fake implementation for the real ECR client.
type tokenGetter interface {
	GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error)
}
// ecrTokenGetter is the canonical tokenGetter implementation, backed by the
// real AWS ECR service client.
type ecrTokenGetter struct {
	svc *ecr.ECR
}

// GetAuthorizationToken delegates directly to the underlying ECR client.
func (p *ecrTokenGetter) GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) {
	return p.svc.GetAuthorizationToken(input)
}
// lazyEcrProvider is a DockerConfigProvider that creates on demand an
// ecrProvider for a given region and then proxies requests to it.
type lazyEcrProvider struct {
	region string
	// regionURL is the wildcard ECR registry host pattern for region.
	regionURL string
	// actualProvider is built lazily on the first LazyProvide call.
	actualProvider *credentialprovider.CachingDockerConfigProvider
}

// Compile-time interface conformance check.
var _ credentialprovider.DockerConfigProvider = &lazyEcrProvider{}
// ecrProvider is a DockerConfigProvider that gets and refreshes 12-hour tokens
// from AWS to access ECR.
type ecrProvider struct {
	region    string
	regionURL string
	// getter fetches authorization tokens; replaceable for tests.
	getter tokenGetter
}

// Compile-time interface conformance check.
var _ credentialprovider.DockerConfigProvider = &ecrProvider{}
// Init registers one lazy ECR credential provider per known AWS region, so
// cross-region ECR access works. Providers are lazy because it is unlikely,
// though possible, that more than one region is ever used. This deliberately
// avoids package init(): the module should only be initialized when the AWS
// cloud provider is in use, avoiding timeouts on a non-existent provider.
func Init() {
	for _, region := range AWSRegions {
		name := "aws-ecr-" + region
		provider := &lazyEcrProvider{
			region:    region,
			regionURL: fmt.Sprintf(registryURLTemplate, region),
		}
		credentialprovider.RegisterCredentialProvider(name, provider)
	}
}
// Enabled implements DockerConfigProvider.Enabled for the lazy provider.
// Since we perform no checks/work of our own and actualProvider is only created
// later at image pulling time (if ever), always return true.
func (p *lazyEcrProvider) Enabled() bool {
	return true
}
// LazyProvide implements DockerConfigProvider.LazyProvide. It will be called
// by the client when attempting to pull an image and it will create the actual
// provider only when we actually need it the first time.
func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry {
	// NOTE(review): this check-then-assign is unsynchronized; concurrent
	// image pulls could race on actualProvider — confirm callers serialize.
	if p.actualProvider == nil {
		glog.V(2).Infof("Creating ecrProvider for %s", p.region)
		p.actualProvider = &credentialprovider.CachingDockerConfigProvider{
			Provider: newEcrProvider(p.region, nil),
			// Refresh credentials a little earlier than expiration time
			Lifetime: 11*time.Hour + 55*time.Minute,
		}
		if !p.actualProvider.Enabled() {
			return nil
		}
	}
	// Return only the entry for this provider's own region URL.
	entry := p.actualProvider.Provide()[p.regionURL]
	return &entry
}
// Provide implements DockerConfigProvider.Provide by registering a single
// placeholder entry under this region's registry URL; real credentials are
// only fetched when the client calls Provider.LazyProvide() at image pull
// time.
func (p *lazyEcrProvider) Provide() credentialprovider.DockerConfig {
	return credentialprovider.DockerConfig{
		p.regionURL: {Provider: p},
	}
}
// newEcrProvider builds an ecrProvider for the given region, deriving its
// wildcard registry URL from the region name. getter may be nil; Enabled()
// installs the real ECR-backed getter in that case.
func newEcrProvider(region string, getter tokenGetter) *ecrProvider {
	p := &ecrProvider{region: region, getter: getter}
	p.regionURL = fmt.Sprintf(registryURLTemplate, region)
	return p
}
// Enabled implements DockerConfigProvider.Enabled for the AWS token-based implementation.
// For now, it gets activated only if AWS was chosen as the cloud provider.
// TODO: figure how to enable it manually for deployments that are not on AWS but still
// use ECR somehow?
func (p *ecrProvider) Enabled() bool {
	if p.region == "" {
		glog.Errorf("Called ecrProvider.Enabled() with no region set")
		return false
	}

	// Build the real ECR client for this region. Credentials is nil —
	// presumably the SDK then falls back to its default credential chain;
	// confirm against the aws-sdk-go session docs.
	getter := &ecrTokenGetter{svc: ecr.New(session.New(&aws.Config{
		Credentials: nil,
		Region:      &p.region,
	}))}
	// Log every signed request, mirroring the AWS cloudprovider's logging.
	getter.svc.Handlers.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})
	p.getter = getter
	return true
}
// LazyProvide implements DockerConfigProvider.LazyProvide. Should never be
// called: laziness is handled by lazyEcrProvider, which wraps this type.
func (p *ecrProvider) LazyProvide() *credentialprovider.DockerConfigEntry {
	return nil
}
// Provide implements DockerConfigProvider.Provide, refreshing ECR tokens on
// demand. Each authorization token is a base64-encoded "user:password" pair;
// a decoded credential entry is registered under this region's registry URL.
// Errors never propagate — an empty/partial config is returned instead.
//
// Fix: the original indexed parts[1] without checking that SplitN actually
// produced two parts, so a malformed token (no ":") panicked.
func (p *ecrProvider) Provide() credentialprovider.DockerConfig {
	cfg := credentialprovider.DockerConfig{}

	// TODO: fill in RegistryIds?
	params := &ecr.GetAuthorizationTokenInput{}
	output, err := p.getter.GetAuthorizationToken(params)
	if err != nil {
		glog.Errorf("while requesting ECR authorization token %v", err)
		return cfg
	}
	if output == nil {
		glog.Errorf("Got back no ECR token")
		return cfg
	}

	for _, data := range output.AuthorizationData {
		if data.ProxyEndpoint != nil &&
			data.AuthorizationToken != nil {
			decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
			if err != nil {
				glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err)
				return cfg
			}
			parts := strings.SplitN(string(decodedToken), ":", 2)
			if len(parts) != 2 {
				// Skip malformed tokens instead of panicking on parts[1].
				glog.Errorf("malformed ECR token for endpoint %v: missing \":\" separator", data.ProxyEndpoint)
				continue
			}
			user := parts[0]
			password := parts[1]
			entry := credentialprovider.DockerConfigEntry{
				Username: user,
				Password: password,
				// ECR doesn't care and Docker is about to obsolete it
				Email: "not@val.id",
			}

			glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region)
			// Add our config entry for this region's registry URLs
			cfg[p.regionURL] = entry
		}
	}
	return cfg
}
AWS: recognize us-east-2 region
The newly announced region must be added to our list of known regions.
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package credentials
import (
"encoding/base64"
"fmt"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/credentialprovider"
)
// AWSRegions is the complete list of regions known to the AWS cloudprovider
// and credentialprovider.
// NOTE: maintained by hand — extend it whenever AWS announces a new region.
var AWSRegions = [...]string{
	"us-east-1",
	"us-east-2",
	"us-west-1",
	"us-west-2",
	"eu-west-1",
	"eu-central-1",
	"ap-south-1",
	"ap-southeast-1",
	"ap-southeast-2",
	"ap-northeast-1",
	"ap-northeast-2",
	"cn-north-1",
	"us-gov-west-1",
	"sa-east-1",
}
// registryURLTemplate is the wildcard ECR registry host pattern; the %s is
// filled with the AWS region name.
const registryURLTemplate = "*.dkr.ecr.%s.amazonaws.com"
// awsHandlerLogger is a handler that logs all AWS SDK requests
// Copied from pkg/cloudprovider/providers/aws/log_handler.go
//
// Fix: the original dereferenced req.Config.Region unconditionally, which
// panics when the SDK config carries a nil Region pointer; aws.StringValue
// tolerates nil and yields "".
func awsHandlerLogger(req *request.Request) {
	service := req.ClientInfo.ServiceName
	region := aws.StringValue(req.Config.Region)

	name := "?"
	if req.Operation != nil {
		name = req.Operation.Name
	}

	glog.V(3).Infof("AWS request: %s:%s in %s", service, name, region)
}
// tokenGetter abstracts the single ECR API call this package makes so tests
// can substitute a fake implementation for the real ECR client.
type tokenGetter interface {
	GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error)
}
// ecrTokenGetter is the canonical tokenGetter implementation, backed by the
// real AWS ECR service client.
type ecrTokenGetter struct {
	svc *ecr.ECR
}

// GetAuthorizationToken delegates directly to the underlying ECR client.
func (p *ecrTokenGetter) GetAuthorizationToken(input *ecr.GetAuthorizationTokenInput) (*ecr.GetAuthorizationTokenOutput, error) {
	return p.svc.GetAuthorizationToken(input)
}
// lazyEcrProvider is a DockerConfigProvider that creates on demand an
// ecrProvider for a given region and then proxies requests to it.
type lazyEcrProvider struct {
	region string
	// regionURL is the wildcard ECR registry host pattern for region.
	regionURL string
	// actualProvider is built lazily on the first LazyProvide call.
	actualProvider *credentialprovider.CachingDockerConfigProvider
}

// Compile-time interface conformance check.
var _ credentialprovider.DockerConfigProvider = &lazyEcrProvider{}
// ecrProvider is a DockerConfigProvider that gets and refreshes 12-hour tokens
// from AWS to access ECR.
type ecrProvider struct {
	region    string
	regionURL string
	// getter fetches authorization tokens; replaceable for tests.
	getter tokenGetter
}

// Compile-time interface conformance check.
var _ credentialprovider.DockerConfigProvider = &ecrProvider{}
// Init registers one lazy ECR credential provider per known AWS region, so
// cross-region ECR access works. Providers are lazy because it is unlikely,
// though possible, that more than one region is ever used. This deliberately
// avoids package init(): the module should only be initialized when the AWS
// cloud provider is in use, avoiding timeouts on a non-existent provider.
func Init() {
	for _, region := range AWSRegions {
		name := "aws-ecr-" + region
		provider := &lazyEcrProvider{
			region:    region,
			regionURL: fmt.Sprintf(registryURLTemplate, region),
		}
		credentialprovider.RegisterCredentialProvider(name, provider)
	}
}
// Enabled implements DockerConfigProvider.Enabled for the lazy provider.
// Since we perform no checks/work of our own and actualProvider is only created
// later at image pulling time (if ever), always return true.
func (p *lazyEcrProvider) Enabled() bool {
	return true
}
// LazyProvide implements DockerConfigProvider.LazyProvide. It will be called
// by the client when attempting to pull an image and it will create the actual
// provider only when we actually need it the first time.
func (p *lazyEcrProvider) LazyProvide() *credentialprovider.DockerConfigEntry {
	// NOTE(review): this check-then-assign is unsynchronized; concurrent
	// image pulls could race on actualProvider — confirm callers serialize.
	if p.actualProvider == nil {
		glog.V(2).Infof("Creating ecrProvider for %s", p.region)
		p.actualProvider = &credentialprovider.CachingDockerConfigProvider{
			Provider: newEcrProvider(p.region, nil),
			// Refresh credentials a little earlier than expiration time
			Lifetime: 11*time.Hour + 55*time.Minute,
		}
		if !p.actualProvider.Enabled() {
			return nil
		}
	}
	// Return only the entry for this provider's own region URL.
	entry := p.actualProvider.Provide()[p.regionURL]
	return &entry
}
// Provide implements DockerConfigProvider.Provide by registering a single
// placeholder entry under this region's registry URL; real credentials are
// only fetched when the client calls Provider.LazyProvide() at image pull
// time.
func (p *lazyEcrProvider) Provide() credentialprovider.DockerConfig {
	return credentialprovider.DockerConfig{
		p.regionURL: {Provider: p},
	}
}
// newEcrProvider builds an ecrProvider for the given region, deriving its
// wildcard registry URL from the region name. getter may be nil; Enabled()
// installs the real ECR-backed getter in that case.
func newEcrProvider(region string, getter tokenGetter) *ecrProvider {
	p := &ecrProvider{region: region, getter: getter}
	p.regionURL = fmt.Sprintf(registryURLTemplate, region)
	return p
}
// Enabled implements DockerConfigProvider.Enabled for the AWS token-based implementation.
// For now, it gets activated only if AWS was chosen as the cloud provider.
// TODO: figure how to enable it manually for deployments that are not on AWS but still
// use ECR somehow?
func (p *ecrProvider) Enabled() bool {
	if p.region == "" {
		glog.Errorf("Called ecrProvider.Enabled() with no region set")
		return false
	}

	// Build the real ECR client for this region. Credentials is nil —
	// presumably the SDK then falls back to its default credential chain;
	// confirm against the aws-sdk-go session docs.
	getter := &ecrTokenGetter{svc: ecr.New(session.New(&aws.Config{
		Credentials: nil,
		Region:      &p.region,
	}))}
	// Log every signed request, mirroring the AWS cloudprovider's logging.
	getter.svc.Handlers.Sign.PushFrontNamed(request.NamedHandler{
		Name: "k8s/logger",
		Fn:   awsHandlerLogger,
	})
	p.getter = getter
	return true
}
// LazyProvide implements DockerConfigProvider.LazyProvide. Should never be
// called: laziness is handled by lazyEcrProvider, which wraps this type.
func (p *ecrProvider) LazyProvide() *credentialprovider.DockerConfigEntry {
	return nil
}
// Provide implements DockerConfigProvider.Provide, refreshing ECR tokens on
// demand. Each authorization token is a base64-encoded "user:password" pair;
// a decoded credential entry is registered under this region's registry URL.
// Errors never propagate — an empty/partial config is returned instead.
//
// Fix: the original indexed parts[1] without checking that SplitN actually
// produced two parts, so a malformed token (no ":") panicked.
func (p *ecrProvider) Provide() credentialprovider.DockerConfig {
	cfg := credentialprovider.DockerConfig{}

	// TODO: fill in RegistryIds?
	params := &ecr.GetAuthorizationTokenInput{}
	output, err := p.getter.GetAuthorizationToken(params)
	if err != nil {
		glog.Errorf("while requesting ECR authorization token %v", err)
		return cfg
	}
	if output == nil {
		glog.Errorf("Got back no ECR token")
		return cfg
	}

	for _, data := range output.AuthorizationData {
		if data.ProxyEndpoint != nil &&
			data.AuthorizationToken != nil {
			decodedToken, err := base64.StdEncoding.DecodeString(aws.StringValue(data.AuthorizationToken))
			if err != nil {
				glog.Errorf("while decoding token for endpoint %v %v", data.ProxyEndpoint, err)
				return cfg
			}
			parts := strings.SplitN(string(decodedToken), ":", 2)
			if len(parts) != 2 {
				// Skip malformed tokens instead of panicking on parts[1].
				glog.Errorf("malformed ECR token for endpoint %v: missing \":\" separator", data.ProxyEndpoint)
				continue
			}
			user := parts[0]
			password := parts[1]
			entry := credentialprovider.DockerConfigEntry{
				Username: user,
				Password: password,
				// ECR doesn't care and Docker is about to obsolete it
				Email: "not@val.id",
			}

			glog.V(3).Infof("Adding credentials for user %s in %s", user, p.region)
			// Add our config entry for this region's registry URLs
			cfg[p.regionURL] = entry
		}
	}
	return cfg
}
|
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
// This file was autogenerated by openapi-gen. Do not edit it manually!
package openapi
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions returns the OpenAPI schema definition for every type
// this package exposes, keyed by the type's canonical Go import path.
// NOTE: this file is generated by openapi-gen — regeneration will drop any
// hand-written comments.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"k8s.io/api/autoscaling/v1.ContainerResourceMetricSource":        schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus":        schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.CrossVersionObjectReference":          schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference(ref),
		"k8s.io/api/autoscaling/v1.ExternalMetricSource":                 schema_k8sio_api_autoscaling_v1_ExternalMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ExternalMetricStatus":                 schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler":              schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerCondition":     schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerList":          schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec":          schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus":        schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus(ref),
		"k8s.io/api/autoscaling/v1.MetricSpec":                           schema_k8sio_api_autoscaling_v1_MetricSpec(ref),
		"k8s.io/api/autoscaling/v1.MetricStatus":                         schema_k8sio_api_autoscaling_v1_MetricStatus(ref),
		"k8s.io/api/autoscaling/v1.ObjectMetricSource":                   schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ObjectMetricStatus":                   schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.PodsMetricSource":                     schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref),
		"k8s.io/api/autoscaling/v1.PodsMetricStatus":                     schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.ResourceMetricSource":                 schema_k8sio_api_autoscaling_v1_ResourceMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ResourceMetricStatus":                 schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.Scale":                                schema_k8sio_api_autoscaling_v1_Scale(ref),
		"k8s.io/api/autoscaling/v1.ScaleSpec":                            schema_k8sio_api_autoscaling_v1_ScaleSpec(ref),
		"k8s.io/api/autoscaling/v1.ScaleStatus":                          schema_k8sio_api_autoscaling_v1_ScaleStatus(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup":                  schema_pkg_apis_meta_v1_APIGroup(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList":              schema_pkg_apis_meta_v1_APIGroupList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource":               schema_pkg_apis_meta_v1_APIResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList":           schema_pkg_apis_meta_v1_APIResourceList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions":               schema_pkg_apis_meta_v1_APIVersions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ApplyOptions":              schema_pkg_apis_meta_v1_ApplyOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Condition":                 schema_pkg_apis_meta_v1_Condition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions":             schema_pkg_apis_meta_v1_CreateOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions":             schema_pkg_apis_meta_v1_DeleteOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Duration":                  schema_pkg_apis_meta_v1_Duration(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1":                  schema_pkg_apis_meta_v1_FieldsV1(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions":                schema_pkg_apis_meta_v1_GetOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind":                 schema_pkg_apis_meta_v1_GroupKind(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource":             schema_pkg_apis_meta_v1_GroupResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion":              schema_pkg_apis_meta_v1_GroupVersion(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery":  schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind":          schema_pkg_apis_meta_v1_GroupVersionKind(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource":      schema_pkg_apis_meta_v1_GroupVersionResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent":             schema_pkg_apis_meta_v1_InternalEvent(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector":             schema_pkg_apis_meta_v1_LabelSelector(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement":  schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.List":                      schema_pkg_apis_meta_v1_List(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta":                  schema_pkg_apis_meta_v1_ListMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions":               schema_pkg_apis_meta_v1_ListOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry":        schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime":                 schema_pkg_apis_meta_v1_MicroTime(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta":                schema_pkg_apis_meta_v1_ObjectMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference":            schema_pkg_apis_meta_v1_OwnerReference(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata":     schema_pkg_apis_meta_v1_PartialObjectMetadata(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Patch":                     schema_pkg_apis_meta_v1_Patch(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions":              schema_pkg_apis_meta_v1_PatchOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions":             schema_pkg_apis_meta_v1_Preconditions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths":                 schema_pkg_apis_meta_v1_RootPaths(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Status":                    schema_pkg_apis_meta_v1_Status(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause":               schema_pkg_apis_meta_v1_StatusCause(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails":             schema_pkg_apis_meta_v1_StatusDetails(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Table":                     schema_pkg_apis_meta_v1_Table(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition":     schema_pkg_apis_meta_v1_TableColumnDefinition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions":              schema_pkg_apis_meta_v1_TableOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableRow":                  schema_pkg_apis_meta_v1_TableRow(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition":         schema_pkg_apis_meta_v1_TableRowCondition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Time":                      schema_pkg_apis_meta_v1_Time(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp":                 schema_pkg_apis_meta_v1_Timestamp(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta":                  schema_pkg_apis_meta_v1_TypeMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions":             schema_pkg_apis_meta_v1_UpdateOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent":                schema_pkg_apis_meta_v1_WatchEvent(ref),
		"k8s.io/apimachinery/pkg/runtime.RawExtension":                   schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref),
		"k8s.io/apimachinery/pkg/runtime.TypeMeta":                       schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref),
		"k8s.io/apimachinery/pkg/runtime.Unknown":                        schema_k8sio_apimachinery_pkg_runtime_Unknown(ref),
		"k8s.io/apimachinery/pkg/version.Info":                           schema_k8sio_apimachinery_pkg_version_Info(ref),
	}
}
// schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource builds the
// OpenAPI definition for autoscaling/v1 ContainerResourceMetricSource.
// Generated by openapi-gen — do not hand-edit beyond regeneration.
func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in the requests and limits, describing a single container in each of the pods of the current scale target(e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built into Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"targetAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"targetAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"container": {
						SchemaProps: spec.SchemaProps{
							Description: "container is the name of the container in the pods of the scaling target.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "container"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus builds the
// OpenAPI definition for autoscaling/v1 ContainerResourceMetricStatus.
// Generated by openapi-gen — do not hand-edit beyond regeneration.
func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"currentAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"currentAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"container": {
						SchemaProps: spec.SchemaProps{
							Description: "container is the name of the container in the pods of the scaling taget",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "currentAverageValue", "container"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference builds the
// OpenAPI definition for autoscaling/v1 CrossVersionObjectReference.
// Generated by openapi-gen — do not hand-edit beyond regeneration.
func schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "API version of the referent",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"kind", "name"},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
	}
}
// schema_k8sio_api_autoscaling_v1_ExternalMetricSource returns the OpenAPI
// definition for k8s.io/api/autoscaling/v1.ExternalMetricSource. ref resolves
// the Quantity and LabelSelector references listed in Dependencies.
// Appears generated — edit the source type's comments and regenerate.
func schema_k8sio_api_autoscaling_v1_ExternalMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of the metric in question.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"metricSelector": {
SchemaProps: spec.SchemaProps{
Description: "metricSelector is used to identify a specific time series within a given metric.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
// targetValue and targetAverageValue are mutually exclusive per the API
// contract (stated in their descriptions); the schema itself cannot express that.
"targetValue": {
SchemaProps: spec.SchemaProps{
Description: "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"targetAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"metricName"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_ExternalMetricStatus returns the OpenAPI
// definition for k8s.io/api/autoscaling/v1.ExternalMetricStatus (the status
// counterpart of ExternalMetricSource). Appears generated — regenerate rather
// than hand-edit.
func schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of a metric used for autoscaling in metric system.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"metricSelector": {
SchemaProps: spec.SchemaProps{
Description: "metricSelector is used to identify a specific time series within a given metric.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"currentValue": {
SchemaProps: spec.SchemaProps{
Description: "currentValue is the current value of the metric (as a quantity)",
// Required field with a struct default (serialized Quantity zero value).
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"currentAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "currentAverageValue is the current value of metric averaged over autoscaled pods.",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"metricName", "currentValue"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler returns the OpenAPI
// definition for the top-level k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler
// object: standard kind/apiVersion/metadata plus spec and status sub-objects.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "configuration of a horizontal pod autoscaler.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "current information about the autoscaler.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec", "k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerCondition, the
// standard condition tuple (type, status, lastTransitionTime, reason, message).
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "type describes the current condition",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "status is the status of the condition (True, False, Unknown)",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"lastTransitionTime": {
SchemaProps: spec.SchemaProps{
Description: "lastTransitionTime is the last time the condition transitioned from one status to another",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"reason": {
SchemaProps: spec.SchemaProps{
Description: "reason is the reason for the condition's last transition.",
Type: []string{"string"},
Format: "",
},
},
"message": {
SchemaProps: spec.SchemaProps{
Description: "message is a human-readable explanation containing details about the transition",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"type", "status"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerList: standard
// list envelope (kind/apiVersion/ListMeta) around an array of HPA items.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "list of horizontal pod autoscaler objects.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard list metadata.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "list of horizontal pod autoscaler objects.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerSpec.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "specification of a horizontal pod autoscaler.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"scaleTargetRef": {
SchemaProps: spec.SchemaProps{
Description: "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
},
},
"minReplicas": {
SchemaProps: spec.SchemaProps{
Description: "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.",
Type: []string{"integer"},
Format: "int32",
},
},
"maxReplicas": {
SchemaProps: spec.SchemaProps{
Description: "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
// Required scalar field, hence an explicit zero default.
Default: 0,
Type: []string{"integer"},
Format: "int32",
},
},
"targetCPUUtilizationPercentage": {
SchemaProps: spec.SchemaProps{
Description: "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"scaleTargetRef", "maxReplicas"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.CrossVersionObjectReference"},
}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerStatus.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "current status of a horizontal pod autoscaler",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"observedGeneration": {
SchemaProps: spec.SchemaProps{
Description: "most recent generation observed by this autoscaler.",
Type: []string{"integer"},
Format: "int64",
},
},
"lastScaleTime": {
SchemaProps: spec.SchemaProps{
Description: "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
},
},
"currentReplicas": {
SchemaProps: spec.SchemaProps{
Description: "current number of replicas of pods managed by this autoscaler.",
Default: 0,
Type: []string{"integer"},
Format: "int32",
},
},
"desiredReplicas": {
SchemaProps: spec.SchemaProps{
Description: "desired number of replicas of pods managed by this autoscaler.",
Default: 0,
Type: []string{"integer"},
Format: "int32",
},
},
"currentCPUUtilizationPercentage": {
SchemaProps: spec.SchemaProps{
Description: "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
Type: []string{"integer"},
Format: "int32",
},
},
},
Required: []string{"currentReplicas", "desiredReplicas"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
}
}
// schema_k8sio_api_autoscaling_v1_MetricSpec returns the OpenAPI definition for
// autoscaling/v1.MetricSpec: a discriminated union where `type` selects which
// one of object/pods/resource/containerResource/external is populated.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_MetricSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"object": {
SchemaProps: spec.SchemaProps{
Description: "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
Ref: ref("k8s.io/api/autoscaling/v1.ObjectMetricSource"),
},
},
"pods": {
SchemaProps: spec.SchemaProps{
Description: "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
Ref: ref("k8s.io/api/autoscaling/v1.PodsMetricSource"),
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
Ref: ref("k8s.io/api/autoscaling/v1.ResourceMetricSource"),
},
},
"containerResource": {
SchemaProps: spec.SchemaProps{
Description: "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
Ref: ref("k8s.io/api/autoscaling/v1.ContainerResourceMetricSource"),
},
},
"external": {
SchemaProps: spec.SchemaProps{
Description: "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
Ref: ref("k8s.io/api/autoscaling/v1.ExternalMetricSource"),
},
},
},
Required: []string{"type"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.ContainerResourceMetricSource", "k8s.io/api/autoscaling/v1.ExternalMetricSource", "k8s.io/api/autoscaling/v1.ObjectMetricSource", "k8s.io/api/autoscaling/v1.PodsMetricSource", "k8s.io/api/autoscaling/v1.ResourceMetricSource"},
}
}
// schema_k8sio_api_autoscaling_v1_MetricStatus returns the OpenAPI definition
// for autoscaling/v1.MetricStatus — the status-side mirror of MetricSpec, with
// the same `type` discriminator over the *Status variants.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_MetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "MetricStatus describes the last-read state of a single metric.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"type": {
SchemaProps: spec.SchemaProps{
Description: "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"object": {
SchemaProps: spec.SchemaProps{
Description: "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
Ref: ref("k8s.io/api/autoscaling/v1.ObjectMetricStatus"),
},
},
"pods": {
SchemaProps: spec.SchemaProps{
Description: "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
Ref: ref("k8s.io/api/autoscaling/v1.PodsMetricStatus"),
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
Ref: ref("k8s.io/api/autoscaling/v1.ResourceMetricStatus"),
},
},
"containerResource": {
SchemaProps: spec.SchemaProps{
Description: "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
Ref: ref("k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus"),
},
},
"external": {
SchemaProps: spec.SchemaProps{
Description: "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
Ref: ref("k8s.io/api/autoscaling/v1.ExternalMetricStatus"),
},
},
},
Required: []string{"type"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus", "k8s.io/api/autoscaling/v1.ExternalMetricStatus", "k8s.io/api/autoscaling/v1.ObjectMetricStatus", "k8s.io/api/autoscaling/v1.PodsMetricStatus", "k8s.io/api/autoscaling/v1.ResourceMetricStatus"},
}
}
// schema_k8sio_api_autoscaling_v1_ObjectMetricSource returns the OpenAPI
// definition for autoscaling/v1.ObjectMetricSource.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"target": {
SchemaProps: spec.SchemaProps{
Description: "target is the described Kubernetes object.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
},
},
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of the metric in question.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"targetValue": {
SchemaProps: spec.SchemaProps{
Description: "targetValue is the target value of the metric (as a quantity).",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"averageValue": {
SchemaProps: spec.SchemaProps{
Description: "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"target", "metricName", "targetValue"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.CrossVersionObjectReference", "k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_ObjectMetricStatus returns the OpenAPI
// definition for autoscaling/v1.ObjectMetricStatus (status counterpart of
// ObjectMetricSource). Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"target": {
SchemaProps: spec.SchemaProps{
Description: "target is the described Kubernetes object.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
},
},
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of the metric in question.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"currentValue": {
SchemaProps: spec.SchemaProps{
Description: "currentValue is the current value of the metric (as a quantity).",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
"averageValue": {
SchemaProps: spec.SchemaProps{
Description: "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"target", "metricName", "currentValue"},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.CrossVersionObjectReference", "k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_PodsMetricSource returns the OpenAPI
// definition for autoscaling/v1.PodsMetricSource.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of the metric in question",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"targetAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
},
Required: []string{"metricName", "targetAverageValue"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_PodsMetricStatus returns the OpenAPI
// definition for autoscaling/v1.PodsMetricStatus (status counterpart of
// PodsMetricSource). Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"metricName": {
SchemaProps: spec.SchemaProps{
Description: "metricName is the name of the metric in question",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"currentAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
},
},
},
Required: []string{"metricName", "currentAverageValue"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
// schema_k8sio_api_autoscaling_v1_ResourceMetricSource returns the OpenAPI
// definition for autoscaling/v1.ResourceMetricSource.
// Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_ResourceMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "name is the name of the resource in question.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"targetAverageUtilization": {
SchemaProps: spec.SchemaProps{
Description: "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
Type: []string{"integer"},
Format: "int32",
},
},
"targetAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"name"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
// schema_k8sio_api_autoscaling_v1_ResourceMetricStatus returns the OpenAPI
// definition for autoscaling/v1.ResourceMetricStatus (status counterpart of
// ResourceMetricSource). Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"name": {
SchemaProps: spec.SchemaProps{
Description: "name is the name of the resource in question.",
Default: "",
Type: []string{"string"},
Format: "",
},
},
"currentAverageUtilization": {
SchemaProps: spec.SchemaProps{
Description: "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
Type: []string{"integer"},
Format: "int32",
},
},
"currentAverageValue": {
SchemaProps: spec.SchemaProps{
Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
},
},
},
Required: []string{"name", "currentAverageValue"},
},
},
Dependencies: []string{
"k8s.io/apimachinery/pkg/api/resource.Quantity"},
}
}
// schema_k8sio_api_autoscaling_v1_Scale returns the OpenAPI definition for the
// autoscaling/v1.Scale subresource (kind/apiVersion/metadata plus ScaleSpec and
// read-only ScaleStatus). Appears generated — regenerate rather than hand-edit.
func schema_k8sio_api_autoscaling_v1_Scale(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "Scale represents a scaling request for a resource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.ScaleSpec"),
},
},
"status": {
SchemaProps: spec.SchemaProps{
Description: "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/autoscaling/v1.ScaleStatus"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/autoscaling/v1.ScaleSpec", "k8s.io/api/autoscaling/v1.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}
// schema_k8sio_api_autoscaling_v1_ScaleSpec returns the OpenAPI definition for
// autoscaling/v1.ScaleSpec. The ref parameter is unused here because the schema
// references no other definitions. Appears generated — regenerate rather than
// hand-edit.
func schema_k8sio_api_autoscaling_v1_ScaleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ScaleSpec describes the attributes of a scale subresource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "desired number of instances for the scaled object.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
// schema_k8sio_api_autoscaling_v1_ScaleStatus returns the OpenAPI definition
// for autoscaling/v1.ScaleStatus. The ref parameter is unused here because the
// schema references no other definitions. Appears generated — regenerate rather
// than hand-edit.
func schema_k8sio_api_autoscaling_v1_ScaleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ScaleStatus represents the current status of a scale subresource.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"replicas": {
SchemaProps: spec.SchemaProps{
Description: "actual number of observed instances of the scaled object.",
Default: 0,
Type: []string{"integer"},
Format: "int32",
},
},
"selector": {
SchemaProps: spec.SchemaProps{
Description: "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors",
Type: []string{"string"},
Format: "",
},
},
},
Required: []string{"replicas"},
},
},
}
}
// schema_pkg_apis_meta_v1_APIGroup returns the OpenAPI definition for meta/v1
// APIGroup (discovery document for one API group). "name" and "versions" are
// required; "versions", "preferredVersion" and "serverAddressByClientCIDRs"
// reference other meta/v1 types via the ref callback, and those referenced
// types are repeated in Dependencies so the aggregator can pull them in.
func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIGroup contains the name, the supported versions, and the preferred version of a group.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the group.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"versions": {
						SchemaProps: spec.SchemaProps{
							Description: "versions are the versions supported in this group.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
									},
								},
							},
						},
					},
					"preferredVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "preferredVersion is the version preferred by the API server, which probably is the storage version.",
							Default: map[string]interface{}{},
							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
						},
					},
					"serverAddressByClientCIDRs": {
						SchemaProps: spec.SchemaProps{
							Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
									},
								},
							},
						},
					},
				},
				Required: []string{"name", "versions"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery", "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
	}
}
// schema_pkg_apis_meta_v1_APIGroupList returns the OpenAPI definition for
// meta/v1 APIGroupList: the /apis discovery payload — a required array of
// APIGroup items (referenced via the ref callback and listed in Dependencies).
func schema_pkg_apis_meta_v1_APIGroupList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"groups": {
						SchemaProps: spec.SchemaProps{
							Description: "groups is a list of APIGroup.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"),
									},
								},
							},
						},
					},
				},
				Required: []string{"groups"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"},
	}
}
// schema_pkg_apis_meta_v1_APIResource returns the OpenAPI definition for
// meta/v1 APIResource (one entry of a discovery resource list). All properties
// are scalars or string arrays, so the ref callback is unused and there are no
// Dependencies. Required fields: name, singularName, namespaced, kind, verbs.
// NOTE(review): the stray `\"` in the "group" description is inherited verbatim
// from the upstream type's comment — do not "fix" it here; the generator would
// reintroduce it.
func schema_pkg_apis_meta_v1_APIResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIResource specifies the name of a resource and whether it is namespaced.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the plural name of the resource.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"singularName": {
						SchemaProps: spec.SchemaProps{
							Description: "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"namespaced": {
						SchemaProps: spec.SchemaProps{
							Description: "namespaced indicates if a resource is namespaced or not.",
							Default: false,
							Type: []string{"boolean"},
							Format: "",
						},
					},
					"group": {
						SchemaProps: spec.SchemaProps{
							Description: "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
							Type: []string{"string"},
							Format: "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Description: "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
							Type: []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"verbs": {
						SchemaProps: spec.SchemaProps{
							Description: "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"shortNames": {
						SchemaProps: spec.SchemaProps{
							Description: "shortNames is a list of suggested short names of the resource.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"categories": {
						SchemaProps: spec.SchemaProps{
							Description: "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"storageVersionHash": {
						SchemaProps: spec.SchemaProps{
							Description: "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"name", "singularName", "namespaced", "kind", "verbs"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_APIResourceList returns the OpenAPI definition for
// meta/v1 APIResourceList: a required "groupVersion" string plus a required
// array of APIResource items (referenced via ref and listed in Dependencies).
func schema_pkg_apis_meta_v1_APIResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"groupVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "groupVersion is the group and version this APIResourceList is for.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "resources contains the name of the resources and if they are namespaced.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"),
									},
								},
							},
						},
					},
				},
				Required: []string{"groupVersion", "resources"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"},
	}
}
// schema_pkg_apis_meta_v1_APIVersions returns the OpenAPI definition for
// meta/v1 APIVersions (legacy /api discovery). Requires "versions" (string
// array) and "serverAddressByClientCIDRs" (array of ServerAddressByClientCIDR,
// referenced via ref and declared in Dependencies).
func schema_pkg_apis_meta_v1_APIVersions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"versions": {
						SchemaProps: spec.SchemaProps{
							Description: "versions are the api versions that are available.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"serverAddressByClientCIDRs": {
						SchemaProps: spec.SchemaProps{
							Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
									},
								},
							},
						},
					},
				},
				Required: []string{"versions", "serverAddressByClientCIDRs"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
	}
}
// schema_pkg_apis_meta_v1_ApplyOptions returns the OpenAPI definition for
// meta/v1 ApplyOptions. All fields are scalars or string arrays (ref unused);
// "force" and "fieldManager" are required, mirroring the apply contract.
func schema_pkg_apis_meta_v1_ApplyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ApplyOptions may be provided when applying an API object. FieldManager is required for apply requests. ApplyOptions is equivalent to PatchOptions. It is provided as a convenience with documentation that speaks specifically to how the options fields relate to apply.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"force": {
						SchemaProps: spec.SchemaProps{
							Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people.",
							Default: false,
							Type: []string{"boolean"},
							Format: "",
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"force", "fieldManager"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Condition returns the OpenAPI definition for meta/v1
// Condition (the standard .status.conditions element). "lastTransitionTime"
// references meta/v1.Time via ref (hence the Dependencies entry); type, status,
// lastTransitionTime, reason, and message are required.
func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Condition contains details for one aspect of the current state of this API Resource.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type of condition in CamelCase or in foo.example.com/CamelCase.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "status of the condition, one of True, False, Unknown.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
							Type: []string{"integer"},
							Format: "int64",
						},
					},
					"lastTransitionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
							Default: map[string]interface{}{},
							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "message is a human readable message indicating details about the transition. This may be an empty string.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"type", "status", "lastTransitionTime", "reason", "message"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_CreateOptions returns the OpenAPI definition for
// meta/v1 CreateOptions. All fields are optional scalars or string arrays, so
// the ref callback is unused and no Required list is set.
func schema_pkg_apis_meta_v1_CreateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CreateOptions may be provided when creating an API object.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_DeleteOptions returns the OpenAPI definition for
// meta/v1 DeleteOptions. All fields are optional; "preconditions" references
// meta/v1.Preconditions via ref, which is why it is the sole Dependencies
// entry.
func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "DeleteOptions may be provided when deleting an API object.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"gracePeriodSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
							Type: []string{"integer"},
							Format: "int64",
						},
					},
					"preconditions": {
						SchemaProps: spec.SchemaProps{
							Description: "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
							Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"),
						},
					},
					"orphanDependents": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
							Type: []string{"boolean"},
							Format: "",
						},
					},
					"propagationPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
							Type: []string{"string"},
							Format: "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type: []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type: []string{"string"},
										Format: "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"},
	}
}
// schema_pkg_apis_meta_v1_Duration returns the OpenAPI definition for meta/v1
// Duration. Unlike the literal-only schemas in this file, Type/Format are
// delegated to the v1.Duration type's own OpenAPISchemaType/OpenAPISchemaFormat
// methods so the schema tracks the type's custom JSON representation.
func schema_pkg_apis_meta_v1_Duration(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.",
				Type: v1.Duration{}.OpenAPISchemaType(),
				Format: v1.Duration{}.OpenAPISchemaFormat(),
			},
		},
	}
}
// schema_pkg_apis_meta_v1_FieldsV1 returns the OpenAPI definition for meta/v1
// FieldsV1: an opaque object with no declared properties (the field-set trie
// is free-form JSON). The ref callback is unused.
func schema_pkg_apis_meta_v1_FieldsV1(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
				Type: []string{"object"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GetOptions returns the OpenAPI definition for
// meta/v1 GetOptions: three optional string fields (kind, apiVersion,
// resourceVersion). The ref callback is unused.
func schema_pkg_apis_meta_v1_GetOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GetOptions is the standard query options to the standard REST get call.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type: []string{"string"},
							Format: "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type: []string{"string"},
							Format: "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupKind returns the OpenAPI definition for meta/v1
// GroupKind: two required string fields, "group" and "kind". The ref callback
// is unused.
func schema_pkg_apis_meta_v1_GroupKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"group": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"group", "kind"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupResource returns the OpenAPI definition for
// meta/v1 GroupResource: two required string fields, "group" and "resource".
// The ref callback is unused.
func schema_pkg_apis_meta_v1_GroupResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"group": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"resource": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"group", "resource"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersion returns the OpenAPI definition for
// meta/v1 GroupVersion: two required string fields, "group" and "version".
// The ref callback is unused.
func schema_pkg_apis_meta_v1_GroupVersion(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersion contains the \"group\" and the \"version\", which uniquely identifies the API.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"group": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"group", "version"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionForDiscovery returns the OpenAPI
// definition for meta/v1 GroupVersionForDiscovery: required "groupVersion"
// ("group/version" form) and "version" strings. The ref callback is unused.
func schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"groupVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "groupVersion specifies the API group and version in the form \"group/version\"",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Description: "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.",
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"groupVersion", "version"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionKind returns the OpenAPI definition for
// meta/v1 GroupVersionKind: three required string fields — "group", "version",
// "kind". The ref callback is unused.
func schema_pkg_apis_meta_v1_GroupVersionKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"group": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"group", "version", "kind"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionResource returns the OpenAPI definition
// for meta/v1 GroupVersionResource: three required string fields — "group",
// "version", "resource". The ref callback is unused.
func schema_pkg_apis_meta_v1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"group": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"resource": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
				},
				Required: []string{"group", "version", "resource"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_InternalEvent returns the OpenAPI definition for
// meta/v1 InternalEvent. Both "Type" and "Object" are required; "Object"
// references runtime.Object via ref, which is why that type appears in
// Dependencies. Property names are capitalized because the Go fields carry no
// JSON tags.
func schema_pkg_apis_meta_v1_InternalEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "InternalEvent makes watch.Event versioned",
				Type: []string{"object"},
				Properties: map[string]spec.Schema{
					"Type": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type: []string{"string"},
							Format: "",
						},
					},
					"Object": {
						SchemaProps: spec.SchemaProps{
							Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Bookmark: the object (instance of a type being watched) where\n only ResourceVersion field is set. On successful restart of watch from a\n bookmark resourceVersion, client is guaranteed to not get repeat event\n nor miss any events.\n * If Type is Error: *api.Status is recommended; other types may make sense\n depending on context.",
							Ref: ref("k8s.io/apimachinery/pkg/runtime.Object"),
						},
					},
				},
				Required: []string{"Type", "Object"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/runtime.Object"},
	}
}
// schema_pkg_apis_meta_v1_LabelSelector returns the OpenAPI definition for the
// meta/v1 LabelSelector type: "matchLabels" (string map) and
// "matchExpressions" (array of LabelSelectorRequirement refs). The
// x-kubernetes-map-type: atomic extension marks the whole selector as replaced,
// not merged, by server-side apply.
func schema_pkg_apis_meta_v1_LabelSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"matchLabels": {
						SchemaProps: spec.SchemaProps{
							Description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"matchExpressions": {
						SchemaProps: spec.SchemaProps{
							Description: "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"),
									},
								},
							},
						},
					},
				},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"},
	}
}
// schema_pkg_apis_meta_v1_LabelSelectorRequirement returns the OpenAPI
// definition for the meta/v1 LabelSelectorRequirement type: required "key" and
// "operator" strings plus an optional "values" string array. The "key"
// property carries strategic-merge-patch extensions (merge keyed on "key").
func schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"key": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "key",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "key is the label key that the selector applies to.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"operator": {
						SchemaProps: spec.SchemaProps{
							Description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"values": {
						SchemaProps: spec.SchemaProps{
							Description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"key", "operator"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_List returns the OpenAPI definition for the meta/v1
// List type: the standard kind/apiVersion/metadata trio plus a required
// "items" array of runtime.RawExtension (objects the server may not know).
// Referenced definitions (ListMeta, RawExtension) are listed in Dependencies.
func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "List holds a list of objects, which may not be known by the server.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "List of objects",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_pkg_apis_meta_v1_ListMeta returns the OpenAPI definition for the
// meta/v1 ListMeta type: optional selfLink (deprecated), resourceVersion,
// continue token, and remainingItemCount. The ref callback is unused because
// the schema references no other definitions.
func schema_pkg_apis_meta_v1_ListMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"selfLink": {
						SchemaProps: spec.SchemaProps{
							Description: "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"continue": {
						SchemaProps: spec.SchemaProps{
							Description: "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"remainingItemCount": {
						SchemaProps: spec.SchemaProps{
							Description: "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ListOptions returns the OpenAPI definition for the
// meta/v1 ListOptions type: the query parameters accepted by standard REST
// list/watch calls (selectors, watch flags, resourceVersion constraints,
// timeout, limit, and continue token). All properties are optional scalars;
// the ref callback is unused because nothing here references another schema.
func schema_pkg_apis_meta_v1_ListOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ListOptions is the query options to a standard REST list call.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"labelSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"fieldSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"watch": {
						SchemaProps: spec.SchemaProps{
							Description: "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"allowWatchBookmarks": {
						SchemaProps: spec.SchemaProps{
							Description: "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersionMatch": {
						SchemaProps: spec.SchemaProps{
							Description: "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"timeoutSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"limit": {
						SchemaProps: spec.SchemaProps{
							Description: "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"continue": {
						SchemaProps: spec.SchemaProps{
							Description: "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ManagedFieldsEntry returns the OpenAPI definition
// for the meta/v1 ManagedFieldsEntry type (server-side-apply field ownership
// records). "time" and "fieldsV1" are $refs resolved through the ref callback
// and declared in Dependencies; the rest are optional strings.
func schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"manager": {
						SchemaProps: spec.SchemaProps{
							Description: "Manager is an identifier of the workflow managing these fields.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"operation": {
						SchemaProps: spec.SchemaProps{
							Description: "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"time": {
						SchemaProps: spec.SchemaProps{
							Description: "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"fieldsType": {
						SchemaProps: spec.SchemaProps{
							Description: "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"fieldsV1": {
						SchemaProps: spec.SchemaProps{
							Description: "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1"),
						},
					},
					"subresource": {
						SchemaProps: spec.SchemaProps{
							Description: "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_MicroTime returns the OpenAPI definition for the
// meta/v1 MicroTime type. Unlike the struct-shaped schemas in this file, the
// schema's type and format are delegated to MicroTime's own
// OpenAPISchemaType/OpenAPISchemaFormat methods, since it serializes as a
// scalar. The ref callback is unused: this schema has no dependencies.
func schema_pkg_apis_meta_v1_MicroTime(ref common.ReferenceCallback) common.OpenAPIDefinition {
	props := spec.SchemaProps{
		Description: "MicroTime is version of Time with microsecond level precision.",
		Type:        v1.MicroTime{}.OpenAPISchemaType(),
		Format:      v1.MicroTime{}.OpenAPISchemaFormat(),
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{SchemaProps: props},
	}
}
// schema_pkg_apis_meta_v1_ObjectMeta returns the OpenAPI definition for the
// meta/v1 ObjectMeta type — the standard metadata carried by every persisted
// Kubernetes object (name, namespace, uid, timestamps, labels, annotations,
// owner references, finalizers, managed fields, ...). Properties that point
// at other schemas (Time, OwnerReference, ManagedFieldsEntry) use the ref
// callback and are declared in Dependencies.
func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"generateName": {
						SchemaProps: spec.SchemaProps{
							Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"namespace": {
						SchemaProps: spec.SchemaProps{
							Description: "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"selfLink": {
						SchemaProps: spec.SchemaProps{
							Description: "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"generation": {
						SchemaProps: spec.SchemaProps{
							Description: "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"creationTimestamp": {
						SchemaProps: spec.SchemaProps{
							Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"deletionTimestamp": {
						SchemaProps: spec.SchemaProps{
							Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"deletionGracePeriodSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"labels": {
						SchemaProps: spec.SchemaProps{
							Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					// Strategic merge patch merges ownerReferences entries keyed
					// on "uid" rather than replacing the whole list.
					"ownerReferences": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "uid",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
									},
								},
							},
						},
					},
					"finalizers": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-strategy": "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order.  Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"clusterName": {
						SchemaProps: spec.SchemaProps{
							Description: "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"managedFields": {
						SchemaProps: spec.SchemaProps{
							Description: "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry", "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_OwnerReference returns the OpenAPI definition for
// the meta/v1 OwnerReference type: required apiVersion/kind/name/uid strings
// plus optional controller and blockOwnerDeletion booleans. The
// x-kubernetes-map-type: atomic extension marks each reference as replaced
// wholesale by server-side apply. The ref callback is unused (no $refs here).
func schema_pkg_apis_meta_v1_OwnerReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "API version of the referent.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"controller": {
						SchemaProps: spec.SchemaProps{
							Description: "If true, this reference points to the managing controller.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"blockOwnerDeletion": {
						SchemaProps: spec.SchemaProps{
							Description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"apiVersion", "kind", "name", "uid"},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_PartialObjectMetadata returns the OpenAPI definition
// for the meta/v1 PartialObjectMetadata type: the kind/apiVersion pair plus a
// "metadata" $ref to ObjectMeta (resolved through the ref callback and listed
// in Dependencies). No properties are required.
func schema_pkg_apis_meta_v1_PartialObjectMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_meta_v1_PartialObjectMetadataList builds the OpenAPI definition
// for meta/v1.PartialObjectMetadataList: list metadata plus a required "items"
// array of PartialObjectMetadata references.
func schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PartialObjectMetadataList contains a list of objects containing only their metadata",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "items contains each of the included items.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"},
	}
}
// schema_pkg_apis_meta_v1_Patch builds the OpenAPI definition for meta/v1.Patch,
// an opaque object with no declared properties. ref is unused but kept to match
// the common.ReferenceCallback signature shared by all schema builders.
func schema_pkg_apis_meta_v1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
				Type:        []string{"object"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_PatchOptions builds the OpenAPI definition for
// meta/v1.PatchOptions (dryRun, force, fieldManager plus TypeMeta strings).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_PatchOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"force": {
						SchemaProps: spec.SchemaProps{
							Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Preconditions builds the OpenAPI definition for
// meta/v1.Preconditions (optional uid and resourceVersion targets).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_Preconditions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifies the target UID.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "Specifies the target ResourceVersion",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_RootPaths builds the OpenAPI definition for
// meta/v1.RootPaths: a single required "paths" string array.
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_RootPaths(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"paths": {
						SchemaProps: spec.SchemaProps{
							Description: "paths are the paths available at root.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"paths"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ServerAddressByClientCIDR builds the OpenAPI definition
// for meta/v1.ServerAddressByClientCIDR (required clientCIDR and serverAddress).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"clientCIDR": {
						SchemaProps: spec.SchemaProps{
							Description: "The CIDR with which clients can match their IP to figure out the server address that they should use.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"serverAddress": {
						SchemaProps: spec.SchemaProps{
							Description: "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"clientCIDR", "serverAddress"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Status builds the OpenAPI definition for meta/v1.Status,
// the generic API call result carrying status/message/reason/code and an optional
// StatusDetails reference resolved through ref.
func schema_pkg_apis_meta_v1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Status is a return value for calls that don't return other objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "A human-readable description of the status of this operation.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"details": {
						SchemaProps: spec.SchemaProps{
							Description: "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"),
						},
					},
					"code": {
						SchemaProps: spec.SchemaProps{
							Description: "Suggested HTTP return code for this status, 0 if not set.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"},
	}
}
// schema_pkg_apis_meta_v1_StatusCause builds the OpenAPI definition for
// meta/v1.StatusCause (reason, message, field — all optional strings).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_StatusCause(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "A human-readable description of the cause of the error. This field may be presented as-is to a reader.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"field": {
						SchemaProps: spec.SchemaProps{
							Description: "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_StatusDetails builds the OpenAPI definition for
// meta/v1.StatusDetails, including a "causes" array of StatusCause references
// resolved through ref.
func schema_pkg_apis_meta_v1_StatusDetails(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"group": {
						SchemaProps: spec.SchemaProps{
							Description: "The group attribute of the resource associated with the status StatusReason.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"causes": {
						SchemaProps: spec.SchemaProps{
							Description: "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"),
									},
								},
							},
						},
					},
					"retryAfterSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"},
	}
}
// schema_pkg_apis_meta_v1_Table builds the OpenAPI definition for meta/v1.Table:
// list metadata plus required columnDefinitions and rows arrays, referencing
// TableColumnDefinition and TableRow through ref.
func schema_pkg_apis_meta_v1_Table(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"columnDefinitions": {
						SchemaProps: spec.SchemaProps{
							Description: "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition"),
									},
								},
							},
						},
					},
					"rows": {
						SchemaProps: spec.SchemaProps{
							Description: "rows is the list of items in the table.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"),
									},
								},
							},
						},
					},
				},
				Required: []string{"columnDefinitions", "rows"},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition", "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"},
	}
}
// schema_pkg_apis_meta_v1_TableColumnDefinition builds the OpenAPI definition for
// meta/v1.TableColumnDefinition; all five fields are required.
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_TableColumnDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableColumnDefinition contains information about a column returned in the Table.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is a human readable name for the column.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"format": {
						SchemaProps: spec.SchemaProps{
							Description: "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "description is a human readable description of this column.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"priority": {
						SchemaProps: spec.SchemaProps{
							Description: "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"name", "type", "format", "description", "priority"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TableOptions builds the OpenAPI definition for
// meta/v1.TableOptions (TypeMeta strings plus the includeObject policy string).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_TableOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableOptions are used when a Table is requested by the caller.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"includeObject": {
						SchemaProps: spec.SchemaProps{
							Description: "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TableRow builds the OpenAPI definition for
// meta/v1.TableRow: a required untyped "cells" array, optional row conditions,
// and an optional embedded object, referencing TableRowCondition and
// runtime.RawExtension through ref.
func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableRow is an individual row in a table.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"cells": {
						SchemaProps: spec.SchemaProps{
							Description: "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
					"conditions": {
						SchemaProps: spec.SchemaProps{
							Description: "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition"),
									},
								},
							},
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
						},
					},
				},
				Required: []string{"cells"},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_pkg_apis_meta_v1_TableRowCondition builds the OpenAPI definition for
// meta/v1.TableRowCondition (required type/status, optional reason/message).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_TableRowCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableRowCondition allows a row to be marked with additional information.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status of the condition, one of True, False, Unknown.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "(brief) machine readable reason for the condition's last transition.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "Human readable message indicating details about last transition.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"type", "status"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Time builds the OpenAPI definition for meta/v1.Time.
// Type and format are delegated to v1.Time's own OpenAPISchemaType/Format hooks
// rather than hard-coded here. ref is unused but kept to match the common
// schema-builder signature.
func schema_pkg_apis_meta_v1_Time(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
				Type:        v1.Time{}.OpenAPISchemaType(),
				Format:      v1.Time{}.OpenAPISchemaFormat(),
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Timestamp builds the OpenAPI definition for
// meta/v1.Timestamp (required int64 seconds and int32 nanos).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_Timestamp(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Timestamp is a struct that is equivalent to Time, but intended for protobuf marshalling/unmarshalling. It is generated into a serialization that matches Time. Do not use in Go structs.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"seconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"nanos": {
						SchemaProps: spec.SchemaProps{
							Description: "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"seconds", "nanos"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TypeMeta builds the OpenAPI definition for
// meta/v1.TypeMeta (optional kind and apiVersion strings).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_UpdateOptions builds the OpenAPI definition for
// meta/v1.UpdateOptions (dryRun and fieldManager plus TypeMeta strings).
// ref is unused but kept to match the common schema-builder signature.
func schema_pkg_apis_meta_v1_UpdateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_WatchEvent builds the OpenAPI definition for
// meta/v1.WatchEvent: a required event type string and a required object payload
// referencing runtime.RawExtension through ref.
func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Event represents a single event to a watched resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n   depending on context.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
						},
					},
				},
				Required: []string{"type", "object"},
			},
		},
		// Dependencies lists every definition referenced via ref() above.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_RawExtension builds the OpenAPI
// definition for runtime.RawExtension, an opaque object with no declared
// properties. ref is unused but kept to match the common schema-builder
// signature.
func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
				Type:        []string{"object"},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_TypeMeta builds the OpenAPI definition
// for runtime.TypeMeta (optional apiVersion and kind strings). Distinct from the
// meta/v1.TypeMeta definition above. ref is unused but kept to match the common
// schema-builder signature.
func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n     runtime.TypeMeta    `json:\",inline\"`\n     ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_Unknown returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/runtime.Unknown. Besides the usual optional
// apiVersion/kind pair it declares three required fields: "Raw" (a byte-format
// string holding the unmatched serialized object), "ContentEncoding" and
// "ContentType". The ref callback is unused; no other definitions are referenced.
func schema_k8sio_apimachinery_pkg_runtime_Unknown(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Unknown allows api objects with unknown types to be passed-through. This can be used to deal with the API objects from a plug-in. Unknown objects still have functioning TypeMeta features-- kind, version, etc. metadata and field mutatation.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					// Format "byte" marks a base64-encoded string in OpenAPI v2.
					"Raw": {
						SchemaProps: spec.SchemaProps{
							Description: "Raw will hold the complete serialized object which couldn't be matched with a registered type. Most likely, nothing should be done with this except for passing it through the system.",
							Type:        []string{"string"},
							Format:      "byte",
						},
					},
					"ContentEncoding": {
						SchemaProps: spec.SchemaProps{
							Description: "ContentEncoding is encoding used to encode 'Raw' data. Unspecified means no encoding.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"ContentType": {
						SchemaProps: spec.SchemaProps{
							Description: "ContentType is serialization method used to serialize 'Raw'. Unspecified means ContentTypeJSON.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"Raw", "ContentEncoding", "ContentType"},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_version_Info returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/version.Info: nine required string properties
// describing a build (major/minor version, git metadata, build date, Go
// toolchain details and target platform). The ref callback is unused.
//
// NOTE(review): the Description reads "Info contains versioning information.
// how we'll want to distribute that information." — it appears truncated in the
// upstream source comment this was generated from; since this file is generated
// and the string is emitted on the wire, it is left byte-identical here.
func schema_k8sio_apimachinery_pkg_version_Info(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Info contains versioning information. how we'll want to distribute that information.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"major": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"minor": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitVersion": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitCommit": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitTreeState": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"buildDate": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"goVersion": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"compiler": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"platform": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"},
			},
		},
	}
}
Fix typo coersion -> coercion
Signed-off-by: Mateusz Gozdek <mgozdek@microsoft.com>
Kubernetes-commit: 53892932973a3c400550c7854423e7fd5f2f9067
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by openapi-gen. DO NOT EDIT.
// This file was autogenerated by openapi-gen. Do not edit it manually!
package openapi
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
common "k8s.io/kube-openapi/pkg/common"
spec "k8s.io/kube-openapi/pkg/validation/spec"
)
// GetOpenAPIDefinitions returns the full catalog of OpenAPI definitions for this
// package, keyed by fully-qualified Go type name (autoscaling/v1, meta/v1,
// runtime, and version types). Each value is produced by the corresponding
// generated schema_* factory, which is handed the same ref callback so that
// cross-type references ($ref) resolve consistently within this map.
func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	return map[string]common.OpenAPIDefinition{
		"k8s.io/api/autoscaling/v1.ContainerResourceMetricSource":        schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus":        schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.CrossVersionObjectReference":          schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference(ref),
		"k8s.io/api/autoscaling/v1.ExternalMetricSource":                 schema_k8sio_api_autoscaling_v1_ExternalMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ExternalMetricStatus":                 schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler":              schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerCondition":     schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerList":          schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec":          schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec(ref),
		"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus":        schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus(ref),
		"k8s.io/api/autoscaling/v1.MetricSpec":                           schema_k8sio_api_autoscaling_v1_MetricSpec(ref),
		"k8s.io/api/autoscaling/v1.MetricStatus":                         schema_k8sio_api_autoscaling_v1_MetricStatus(ref),
		"k8s.io/api/autoscaling/v1.ObjectMetricSource":                   schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ObjectMetricStatus":                   schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.PodsMetricSource":                     schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref),
		"k8s.io/api/autoscaling/v1.PodsMetricStatus":                     schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.ResourceMetricSource":                 schema_k8sio_api_autoscaling_v1_ResourceMetricSource(ref),
		"k8s.io/api/autoscaling/v1.ResourceMetricStatus":                 schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref),
		"k8s.io/api/autoscaling/v1.Scale":                                schema_k8sio_api_autoscaling_v1_Scale(ref),
		"k8s.io/api/autoscaling/v1.ScaleSpec":                            schema_k8sio_api_autoscaling_v1_ScaleSpec(ref),
		"k8s.io/api/autoscaling/v1.ScaleStatus":                          schema_k8sio_api_autoscaling_v1_ScaleStatus(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup":                  schema_pkg_apis_meta_v1_APIGroup(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList":              schema_pkg_apis_meta_v1_APIGroupList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource":               schema_pkg_apis_meta_v1_APIResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList":           schema_pkg_apis_meta_v1_APIResourceList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions":               schema_pkg_apis_meta_v1_APIVersions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ApplyOptions":              schema_pkg_apis_meta_v1_ApplyOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Condition":                 schema_pkg_apis_meta_v1_Condition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions":             schema_pkg_apis_meta_v1_CreateOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions":             schema_pkg_apis_meta_v1_DeleteOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Duration":                  schema_pkg_apis_meta_v1_Duration(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1":                  schema_pkg_apis_meta_v1_FieldsV1(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions":                schema_pkg_apis_meta_v1_GetOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind":                 schema_pkg_apis_meta_v1_GroupKind(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource":             schema_pkg_apis_meta_v1_GroupResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion":              schema_pkg_apis_meta_v1_GroupVersion(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery":  schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind":          schema_pkg_apis_meta_v1_GroupVersionKind(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource":      schema_pkg_apis_meta_v1_GroupVersionResource(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent":             schema_pkg_apis_meta_v1_InternalEvent(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector":             schema_pkg_apis_meta_v1_LabelSelector(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement":  schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.List":                      schema_pkg_apis_meta_v1_List(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta":                  schema_pkg_apis_meta_v1_ListMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions":               schema_pkg_apis_meta_v1_ListOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry":        schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime":                 schema_pkg_apis_meta_v1_MicroTime(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta":                schema_pkg_apis_meta_v1_ObjectMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference":            schema_pkg_apis_meta_v1_OwnerReference(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata":     schema_pkg_apis_meta_v1_PartialObjectMetadata(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Patch":                     schema_pkg_apis_meta_v1_Patch(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions":              schema_pkg_apis_meta_v1_PatchOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions":             schema_pkg_apis_meta_v1_Preconditions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths":                 schema_pkg_apis_meta_v1_RootPaths(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Status":                    schema_pkg_apis_meta_v1_Status(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause":               schema_pkg_apis_meta_v1_StatusCause(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails":             schema_pkg_apis_meta_v1_StatusDetails(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Table":                     schema_pkg_apis_meta_v1_Table(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition":     schema_pkg_apis_meta_v1_TableColumnDefinition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions":              schema_pkg_apis_meta_v1_TableOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableRow":                  schema_pkg_apis_meta_v1_TableRow(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition":         schema_pkg_apis_meta_v1_TableRowCondition(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Time":                      schema_pkg_apis_meta_v1_Time(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp":                 schema_pkg_apis_meta_v1_Timestamp(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta":                  schema_pkg_apis_meta_v1_TypeMeta(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions":             schema_pkg_apis_meta_v1_UpdateOptions(ref),
		"k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent":                schema_pkg_apis_meta_v1_WatchEvent(ref),
		"k8s.io/apimachinery/pkg/runtime.RawExtension":                   schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref),
		"k8s.io/apimachinery/pkg/runtime.TypeMeta":                       schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref),
		"k8s.io/apimachinery/pkg/runtime.Unknown":                        schema_k8sio_apimachinery_pkg_runtime_Unknown(ref),
		"k8s.io/apimachinery/pkg/version.Info":                           schema_k8sio_apimachinery_pkg_version_Info(ref),
	}
}
// schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource returns the
// OpenAPI definition for autoscaling/v1.ContainerResourceMetricSource.
// Required: "name" and "container"; the two target fields are optional and
// mutually alternative target types per the description. "targetAverageValue"
// is a $ref to resource.Quantity, which is therefore listed in Dependencies.
func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in the requests and limits, describing a single container in each of the pods of the current scale target(e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built into Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"targetAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"targetAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"container": {
						SchemaProps: spec.SchemaProps{
							Description: "container is the name of the container in the pods of the scaling target.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "container"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus returns the
// OpenAPI definition for autoscaling/v1.ContainerResourceMetricStatus.
// Required: "name", "currentAverageValue" and "container";
// "currentAverageUtilization" is optional. "currentAverageValue" is a $ref to
// resource.Quantity, which is therefore listed in Dependencies.
//
// Fix: the "container" description read "scaling taget" — corrected to
// "scaling target" (typo in the emitted API documentation string; this file is
// generated, so the same fix belongs in the source type's comment upstream).
func schema_k8sio_api_autoscaling_v1_ContainerResourceMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"currentAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"currentAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"container": {
						SchemaProps: spec.SchemaProps{
							Description: "container is the name of the container in the pods of the scaling target",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "currentAverageValue", "container"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference returns the
// OpenAPI definition for autoscaling/v1.CrossVersionObjectReference.
// Required: "kind" and "name"; "apiVersion" is optional. The whole object is
// marked x-kubernetes-map-type: atomic, i.e. it is replaced (not merged) on
// apply. The ref callback is unused; no other definitions are referenced.
//
// Fix: the "kind" description ended with a stray escaped quote after the
// "...#types-kinds" URL, which corrupted the rendered link in generated API
// docs — removed (this file is generated, so the same fix belongs in the
// source type's comment upstream).
func schema_k8sio_api_autoscaling_v1_CrossVersionObjectReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CrossVersionObjectReference contains enough information to let you identify the referred resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "API version of the referent",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"kind", "name"},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
	}
}
// schema_k8sio_api_autoscaling_v1_ExternalMetricSource returns the OpenAPI
// definition for autoscaling/v1.ExternalMetricSource. Required: "metricName"
// only; "targetValue" and "targetAverageValue" are mutually exclusive per their
// descriptions. References LabelSelector and resource.Quantity, both listed in
// Dependencies.
func schema_k8sio_api_autoscaling_v1_ExternalMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of the metric in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metricSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "metricSelector is used to identify a specific time series within a given metric.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
					"targetValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"targetAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"metricName"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_ExternalMetricStatus returns the OpenAPI
// definition for autoscaling/v1.ExternalMetricStatus. Required: "metricName"
// and "currentValue"; "metricSelector" and "currentAverageValue" are optional.
// References LabelSelector and resource.Quantity, both listed in Dependencies.
func schema_k8sio_api_autoscaling_v1_ExternalMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of a metric used for autoscaling in metric system.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metricSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "metricSelector is used to identify a specific time series within a given metric.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
					"currentValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentValue is the current value of the metric (as a quantity)",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"currentAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageValue is the current value of metric averaged over autoscaled pods.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"metricName", "currentValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler returns the OpenAPI
// definition for autoscaling/v1.HorizontalPodAutoscaler: the standard
// kind/apiVersion/metadata trio plus $refs to its Spec and Status types. No
// properties are required. All three referenced types appear in Dependencies.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscaler(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "configuration of a horizontal pod autoscaler.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "current information about the autoscaler.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerSpec", "k8s.io/api/autoscaling/v1.HorizontalPodAutoscalerStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerCondition.
// Required: "type" and "status"; "lastTransitionTime" is a $ref to meta/v1.Time
// (listed in Dependencies), and "reason"/"message" are optional strings.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type describes the current condition",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "status is the status of the condition (True, False, Unknown)",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"lastTransitionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "lastTransitionTime is the last time the condition transitioned from one status to another",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "reason is the reason for the condition's last transition.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "message is a human-readable explanation containing details about the transition",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"type", "status"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerList: the usual
// kind/apiVersion/metadata list envelope plus a required "items" array whose
// elements $ref HorizontalPodAutoscaler. Referenced types are in Dependencies.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "list of horizontal pod autoscaler objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "list of horizontal pod autoscaler objects.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.HorizontalPodAutoscaler", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerSpec. Required:
// "scaleTargetRef" (a $ref to CrossVersionObjectReference, listed in
// Dependencies) and "maxReplicas"; "minReplicas" and
// "targetCPUUtilizationPercentage" are optional int32s.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "specification of a horizontal pod autoscaler.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"scaleTargetRef": {
						SchemaProps: spec.SchemaProps{
							Description: "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
						},
					},
					"minReplicas": {
						SchemaProps: spec.SchemaProps{
							Description: "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"maxReplicas": {
						SchemaProps: spec.SchemaProps{
							Description: "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"targetCPUUtilizationPercentage": {
						SchemaProps: spec.SchemaProps{
							Description: "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"scaleTargetRef", "maxReplicas"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.CrossVersionObjectReference"},
	}
}
// schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus returns the
// OpenAPI definition for autoscaling/v1.HorizontalPodAutoscalerStatus.
// Required: "currentReplicas" and "desiredReplicas"; "lastScaleTime" is a $ref
// to meta/v1.Time (listed in Dependencies), the rest are optional integers.
func schema_k8sio_api_autoscaling_v1_HorizontalPodAutoscalerStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "current status of a horizontal pod autoscaler",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "most recent generation observed by this autoscaler.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"lastScaleTime": {
						SchemaProps: spec.SchemaProps{
							Description: "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"currentReplicas": {
						SchemaProps: spec.SchemaProps{
							Description: "current number of replicas of pods managed by this autoscaler.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"desiredReplicas": {
						SchemaProps: spec.SchemaProps{
							Description: "desired number of replicas of pods managed by this autoscaler.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"currentCPUUtilizationPercentage": {
						SchemaProps: spec.SchemaProps{
							Description: "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"currentReplicas", "desiredReplicas"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_k8sio_api_autoscaling_v1_MetricSpec returns the OpenAPI definition for
// autoscaling/v1.MetricSpec: a discriminated union with required "type" and one
// optional $ref per metric source kind (object, pods, resource,
// containerResource, external). All five referenced types are in Dependencies.
//
// Fix: the "type" description read "is available on when the feature-gate" —
// corrected to "is available when the feature-gate" (grammar defect in the
// emitted API documentation string; this file is generated, so the same fix
// belongs in the source type's comment upstream).
func schema_k8sio_api_autoscaling_v1_MetricSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available when the feature-gate HPAContainerMetrics is enabled",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
							Ref:         ref("k8s.io/api/autoscaling/v1.ObjectMetricSource"),
						},
					},
					"pods": {
						SchemaProps: spec.SchemaProps{
							Description: "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
							Ref:         ref("k8s.io/api/autoscaling/v1.PodsMetricSource"),
						},
					},
					"resource": {
						SchemaProps: spec.SchemaProps{
							Description: "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
							Ref:         ref("k8s.io/api/autoscaling/v1.ResourceMetricSource"),
						},
					},
					"containerResource": {
						SchemaProps: spec.SchemaProps{
							Description: "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.",
							Ref:         ref("k8s.io/api/autoscaling/v1.ContainerResourceMetricSource"),
						},
					},
					"external": {
						SchemaProps: spec.SchemaProps{
							Description: "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
							Ref:         ref("k8s.io/api/autoscaling/v1.ExternalMetricSource"),
						},
					},
				},
				Required: []string{"type"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.ContainerResourceMetricSource", "k8s.io/api/autoscaling/v1.ExternalMetricSource", "k8s.io/api/autoscaling/v1.ObjectMetricSource", "k8s.io/api/autoscaling/v1.PodsMetricSource", "k8s.io/api/autoscaling/v1.ResourceMetricSource"},
	}
}
// schema_k8sio_api_autoscaling_v1_MetricStatus returns the OpenAPI definition for
// autoscaling/v1 MetricStatus, the last-read state of a single autoscaling metric.
// ref resolves each dependency type name to an OpenAPI schema reference.
// NOTE(review): generated code — the description typo "available on when" comes from
// the upstream API type comment; fix it there, not here.
func schema_k8sio_api_autoscaling_v1_MetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "MetricStatus describes the last-read state of a single metric.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object. Note: \"ContainerResource\" type is available on when the feature-gate HPAContainerMetrics is enabled",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).",
							Ref:         ref("k8s.io/api/autoscaling/v1.ObjectMetricStatus"),
						},
					},
					"pods": {
						SchemaProps: spec.SchemaProps{
							Description: "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
							Ref:         ref("k8s.io/api/autoscaling/v1.PodsMetricStatus"),
						},
					},
					"resource": {
						SchemaProps: spec.SchemaProps{
							Description: "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
							Ref:         ref("k8s.io/api/autoscaling/v1.ResourceMetricStatus"),
						},
					},
					"containerResource": {
						SchemaProps: spec.SchemaProps{
							Description: "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
							Ref:         ref("k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus"),
						},
					},
					"external": {
						SchemaProps: spec.SchemaProps{
							Description: "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).",
							Ref:         ref("k8s.io/api/autoscaling/v1.ExternalMetricStatus"),
						},
					},
				},
				// Only "type" is required; exactly one of the source-specific fields is
				// expected to be populated for a given type (enforced elsewhere, not here).
				Required: []string{"type"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.ContainerResourceMetricStatus", "k8s.io/api/autoscaling/v1.ExternalMetricStatus", "k8s.io/api/autoscaling/v1.ObjectMetricStatus", "k8s.io/api/autoscaling/v1.PodsMetricStatus", "k8s.io/api/autoscaling/v1.ResourceMetricStatus"},
	}
}
// schema_k8sio_api_autoscaling_v1_ObjectMetricSource returns the OpenAPI definition
// for autoscaling/v1 ObjectMetricSource (scaling on a metric of a single Kubernetes
// object). ref resolves dependency type names to OpenAPI schema references.
func schema_k8sio_api_autoscaling_v1_ObjectMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"target": {
						SchemaProps: spec.SchemaProps{
							Description: "target is the described Kubernetes object.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
						},
					},
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of the metric in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"targetValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetValue is the target value of the metric (as a quantity).",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"selector": {
						SchemaProps: spec.SchemaProps{
							Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
					"averageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"target", "metricName", "targetValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.CrossVersionObjectReference", "k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_ObjectMetricStatus returns the OpenAPI definition
// for autoscaling/v1 ObjectMetricStatus (the current value of an object metric).
// Mirrors ObjectMetricSource but reports currentValue instead of targetValue.
func schema_k8sio_api_autoscaling_v1_ObjectMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"target": {
						SchemaProps: spec.SchemaProps{
							Description: "target is the described Kubernetes object.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.CrossVersionObjectReference"),
						},
					},
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of the metric in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"currentValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentValue is the current value of the metric (as a quantity).",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"selector": {
						SchemaProps: spec.SchemaProps{
							Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
					"averageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"target", "metricName", "currentValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.CrossVersionObjectReference", "k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_PodsMetricSource returns the OpenAPI definition
// for autoscaling/v1 PodsMetricSource (scaling on a per-pod metric averaged across
// all relevant pods). ref resolves dependency type names to schema references.
func schema_k8sio_api_autoscaling_v1_PodsMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of the metric in question",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"targetAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"selector": {
						SchemaProps: spec.SchemaProps{
							Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
				},
				Required: []string{"metricName", "targetAverageValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_PodsMetricStatus returns the OpenAPI definition
// for autoscaling/v1 PodsMetricStatus (the current value of a per-pod metric).
// Mirrors PodsMetricSource but reports currentAverageValue.
func schema_k8sio_api_autoscaling_v1_PodsMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"metricName": {
						SchemaProps: spec.SchemaProps{
							Description: "metricName is the name of the metric in question",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"currentAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
					"selector": {
						SchemaProps: spec.SchemaProps{
							Description: "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"),
						},
					},
				},
				Required: []string{"metricName", "currentAverageValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
	}
}
// schema_k8sio_api_autoscaling_v1_ResourceMetricSource returns the OpenAPI definition
// for autoscaling/v1 ResourceMetricSource (scaling on a built-in resource metric such
// as CPU or memory). Exactly one of the two target fields should be set per the
// type's description; only "name" is schema-required.
func schema_k8sio_api_autoscaling_v1_ResourceMetricSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"targetAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"targetAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"name"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_ResourceMetricStatus returns the OpenAPI definition
// for autoscaling/v1 ResourceMetricStatus (the current value of a built-in resource
// metric). currentAverageValue is always set, so it is schema-required alongside name.
func schema_k8sio_api_autoscaling_v1_ResourceMetricStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the resource in question.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"currentAverageUtilization": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"currentAverageValue": {
						SchemaProps: spec.SchemaProps{
							Description: "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/api/resource.Quantity"),
						},
					},
				},
				Required: []string{"name", "currentAverageValue"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/api/resource.Quantity"},
	}
}
// schema_k8sio_api_autoscaling_v1_Scale returns the OpenAPI definition for the
// autoscaling/v1 Scale subresource (kind/apiVersion/metadata plus spec and status).
// No fields are schema-required.
func schema_k8sio_api_autoscaling_v1_Scale(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Scale represents a scaling request for a resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.ScaleSpec"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/autoscaling/v1.ScaleStatus"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/autoscaling/v1.ScaleSpec", "k8s.io/api/autoscaling/v1.ScaleStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_k8sio_api_autoscaling_v1_ScaleSpec returns the OpenAPI definition for
// autoscaling/v1 ScaleSpec: a single optional int32 "replicas" field. The ref
// parameter is unused here (no dependency types), but the signature is uniform
// across all generated schema functions.
func schema_k8sio_api_autoscaling_v1_ScaleSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ScaleSpec describes the attributes of a scale subresource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"replicas": {
						SchemaProps: spec.SchemaProps{
							Description: "desired number of instances for the scaled object.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
			},
		},
	}
}
// schema_k8sio_api_autoscaling_v1_ScaleStatus returns the OpenAPI definition for
// autoscaling/v1 ScaleStatus: the observed replica count (required, defaulting to 0)
// and an optional string-form label selector. ref is unused (no dependency types).
func schema_k8sio_api_autoscaling_v1_ScaleStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ScaleStatus represents the current status of a scale subresource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"replicas": {
						SchemaProps: spec.SchemaProps{
							Description: "actual number of observed instances of the scaled object.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"selector": {
						SchemaProps: spec.SchemaProps{
							Description: "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"replicas"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_APIGroup returns the OpenAPI definition for meta/v1
// APIGroup: a discovery object listing a group's name, supported versions,
// preferred version, and per-CIDR server addresses.
func schema_pkg_apis_meta_v1_APIGroup(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIGroup contains the name, the supported versions, and the preferred version of a group.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the name of the group.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"versions": {
						SchemaProps: spec.SchemaProps{
							Description: "versions are the versions supported in this group.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
									},
								},
							},
						},
					},
					"preferredVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "preferredVersion is the version preferred by the API server, which probably is the storage version.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery"),
						},
					},
					"serverAddressByClientCIDRs": {
						SchemaProps: spec.SchemaProps{
							Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
									},
								},
							},
						},
					},
				},
				Required: []string{"name", "versions"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery", "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
	}
}
// schema_pkg_apis_meta_v1_APIGroupList returns the OpenAPI definition for meta/v1
// APIGroupList: the list wrapper served at /apis, holding an array of APIGroup.
func schema_pkg_apis_meta_v1_APIGroupList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"groups": {
						SchemaProps: spec.SchemaProps{
							Description: "groups is a list of APIGroup.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"),
									},
								},
							},
						},
					},
				},
				Required: []string{"groups"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup"},
	}
}
// schema_pkg_apis_meta_v1_APIResource returns the OpenAPI definition for meta/v1
// APIResource: a discovery entry describing one resource's names, scope, kind,
// supported verbs, short names, categories, and storage version hash. ref is
// unused (all properties are scalars or string arrays; no dependency types).
// NOTE(review): generated code — the stray closing quotes in the "group" and
// "version" descriptions come from the upstream type comments; fix them there.
func schema_pkg_apis_meta_v1_APIResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIResource specifies the name of a resource and whether it is namespaced.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is the plural name of the resource.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"singularName": {
						SchemaProps: spec.SchemaProps{
							Description: "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"namespaced": {
						SchemaProps: spec.SchemaProps{
							Description: "namespaced indicates if a resource is namespaced or not.",
							Default:     false,
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"group": {
						SchemaProps: spec.SchemaProps{
							Description: "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"version": {
						SchemaProps: spec.SchemaProps{
							Description: "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"verbs": {
						SchemaProps: spec.SchemaProps{
							Description: "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"shortNames": {
						SchemaProps: spec.SchemaProps{
							Description: "shortNames is a list of suggested short names of the resource.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"categories": {
						SchemaProps: spec.SchemaProps{
							Description: "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"storageVersionHash": {
						SchemaProps: spec.SchemaProps{
							Description: "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"name", "singularName", "namespaced", "kind", "verbs"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_APIResourceList returns the OpenAPI definition for meta/v1
// APIResourceList: the discovery list of APIResource entries for one group/version.
func schema_pkg_apis_meta_v1_APIResourceList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"groupVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "groupVersion is the group and version this APIResourceList is for.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resources": {
						SchemaProps: spec.SchemaProps{
							Description: "resources contains the name of the resources and if they are namespaced.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"),
									},
								},
							},
						},
					},
				},
				Required: []string{"groupVersion", "resources"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.APIResource"},
	}
}
// schema_pkg_apis_meta_v1_APIVersions returns the OpenAPI definition for meta/v1
// APIVersions: the legacy /api discovery object listing available versions and
// per-CIDR server addresses.
func schema_pkg_apis_meta_v1_APIVersions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"versions": {
						SchemaProps: spec.SchemaProps{
							Description: "versions are the api versions that are available.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"serverAddressByClientCIDRs": {
						SchemaProps: spec.SchemaProps{
							Description: "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"),
									},
								},
							},
						},
					},
				},
				Required: []string{"versions", "serverAddressByClientCIDRs"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR"},
	}
}
// schema_pkg_apis_meta_v1_ApplyOptions returns the OpenAPI definition for meta/v1
// ApplyOptions (options for server-side apply requests; equivalent to PatchOptions).
// ref is unused (all properties are scalars or string arrays; no dependency types).
func schema_pkg_apis_meta_v1_ApplyOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ApplyOptions may be provided when applying an API object. FieldManager is required for apply requests. ApplyOptions is equivalent to PatchOptions. It is provided as a convenience with documentation that speaks specifically to how the options fields relate to apply.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"force": {
						SchemaProps: spec.SchemaProps{
							Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people.",
							Default:     false,
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"force", "fieldManager"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Condition returns the OpenAPI definition for the
// meta/v1 Condition type (the standard .status.conditions entry shape).
// NOTE(review): this looks like openapi-gen output; hand-written comments
// here will be lost when the file is regenerated.
func schema_pkg_apis_meta_v1_Condition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Condition contains details for one aspect of the current state of this API Resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type of condition in CamelCase or in foo.example.com/CamelCase.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "status of the condition, one of True, False, Unknown.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"observedGeneration": {
						SchemaProps: spec.SchemaProps{
							Description: "observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					// lastTransitionTime is a $ref to the shared Time definition.
					"lastTransitionTime": {
						SchemaProps: spec.SchemaProps{
							Description: "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "message is a human readable message indicating details about the transition. This may be an empty string.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"type", "status", "lastTransitionTime", "reason", "message"},
			},
		},
		// Definitions referenced via ref(...) above, so the aggregator can
		// pull them into the final spec.
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_CreateOptions returns the OpenAPI definition for
// the meta/v1 CreateOptions query-options type. No fields are required and
// there are no $ref dependencies, so Dependencies is omitted.
func schema_pkg_apis_meta_v1_CreateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "CreateOptions may be provided when creating an API object.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					// dryRun is an array of opaque directive strings.
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_DeleteOptions returns the OpenAPI definition for
// the meta/v1 DeleteOptions type. The preconditions property is a $ref to
// the Preconditions definition, which is echoed in Dependencies below.
func schema_pkg_apis_meta_v1_DeleteOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "DeleteOptions may be provided when deleting an API object.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"gracePeriodSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"preconditions": {
						SchemaProps: spec.SchemaProps{
							Description: "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"),
						},
					},
					"orphanDependents": {
						SchemaProps: spec.SchemaProps{
							Description: "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"propagationPolicy": {
						SchemaProps: spec.SchemaProps{
							Description: "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions"},
	}
}
// schema_pkg_apis_meta_v1_Duration returns the OpenAPI definition for the
// meta/v1 Duration type. Type and format are delegated to the type's own
// OpenAPISchemaType/OpenAPISchemaFormat hooks rather than hard-coded here.
func schema_pkg_apis_meta_v1_Duration(ref common.ReferenceCallback) common.OpenAPIDefinition {
	var d v1.Duration
	props := spec.SchemaProps{
		Description: "Duration is a wrapper around time.Duration which supports correct marshaling to YAML and JSON. In particular, it marshals into strings, which can be used as map keys in json.",
		Type:        d.OpenAPISchemaType(),
		Format:      d.OpenAPISchemaFormat(),
	}
	return common.OpenAPIDefinition{Schema: spec.Schema{SchemaProps: props}}
}
// schema_pkg_apis_meta_v1_FieldsV1 returns the OpenAPI definition for the
// meta/v1 FieldsV1 type. The schema is an open object: the trie-encoded
// field set carries no fixed properties.
func schema_pkg_apis_meta_v1_FieldsV1(ref common.ReferenceCallback) common.OpenAPIDefinition {
	props := spec.SchemaProps{
		Description: "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
		Type:        []string{"object"},
	}
	return common.OpenAPIDefinition{Schema: spec.Schema{SchemaProps: props}}
}
// schema_pkg_apis_meta_v1_GetOptions returns the OpenAPI definition for the
// meta/v1 GetOptions query-options type. All three properties share the same
// plain-string shape, so a small local constructor builds each one.
func schema_pkg_apis_meta_v1_GetOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	stringProp := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GetOptions is the standard query options to the standard REST get call.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind":            stringProp("Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"),
					"apiVersion":      stringProp("APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"),
					"resourceVersion": stringProp("resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset"),
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupKind returns the OpenAPI definition for the
// meta/v1 GroupKind type: two required, defaulted-empty string fields.
func schema_pkg_apis_meta_v1_GroupKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func() spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"group": strProp(),
					"kind":  strProp(),
				},
				Required: []string{"group", "kind"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupResource returns the OpenAPI definition for
// the meta/v1 GroupResource type: two required, defaulted-empty strings.
func schema_pkg_apis_meta_v1_GroupResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func() spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying concepts during lookup stages without having partially valid types",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"group":    strProp(),
					"resource": strProp(),
				},
				Required: []string{"group", "resource"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersion returns the OpenAPI definition for
// the meta/v1 GroupVersion type: two required, defaulted-empty strings.
func schema_pkg_apis_meta_v1_GroupVersion(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func() spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersion contains the \"group\" and the \"version\", which uniquely identifies the API.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"group":   strProp(),
					"version": strProp(),
				},
				Required: []string{"group", "version"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionForDiscovery returns the OpenAPI
// definition for the meta/v1 GroupVersionForDiscovery type. Both fields are
// required, defaulted-empty strings with per-field documentation.
func schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"groupVersion": strProp("groupVersion specifies the API group and version in the form \"group/version\""),
					"version":      strProp("version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion."),
				},
				Required: []string{"groupVersion", "version"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionKind returns the OpenAPI definition
// for the meta/v1 GroupVersionKind type: three required, defaulted-empty
// string fields.
func schema_pkg_apis_meta_v1_GroupVersionKind(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func() spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"group":   strProp(),
					"version": strProp(),
					"kind":    strProp(),
				},
				Required: []string{"group", "version", "kind"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_GroupVersionResource returns the OpenAPI
// definition for the meta/v1 GroupVersionResource type: three required,
// defaulted-empty string fields.
func schema_pkg_apis_meta_v1_GroupVersionResource(ref common.ReferenceCallback) common.OpenAPIDefinition {
	strProp := func() spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Default: "",
				Type:    []string{"string"},
				Format:  "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"group":    strProp(),
					"version":  strProp(),
					"resource": strProp(),
				},
				Required: []string{"group", "version", "resource"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_InternalEvent returns the OpenAPI definition for
// the meta/v1 InternalEvent type (a versioned wrapper around watch.Event).
// The Object property is a $ref to runtime.Object, echoed in Dependencies.
func schema_pkg_apis_meta_v1_InternalEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "InternalEvent makes watch.Event versioned",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"Type": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"Object": {
						SchemaProps: spec.SchemaProps{
							Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Bookmark: the object (instance of a type being watched) where\n only ResourceVersion field is set. On successful restart of watch from a\n bookmark resourceVersion, client is guaranteed to not get repeat event\n nor miss any events.\n * If Type is Error: *api.Status is recommended; other types may make sense\n depending on context.",
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.Object"),
						},
					},
				},
				Required: []string{"Type", "Object"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/runtime.Object"},
	}
}
// schema_pkg_apis_meta_v1_LabelSelector returns the OpenAPI definition for
// the meta/v1 LabelSelector type. matchLabels is an open string->string map
// (AdditionalProperties), matchExpressions is an array of $refs to
// LabelSelectorRequirement, and the whole object carries the
// x-kubernetes-map-type=atomic extension.
func schema_pkg_apis_meta_v1_LabelSelector(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"matchLabels": {
						SchemaProps: spec.SchemaProps{
							Description: "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"matchExpressions": {
						SchemaProps: spec.SchemaProps{
							Description: "matchExpressions is a list of label selector requirements. The requirements are ANDed.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"),
									},
								},
							},
						},
					},
				},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement"},
	}
}
// schema_pkg_apis_meta_v1_LabelSelectorRequirement returns the OpenAPI
// definition for the meta/v1 LabelSelectorRequirement type. The "key"
// property additionally carries strategic-merge-patch extensions
// (x-kubernetes-patch-merge-key / x-kubernetes-patch-strategy).
func schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"key": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "key",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "key is the label key that the selector applies to.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"operator": {
						SchemaProps: spec.SchemaProps{
							Description: "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"values": {
						SchemaProps: spec.SchemaProps{
							Description: "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
				},
				Required: []string{"key", "operator"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_List returns the OpenAPI definition for the
// meta/v1 List type. metadata $refs ListMeta and each item $refs
// runtime.RawExtension; both are echoed in Dependencies.
func schema_pkg_apis_meta_v1_List(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "List holds a list of objects, which may not be known by the server.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "List of objects",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_pkg_apis_meta_v1_ListMeta returns the OpenAPI definition for the
// meta/v1 ListMeta type (standard metadata for list responses). No fields
// are required and there are no $ref dependencies.
func schema_pkg_apis_meta_v1_ListMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"selfLink": {
						SchemaProps: spec.SchemaProps{
							Description: "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"continue": {
						SchemaProps: spec.SchemaProps{
							Description: "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"remainingItemCount": {
						SchemaProps: spec.SchemaProps{
							Description: "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ListOptions returns the OpenAPI definition for the
// meta/v1 ListOptions query-options type (selectors, watch flags, paging).
// No fields are required and there are no $ref dependencies.
func schema_pkg_apis_meta_v1_ListOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ListOptions is the query options to a standard REST list call.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"labelSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"fieldSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"watch": {
						SchemaProps: spec.SchemaProps{
							Description: "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"allowWatchBookmarks": {
						SchemaProps: spec.SchemaProps{
							Description: "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersionMatch": {
						SchemaProps: spec.SchemaProps{
							Description: "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"timeoutSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"limit": {
						SchemaProps: spec.SchemaProps{
							Description: "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"continue": {
						SchemaProps: spec.SchemaProps{
							Description: "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ManagedFieldsEntry returns the OpenAPI definition
// for the meta/v1 ManagedFieldsEntry type. The time and fieldsV1 properties
// are $refs to Time and FieldsV1, echoed in Dependencies below.
func schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"manager": {
						SchemaProps: spec.SchemaProps{
							Description: "Manager is an identifier of the workflow managing these fields.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"operation": {
						SchemaProps: spec.SchemaProps{
							Description: "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"time": {
						SchemaProps: spec.SchemaProps{
							Description: "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"fieldsType": {
						SchemaProps: spec.SchemaProps{
							Description: "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"fieldsV1": {
						SchemaProps: spec.SchemaProps{
							Description: "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1"),
						},
					},
					"subresource": {
						SchemaProps: spec.SchemaProps{
							Description: "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_MicroTime returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime. MicroTime serializes as
// a custom scalar, so the type/format are obtained from the type's own
// OpenAPISchemaType/OpenAPISchemaFormat methods rather than hard-coded.
func schema_pkg_apis_meta_v1_MicroTime(ref common.ReferenceCallback) common.OpenAPIDefinition {
	props := spec.SchemaProps{
		Description: "MicroTime is version of Time with microsecond level precision.",
		Type:        v1.MicroTime{}.OpenAPISchemaType(),
		Format:      v1.MicroTime{}.OpenAPISchemaFormat(),
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{SchemaProps: props},
	}
}
// schema_pkg_apis_meta_v1_ObjectMeta returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. The Dependencies list
// names every type reachable through ref(...) below so the aggregator can
// pull those definitions in as well.
// NOTE(review): this file appears to be generated by openapi-gen — changes
// here would normally be made in the source type's Go doc comments and
// regenerated; confirm before hand-editing.
func schema_pkg_apis_meta_v1_ObjectMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"generateName": {
						SchemaProps: spec.SchemaProps{
							Description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"namespace": {
						SchemaProps: spec.SchemaProps{
							Description: "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"selfLink": {
						SchemaProps: spec.SchemaProps{
							Description: "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"resourceVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"generation": {
						SchemaProps: spec.SchemaProps{
							Description: "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"creationTimestamp": {
						SchemaProps: spec.SchemaProps{
							Description: "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"deletionTimestamp": {
						SchemaProps: spec.SchemaProps{
							Description: "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"),
						},
					},
					"deletionGracePeriodSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"labels": {
						SchemaProps: spec.SchemaProps{
							Description: "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"annotations": {
						SchemaProps: spec.SchemaProps{
							Description: "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations",
							Type:        []string{"object"},
							AdditionalProperties: &spec.SchemaOrBool{
								Allows: true,
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"ownerReferences": {
						// Patch-merge extensions: strategic merge patch merges this
						// list keyed by "uid" instead of replacing it wholesale.
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-merge-key": "uid",
								"x-kubernetes-patch-strategy":  "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference"),
									},
								},
							},
						},
					},
					"finalizers": {
						VendorExtensible: spec.VendorExtensible{
							Extensions: spec.Extensions{
								"x-kubernetes-patch-strategy": "merge",
							},
						},
						SchemaProps: spec.SchemaProps{
							Description: "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order.  Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"clusterName": {
						SchemaProps: spec.SchemaProps{
							Description: "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"managedFields": {
						SchemaProps: spec.SchemaProps{
							Description: "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry"),
									},
								},
							},
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry", "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"},
	}
}
// schema_pkg_apis_meta_v1_OwnerReference returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference. The top-level
// x-kubernetes-map-type=atomic extension marks the whole reference as
// replace-on-update rather than field-wise mergeable.
func schema_pkg_apis_meta_v1_OwnerReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "API version of the referent.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"controller": {
						SchemaProps: spec.SchemaProps{
							Description: "If true, this reference points to the managing controller.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"blockOwnerDeletion": {
						SchemaProps: spec.SchemaProps{
							Description: "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
				},
				Required: []string{"apiVersion", "kind", "name", "uid"},
			},
			VendorExtensible: spec.VendorExtensible{
				Extensions: spec.Extensions{
					"x-kubernetes-map-type": "atomic",
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_PartialObjectMetadata returns the OpenAPI
// definition for k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata,
// which exposes only the standard kind/apiVersion/metadata envelope.
func schema_pkg_apis_meta_v1_PartialObjectMetadata(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}
// schema_pkg_apis_meta_v1_PartialObjectMetadataList returns the OpenAPI
// definition for the list form of PartialObjectMetadata; "items" is the
// only required property.
func schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PartialObjectMetadataList contains a list of objects containing only their metadata",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "items contains each of the included items.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata"},
	}
}
// schema_pkg_apis_meta_v1_Patch returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.Patch. Patch has no properties of
// its own; it only gives the PATCH request body a named object type.
func schema_pkg_apis_meta_v1_Patch(ref common.ReferenceCallback) common.OpenAPIDefinition {
	schema := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
			Type:        []string{"object"},
		},
	}
	return common.OpenAPIDefinition{Schema: schema}
}
// schema_pkg_apis_meta_v1_PatchOptions returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions. All fields are
// optional; there is no Required list and no external dependencies.
func schema_pkg_apis_meta_v1_PatchOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"force": {
						SchemaProps: spec.SchemaProps{
							Description: "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
							Type:        []string{"boolean"},
							Format:      "",
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Preconditions returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions: two optional string
// fields (uid, resourceVersion) and nothing else.
func schema_pkg_apis_meta_v1_Preconditions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	properties := map[string]spec.Schema{
		"uid": {
			SchemaProps: spec.SchemaProps{
				Description: "Specifies the target UID.",
				Type:        []string{"string"},
				Format:      "",
			},
		},
		"resourceVersion": {
			SchemaProps: spec.SchemaProps{
				Description: "Specifies the target ResourceVersion",
				Type:        []string{"string"},
				Format:      "",
			},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
				Type:        []string{"object"},
				Properties:  properties,
			},
		},
	}
}
// schema_pkg_apis_meta_v1_RootPaths returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths: a single required
// string-array property, "paths".
func schema_pkg_apis_meta_v1_RootPaths(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// Element schema for entries of the "paths" array.
	pathItem := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Default: "",
			Type:    []string{"string"},
			Format:  "",
		},
	}
	pathsProp := spec.Schema{
		SchemaProps: spec.SchemaProps{
			Description: "paths are the paths available at root.",
			Type:        []string{"array"},
			Items:       &spec.SchemaOrArray{Schema: &pathItem},
		},
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "RootPaths lists the paths available at root. For example: \"/healthz\", \"/apis\".",
				Type:        []string{"object"},
				Properties:  map[string]spec.Schema{"paths": pathsProp},
				Required:    []string{"paths"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_ServerAddressByClientCIDR returns the OpenAPI
// definition for k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR.
// Both fields are required defaulted strings, so they are built through a
// shared helper.
func schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
	// requiredString builds a defaulted string property with the given description.
	requiredString := func(desc string) spec.Schema {
		return spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: desc,
				Default:     "",
				Type:        []string{"string"},
				Format:      "",
			},
		}
	}
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"clientCIDR":    requiredString("The CIDR with which clients can match their IP to figure out the server address that they should use."),
					"serverAddress": requiredString("Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port."),
				},
				Required: []string{"clientCIDR", "serverAddress"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Status returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.Status, the generic result type for
// calls that do not return another object. Depends on ListMeta and
// StatusDetails via ref(...).
func schema_pkg_apis_meta_v1_Status(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Status is a return value for calls that don't return other objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "A human-readable description of the status of this operation.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"details": {
						SchemaProps: spec.SchemaProps{
							Description: "Extended data associated with the reason.  Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.",
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"),
						},
					},
					"code": {
						SchemaProps: spec.SchemaProps{
							Description: "Suggested HTTP return code for this status, 0 if not set.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails"},
	}
}
// schema_pkg_apis_meta_v1_StatusCause returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause: three optional string
// fields describing one cause of an api.Status failure.
func schema_pkg_apis_meta_v1_StatusCause(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "A human-readable description of the cause of the error.  This field may be presented as-is to a reader.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"field": {
						SchemaProps: spec.SchemaProps{
							Description: "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed.  Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n  \"name\" - the field \"name\" on the current resource\n  \"items[0].name\" - the field \"name\" on the first array entry in \"items\"",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_StatusDetails returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails. The "causes" array
// references StatusCause, hence the Dependencies entry.
func schema_pkg_apis_meta_v1_StatusDetails(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"group": {
						SchemaProps: spec.SchemaProps{
							Description: "The group attribute of the resource associated with the status StatusReason.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"uid": {
						SchemaProps: spec.SchemaProps{
							Description: "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"causes": {
						SchemaProps: spec.SchemaProps{
							Description: "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"),
									},
								},
							},
						},
					},
					"retryAfterSeconds": {
						SchemaProps: spec.SchemaProps{
							Description: "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause"},
	}
}
// schema_pkg_apis_meta_v1_Table returns the OpenAPI definition for
// k8s.io/apimachinery/pkg/apis/meta/v1.Table, the server-side tabular view
// of a resource list. columnDefinitions and rows are required; both
// reference sibling meta/v1 types via ref(...).
func schema_pkg_apis_meta_v1_Table(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"columnDefinitions": {
						SchemaProps: spec.SchemaProps{
							Description: "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition"),
									},
								},
							},
						},
					},
					"rows": {
						SchemaProps: spec.SchemaProps{
							Description: "rows is the list of items in the table.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"),
									},
								},
							},
						},
					},
				},
				Required: []string{"columnDefinitions", "rows"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition", "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow"},
	}
}
// schema_pkg_apis_meta_v1_TableColumnDefinition builds the OpenAPI v2
// definition for meta/v1 TableColumnDefinition (all fields required).
// NOTE(review): this looks like openapi-gen output — prefer regenerating
// over hand-editing.
func schema_pkg_apis_meta_v1_TableColumnDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableColumnDefinition contains information about a column returned in the Table.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"name": {
						SchemaProps: spec.SchemaProps{
							Description: "name is a human readable name for the column.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"format": {
						SchemaProps: spec.SchemaProps{
							Description: "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"description": {
						SchemaProps: spec.SchemaProps{
							Description: "description is a human readable description of this column.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"priority": {
						SchemaProps: spec.SchemaProps{
							Description: "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"name", "type", "format", "description", "priority"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TableOptions builds the OpenAPI v2 definition for
// meta/v1 TableOptions. All properties are optional (no Required list).
func schema_pkg_apis_meta_v1_TableOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableOptions are used when a Table is requested by the caller.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"includeObject": {
						SchemaProps: spec.SchemaProps{
							Description: "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TableRow builds the OpenAPI v2 definition for
// meta/v1 TableRow; only "cells" is required. References to other schemas are
// resolved via the ref callback and echoed in Dependencies.
func schema_pkg_apis_meta_v1_TableRow(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableRow is an individual row in a table.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"cells": {
						SchemaProps: spec.SchemaProps{
							Description: "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										// untyped: cells may hold any JSON value
										Type:   []string{"object"},
										Format: "",
									},
								},
							},
						},
					},
					"conditions": {
						SchemaProps: spec.SchemaProps{
							Description: "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition"),
									},
								},
							},
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
						},
					},
				},
				Required: []string{"cells"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition", "k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_pkg_apis_meta_v1_TableRowCondition builds the OpenAPI v2 definition
// for meta/v1 TableRowCondition; "type" and "status" are required.
func schema_pkg_apis_meta_v1_TableRowCondition(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TableRowCondition allows a row to be marked with additional information.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Description: "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"status": {
						SchemaProps: spec.SchemaProps{
							Description: "Status of the condition, one of True, False, Unknown.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"reason": {
						SchemaProps: spec.SchemaProps{
							Description: "(brief) machine readable reason for the condition's last transition.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"message": {
						SchemaProps: spec.SchemaProps{
							Description: "Human readable message indicating details about last transition.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"type", "status"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Time builds the OpenAPI v2 definition for meta/v1
// Time. Unlike the struct-walked schemas, the type and format are delegated to
// v1.Time's own OpenAPISchemaType/OpenAPISchemaFormat methods, since Time has
// custom JSON marshaling.
func schema_pkg_apis_meta_v1_Time(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
				Type:        v1.Time{}.OpenAPISchemaType(),
				Format:      v1.Time{}.OpenAPISchemaFormat(),
			},
		},
	}
}
// schema_pkg_apis_meta_v1_Timestamp builds the OpenAPI v2 definition for
// meta/v1 Timestamp (seconds/nanos pair; both required).
func schema_pkg_apis_meta_v1_Timestamp(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Timestamp is a struct that is equivalent to Time, but intended for protobuf marshalling/unmarshalling. It is generated into a serialization that matches Time. Do not use in Go structs.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"seconds": {
						SchemaProps: spec.SchemaProps{
							Description: "Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int64",
						},
					},
					"nanos": {
						SchemaProps: spec.SchemaProps{
							Description: "Non-negative fractions of a second at nanosecond resolution. Negative second values with fractions must still have non-negative nanos values that count forward in time. Must be from 0 to 999,999,999 inclusive. This field may be limited in precision depending on context.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
				},
				Required: []string{"seconds", "nanos"},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_TypeMeta builds the OpenAPI v2 definition for
// meta/v1 TypeMeta (kind/apiVersion; both optional).
func schema_pkg_apis_meta_v1_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_UpdateOptions builds the OpenAPI v2 definition for
// meta/v1 UpdateOptions (all properties optional).
func schema_pkg_apis_meta_v1_UpdateOptions(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"dryRun": {
						SchemaProps: spec.SchemaProps{
							Description: "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: "",
										Type:    []string{"string"},
										Format:  "",
									},
								},
							},
						},
					},
					"fieldManager": {
						SchemaProps: spec.SchemaProps{
							Description: "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
			},
		},
	}
}
// schema_pkg_apis_meta_v1_WatchEvent builds the OpenAPI v2 definition for
// meta/v1 WatchEvent; both "type" and "object" are required, and "object"
// references runtime.RawExtension (echoed in Dependencies).
func schema_pkg_apis_meta_v1_WatchEvent(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Event represents a single event to a watched resource.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"type": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"object": {
						SchemaProps: spec.SchemaProps{
							Description: "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n   depending on context.",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/runtime.RawExtension"),
						},
					},
				},
				Required: []string{"type", "object"},
			},
		},
		Dependencies: []string{
			"k8s.io/apimachinery/pkg/runtime.RawExtension"},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_RawExtension builds the OpenAPI v2
// definition for runtime.RawExtension — an open object with no declared
// properties, since it carries arbitrary serialized payloads.
func schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package: type MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n} type PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this: {\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
				Type:        []string{"object"},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_TypeMeta builds the OpenAPI v2
// definition for runtime.TypeMeta (apiVersion/kind; both optional).
func schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, like this: type MyAwesomeAPIObject struct {\n     runtime.TypeMeta    `json:\",inline\"`\n     ... // other fields\n} func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind\n\nTypeMeta is provided here for convenience. You may use it directly from this package or define your own with the same fields.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
				},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_runtime_Unknown builds the OpenAPI v2
// definition for runtime.Unknown. Note the capitalized property names
// (Raw/ContentEncoding/ContentType) mirror the Go field names; "Raw" uses
// format "byte" (base64-encoded string on the wire).
func schema_k8sio_apimachinery_pkg_runtime_Unknown(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Unknown allows api objects with unknown types to be passed-through. This can be used to deal with the API objects from a plug-in. Unknown objects still have functioning TypeMeta features-- kind, version, etc. metadata and field mutatation.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"kind": {
						SchemaProps: spec.SchemaProps{
							Type:   []string{"string"},
							Format: "",
						},
					},
					"Raw": {
						SchemaProps: spec.SchemaProps{
							Description: "Raw will hold the complete serialized object which couldn't be matched with a registered type. Most likely, nothing should be done with this except for passing it through the system.",
							Type:        []string{"string"},
							Format:      "byte",
						},
					},
					"ContentEncoding": {
						SchemaProps: spec.SchemaProps{
							Description: "ContentEncoding is encoding used to encode 'Raw' data. Unspecified means no encoding.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"ContentType": {
						SchemaProps: spec.SchemaProps{
							Description: "ContentType is serialization method used to serialize 'Raw'. Unspecified means ContentTypeJSON.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"Raw", "ContentEncoding", "ContentType"},
			},
		},
	}
}
// schema_k8sio_apimachinery_pkg_version_Info builds the OpenAPI v2 definition
// for version.Info. Every field is a required plain string.
func schema_k8sio_apimachinery_pkg_version_Info(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "Info contains versioning information. how we'll want to distribute that information.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"major": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"minor": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitVersion": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitCommit": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"gitTreeState": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"buildDate": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"goVersion": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"compiler": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
					"platform": {
						SchemaProps: spec.SchemaProps{
							Default: "",
							Type:    []string{"string"},
							Format:  "",
						},
					},
				},
				Required: []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"},
			},
		},
	}
}
|
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/lucas-clemente/quic-go/http3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Server describes an HTTP server.
type Server struct {
	// Socket addresses to which to bind listeners. Accepts
	// [network addresses](/docs/conventions#network-addresses)
	// that may include port ranges. Listener addresses must
	// be unique; they cannot be repeated across all defined
	// servers.
	Listen []string `json:"listen,omitempty"`

	// A list of listener wrapper modules, which can modify the behavior
	// of the base listener. They are applied in the given order.
	ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`

	// How long to allow a read from a client's upload. Setting this
	// to a short, non-zero value can mitigate slowloris attacks, but
	// may also affect legitimately slow clients.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// ReadHeaderTimeout is like ReadTimeout but for request headers.
	ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`

	// WriteTimeout is how long to allow a write to a client. Note
	// that setting this to a small value when serving large files
	// may negatively affect legitimately slow clients.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// IdleTimeout is the maximum time to wait for the next request
	// when keep-alives are enabled. If zero, ReadTimeout is used.
	// If both are zero, there is no timeout.
	IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`

	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers.
	MaxHeaderBytes int `json:"max_header_bytes,omitempty"`

	// Routes describes how this server will handle requests.
	// Routes are executed sequentially. First a route's matchers
	// are evaluated, then its grouping. If it matches and has
	// not been mutually-excluded by its grouping, then its
	// handlers are executed sequentially. The sequence of invoked
	// handlers comprises a compiled middleware chain that flows
	// from each matching route and its handlers to the next.
	Routes RouteList `json:"routes,omitempty"`

	// Errors is how this server will handle errors returned from any
	// of the handlers in the primary routes. If the primary handler
	// chain returns an error, the error along with its recommended
	// status code are bubbled back up to the HTTP server which
	// executes a separate error route, specified using this property.
	// The error routes work exactly like the normal routes.
	Errors *HTTPErrorConfig `json:"errors,omitempty"`

	// How to handle TLS connections. At least one policy is
	// required to enable HTTPS on this server if automatic
	// HTTPS is disabled or does not apply.
	TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`

	// AutoHTTPS configures or disables automatic HTTPS within this server.
	// HTTPS is enabled automatically and by default when qualifying names
	// are present in a Host matcher and/or when the server is listening
	// only on the HTTPS port.
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`

	// If true, will require that a request's Host header match
	// the value of the ServerName sent by the client's TLS
	// ClientHello; often a necessary safeguard when using TLS
	// client authentication.
	StrictSNIHost *bool `json:"strict_sni_host,omitempty"`

	// Enables access logging and configures how access logs are handled
	// in this server. To minimally enable access logs, simply set this
	// to a non-null, empty struct.
	Logs *ServerLogConfig `json:"logs,omitempty"`

	// Enable experimental HTTP/3 support. Note that HTTP/3 is not a
	// finished standard and has extremely limited client support.
	// This field is not subject to compatibility promises.
	ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`

	// Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
	// which will serve HTTP/2 over plaintext TCP connections if
	// a client support it. Because this is not implemented by the
	// Go standard library, using H2C is incompatible with most
	// of the other options for this server. Do not enable this
	// only to achieve maximum client compatibility. In practice,
	// very few clients implement H2C, and even fewer require it.
	// This setting applies only to unencrypted HTTP listeners.
	// ⚠️ Experimental feature; subject to change or removal.
	AllowH2C bool `json:"allow_h2c,omitempty"`

	// Internal (provisioned) state; not part of the JSON config.
	name                string                // the server's name/key within the app config
	primaryHandlerChain Handler               // compiled chain for Routes
	errorHandlerChain   Handler               // compiled chain for Errors.Routes
	listenerWrappers    []caddy.ListenerWrapper // loaded from ListenerWrappersRaw
	tlsApp              *caddytls.TLS         // used e.g. for ACME HTTP challenges in ServeHTTP
	logger              *zap.Logger           // general server log
	accessLogger        *zap.Logger           // access (request) log; nil disables access logging
	errorLogger         *zap.Logger           // handler-error log
	h3server            *http3.Server         // non-nil when ExperimentalHTTP3 is active
}
// ServeHTTP is the entry point for all HTTP requests. It sets up request
// placeholders and optional access logging, serves ACME HTTP challenges,
// runs the primary handler chain, and finally routes any returned error
// either through the user-defined error routes or a bare status response.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Server", "Caddy")

	// advertise HTTP/3 via Alt-Svc when the H3 server is running
	if s.h3server != nil {
		err := s.h3server.SetQuicHeaders(w.Header())
		if err != nil {
			s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
		}
	}

	repl := caddy.NewReplacer()
	r = PrepareRequest(r, repl, w, s)

	// encode the request for logging purposes before
	// it enters any handler chain; this is necessary
	// to capture the original request in case it gets
	// modified during handling
	loggableReq := zap.Object("request", LoggableHTTPRequest{r})
	errLog := s.errorLogger.With(loggableReq)

	// duration is assigned after the handler chain runs; the deferred
	// access-log closure below reads it, so it must be declared here
	var duration time.Duration

	if s.shouldLogRequest(r) {
		// record status/size via a response recorder so they can be logged
		wrec := NewResponseRecorder(w, nil, nil)
		w = wrec

		// capture the original version of the request
		accLog := s.accessLogger.With(loggableReq)

		defer func() {
			repl.Set("http.response.status", wrec.Status())
			repl.Set("http.response.size", wrec.Size())
			repl.Set("http.response.duration", duration)

			logger := accLog
			if s.Logs != nil {
				logger = s.Logs.wrapLogger(logger, r.Host)
			}

			// 4xx/5xx responses are logged at error level
			log := logger.Info
			if wrec.Status() >= 400 {
				log = logger.Error
			}

			log("handled request",
				zap.String("common_log", repl.ReplaceAll(commonLogFormat, commonLogEmptyValue)),
				zap.Duration("duration", duration),
				zap.Int("size", wrec.Size()),
				zap.Int("status", wrec.Status()),
				zap.Object("resp_headers", LoggableHTTPHeader(wrec.Header())),
			)
		}()
	}

	start := time.Now()

	// guarantee ACME HTTP challenges; handle them
	// separately from any user-defined handlers
	if s.tlsApp.HandleHTTPChallenge(w, r) {
		duration = time.Since(start)
		return
	}

	// execute the primary handler chain
	err := s.primaryHandlerChain.ServeHTTP(w, r)
	duration = time.Since(start)

	// if no errors, we're done!
	if err == nil {
		return
	}

	// prepare the error log
	logger := errLog
	if s.Logs != nil {
		logger = s.Logs.wrapLogger(logger, r.Host)
	}
	logger = logger.With(zap.Duration("duration", duration))

	// get the values that will be used to log the error
	errStatus, errMsg, errFields := errLogValues(err)

	// add HTTP error information to request context
	// (called before the nil check below; safe because WithError
	// has an unused *HTTPErrorConfig receiver — see WithError)
	r = s.Errors.WithError(r, err)

	if s.Errors != nil && len(s.Errors.Routes) > 0 {
		// execute user-defined error handling route
		err2 := s.errorHandlerChain.ServeHTTP(w, r)
		if err2 == nil {
			// user's error route handled the error response
			// successfully, so now just log the error
			if errStatus >= 500 {
				logger.Error(errMsg, errFields...)
			}
		} else {
			// well... this is awkward; log both the original
			// error and the error from the error route
			errFields = append([]zapcore.Field{
				zap.String("error", err2.Error()),
				zap.Namespace("first_error"),
				zap.String("msg", errMsg),
			}, errFields...)
			logger.Error("error handling handler error", errFields...)
		}
	} else {
		// no error routes configured: log server-side errors and
		// reply with the recommended status code (empty body)
		if errStatus >= 500 {
			logger.Error(errMsg, errFields...)
		}
		w.WriteHeader(errStatus)
	}
}
// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
	enforced := func(w http.ResponseWriter, r *http.Request) error {
		return s.enforcementHandler(w, r, stack)
	}
	return HandlerFunc(enforced)
}
// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
	// enforce strict host matching, which ensures that the SNI
	// value (if any), matches the Host header; essential for
	// servers that rely on TLS ClientAuth sharing a listener
	// with servers that do not; if not enforced, client could
	// bypass by sending benign SNI then restricted Host header
	strict := s.StrictSNIHost != nil && *s.StrictSNIHost
	if strict && r.TLS != nil {
		// strip any port; if splitting fails the Host likely
		// had no port, so use it verbatim
		hostOnly := r.Host
		if h, _, splitErr := net.SplitHostPort(r.Host); splitErr == nil {
			hostOnly = h
		}
		if !strings.EqualFold(r.TLS.ServerName, hostOnly) {
			mismatch := fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
				r.TLS.ServerName, hostOnly)
			r.Close = true
			return Error(http.StatusForbidden, mismatch)
		}
	}
	return next.ServeHTTP(w, r)
}
// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
	port := uint(otherPort)
	for _, listenAddr := range s.Listen {
		parsed, err := caddy.ParseNetworkAddress(listenAddr)
		if err != nil {
			// unparseable addresses can't tell us anything
			continue
		}
		// outside this listener's port range means some other port is in use
		if port < parsed.StartPort || port > parsed.EndPort {
			return true
		}
	}
	return false
}
// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported)
func (s *Server) hasListenerAddress(fullAddr string) bool {
	laddrs, err := caddy.ParseNetworkAddress(fullAddr)
	if err != nil {
		return false
	}
	if laddrs.PortRangeSize() != 1 {
		return false // TODO: support port ranges
	}

	for _, lnAddr := range s.Listen {
		thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			// skip listener addresses we can't parse
			continue
		}
		if thisAddrs.Network != laddrs.Network {
			continue
		}

		// Apparently, Linux requires all bound ports to be distinct
		// *regardless of host interface* even if the addresses are
		// in fact different; binding "192.168.0.1:9000" and then
		// ":9000" will fail for ":9000" because "address is already
		// in use" even though it's not, and the same bindings work
		// fine on macOS. I also found on Linux that listening on
		// "[::]:9000" would fail with a similar error, except with
		// the address "0.0.0.0:9000", as if deliberately ignoring
		// that I specified the IPv6 interface explicitly. This seems
		// to be a major bug in the Linux network stack and I don't
		// know why it hasn't been fixed yet, so for now we have to
		// special-case ourselves around Linux like a doting parent.
		// The second issue seems very similar to a discussion here:
		// https://github.com/nodejs/node/issues/9390
		//
		// This is very easy to reproduce by creating an HTTP server
		// that listens to both addresses or just one with a host
		// interface; or for a more confusing reproduction, try
		// listening on "127.0.0.1:80" and ":443" and you'll see
		// the error, if you take away the GOOS condition below.
		//
		// So, an address is equivalent if the port is in the port
		// range, and if not on Linux, the host is the same... sigh.
		if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
			(laddrs.StartPort <= thisAddrs.EndPort) &&
			(laddrs.StartPort >= thisAddrs.StartPort) {
			return true
		}
	}
	return false
}
// hasTLSClientAuth reports whether any of the server's TLS connection
// policies has active client authentication configured.
func (s *Server) hasTLSClientAuth() bool {
	for _, pol := range s.TLSConnPolicies {
		auth := pol.ClientAuthentication
		if auth == nil {
			continue
		}
		if auth.Active() {
			return true
		}
	}
	return false
}
// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers.
type HTTPErrorConfig struct {
	// The routes to evaluate after the primary handler
	// chain returns an error. In an error route, extra
	// placeholders are available:
	//
	// Placeholder | Description
	// ------------|---------------
	// `{http.error.status_code}` | The recommended HTTP status code
	// `{http.error.status_text}` | The status text associated with the recommended status code
	// `{http.error.message}` | The error message
	// `{http.error.trace}` | The origin of the error
	// `{http.error.id}` | An identifier for this occurrence of the error
	//
	// If empty, the server replies with the recommended
	// status code and no body (see ServeHTTP).
	Routes RouteList `json:"routes,omitempty"`
}
// WithError makes a shallow copy of r to add the error to its
// context, and sets placeholders on the request's replacer
// related to err. It returns the modified request which has
// the error information in its context and replacer. It
// overwrites any existing error values that are stored.
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
	// stash the raw error value in the request context
	// so error handlers can retrieve it
	r = r.WithContext(context.WithValue(r.Context(), ErrorCtxKey, err))

	// expose error details through replacer placeholders
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	repl.Set("http.error", err)
	if handlerErr, isHandlerErr := err.(HandlerError); isHandlerErr {
		repl.Set("http.error.status_code", handlerErr.StatusCode)
		repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
		repl.Set("http.error.trace", handlerErr.Trace)
		repl.Set("http.error.id", handlerErr.ID)
	}

	return r
}
// shouldLogRequest returns true if this request should be logged.
func (s *Server) shouldLogRequest(r *http.Request) bool {
	if s.accessLogger == nil || s.Logs == nil {
		// access logging is entirely disabled for this server
		return false
	}
	for _, skipped := range s.Logs.SkipHosts {
		if skipped == r.Host {
			// logging is disabled for this particular host
			return false
		}
	}
	if _, mapped := s.Logs.LoggerNames[r.Host]; mapped {
		// this host is mapped to a particular logger name
		return true
	}
	// unmapped hosts are logged unless configured otherwise
	return !s.Logs.SkipUnmappedHosts
}
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the LoggerNames (logger_names) map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to a custom logger name.
	// For example, a mapping of "example.com" to "example" would
	// cause access logs from requests with a Host of example.com
	// to be emitted by a logger named "http.log.access.example".
	LoggerNames map[string]string `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	// Entries are compared exactly against the request Host value.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// LoggerNames (logger_names) map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`
}
// wrapLogger wraps logger in a logger named according to user preferences for the given host.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger {
	name := slc.getLoggerName(host)
	if name == "" {
		// no custom name applies; use the logger as-is
		return logger
	}
	return logger.Named(name)
}
// getLoggerName returns the custom logger name configured for host,
// or DefaultLoggerName if no mapping matches. Lookups try the exact
// host, then the host without its port, then increasingly generic
// wildcard forms of the host.
func (slc ServerLogConfig) getLoggerName(host string) string {
	tryHost := func(key string) (string, bool) {
		// first try exact match
		if loggerName, ok := slc.LoggerNames[key]; ok {
			return loggerName, ok
		}
		// strip port and try again (i.e. Host header of "example.com:1234" should
		// match "example.com" if there is no "example.com:1234" in the map)
		hostOnly, _, err := net.SplitHostPort(key)
		if err != nil {
			// no port to strip; nothing more to try for this key
			return "", false
		}
		loggerName, ok := slc.LoggerNames[hostOnly]
		return loggerName, ok
	}

	// try the exact hostname first
	if loggerName, ok := tryHost(host); ok {
		return loggerName
	}

	// try matching wildcard domains if other non-specific loggers exist
	// NOTE: labels are wildcarded cumulatively (they are never restored),
	// so "a.b.com" is tried as "*.b.com", then "*.*.com", then "*.*.*"
	labels := strings.Split(host, ".")
	for i := range labels {
		if labels[i] == "" {
			continue
		}
		labels[i] = "*"
		wildcardHost := strings.Join(labels, ".")
		if loggerName, ok := tryHost(wildcardHost); ok {
			return loggerName
		}
	}

	return slc.DefaultLoggerName
}
// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
// be nil, but the handlers will lose response placeholders and access to the server.
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
	// set up the context for the request: the replacer, server
	// instance, variable table, and route-group tracking map
	ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
	ctx = context.WithValue(ctx, ServerCtxKey, s)
	ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
	ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
	var url2 url.URL // avoid letting this escape to the heap
	// snapshot the unmodified request so error handlers and loggers
	// can later recover its original method/URI/remote address
	ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
	r = r.WithContext(ctx)

	// once the pointer to the request won't change
	// anymore, finish setting up the replacer
	addHTTPVarsToReplacer(repl, r, w)

	return r
}
// errLogValues inspects err and returns the status code
// to use, the error log message, and any extra fields.
// If err is a HandlerError, the returned values will
// have richer information.
func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
	handlerErr, isHandlerErr := err.(HandlerError)
	if !isHandlerErr {
		// plain error: report it as an internal server error
		return http.StatusInternalServerError, err.Error(), nil
	}
	status = handlerErr.StatusCode
	// prefer the wrapped error's message when one exists
	msg = err.Error()
	if handlerErr.Err != nil {
		msg = handlerErr.Err.Error()
	}
	fields = []zapcore.Field{
		zap.Int("status", handlerErr.StatusCode),
		zap.String("err_id", handlerErr.ID),
		zap.String("err_trace", handlerErr.Trace),
	}
	return
}
// originalRequest returns a partial, shallow copy of
// req, including: req.Method, deep copy of req.URL
// (into the urlCopy parameter, which should be on the
// stack), req.RequestURI, and req.RemoteAddr. Notably,
// headers are not copied. This function is designed to
// be very fast and efficient, and useful primarily for
// read-only/logging purposes.
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
cloneURL(req.URL, urlCopy)
return http.Request{
Method: req.Method,
RemoteAddr: req.RemoteAddr,
RequestURI: req.RequestURI,
URL: urlCopy,
}
}
// cloneURL makes a copy of r.URL and returns a
// new value that doesn't reference the original.
func cloneURL(from, to *url.URL) {
*to = *from
if from.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *from.User
to.User = userInfo
}
}
const (
	// commonLogFormat is the common log format. https://en.wikipedia.org/wiki/Common_Log_Format
	// The second field (identd) is always the empty value.
	commonLogFormat = `{http.request.remote.host} ` + commonLogEmptyValue + ` {http.auth.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}`

	// commonLogEmptyValue is the common empty log value.
	commonLogEmptyValue = "-"
)

// Context keys for HTTP request context values.
const (
	// For referencing the server instance
	ServerCtxKey caddy.CtxKey = "server"

	// For the request's variable table
	VarsCtxKey caddy.CtxKey = "vars"

	// For a partial copy of the unmodified request that
	// originally came into the server's entry handler
	OriginalRequestCtxKey caddy.CtxKey = "original_request"
)
caddyhttp: Restore original request params before error handlers (#3781)
* caddyhttp: Restore original request params before error handlers
Fixes #3717
* Add comment
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddyhttp
import (
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"runtime"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/modules/caddytls"
"github.com/lucas-clemente/quic-go/http3"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Server describes an HTTP server.
type Server struct {
	// Socket addresses to which to bind listeners. Accepts
	// [network addresses](/docs/conventions#network-addresses)
	// that may include port ranges. Listener addresses must
	// be unique; they cannot be repeated across all defined
	// servers.
	Listen []string `json:"listen,omitempty"`

	// A list of listener wrapper modules, which can modify the behavior
	// of the base listener. They are applied in the given order.
	ListenerWrappersRaw []json.RawMessage `json:"listener_wrappers,omitempty" caddy:"namespace=caddy.listeners inline_key=wrapper"`

	// How long to allow a read from a client's upload. Setting this
	// to a short, non-zero value can mitigate slowloris attacks, but
	// may also affect legitimately slow clients.
	ReadTimeout caddy.Duration `json:"read_timeout,omitempty"`

	// ReadHeaderTimeout is like ReadTimeout but for request headers.
	ReadHeaderTimeout caddy.Duration `json:"read_header_timeout,omitempty"`

	// WriteTimeout is how long to allow a write to a client. Note
	// that setting this to a small value when serving large files
	// may negatively affect legitimately slow clients.
	WriteTimeout caddy.Duration `json:"write_timeout,omitempty"`

	// IdleTimeout is the maximum time to wait for the next request
	// when keep-alives are enabled. If zero, ReadTimeout is used.
	// If both are zero, there is no timeout.
	IdleTimeout caddy.Duration `json:"idle_timeout,omitempty"`

	// MaxHeaderBytes is the maximum size to parse from a client's
	// HTTP request headers.
	MaxHeaderBytes int `json:"max_header_bytes,omitempty"`

	// Routes describes how this server will handle requests.
	// Routes are executed sequentially. First a route's matchers
	// are evaluated, then its grouping. If it matches and has
	// not been mutually-excluded by its grouping, then its
	// handlers are executed sequentially. The sequence of invoked
	// handlers comprises a compiled middleware chain that flows
	// from each matching route and its handlers to the next.
	Routes RouteList `json:"routes,omitempty"`

	// Errors is how this server will handle errors returned from any
	// of the handlers in the primary routes. If the primary handler
	// chain returns an error, the error along with its recommended
	// status code are bubbled back up to the HTTP server which
	// executes a separate error route, specified using this property.
	// The error routes work exactly like the normal routes.
	Errors *HTTPErrorConfig `json:"errors,omitempty"`

	// How to handle TLS connections. At least one policy is
	// required to enable HTTPS on this server if automatic
	// HTTPS is disabled or does not apply.
	TLSConnPolicies caddytls.ConnectionPolicies `json:"tls_connection_policies,omitempty"`

	// AutoHTTPS configures or disables automatic HTTPS within this server.
	// HTTPS is enabled automatically and by default when qualifying names
	// are present in a Host matcher and/or when the server is listening
	// only on the HTTPS port.
	AutoHTTPS *AutoHTTPSConfig `json:"automatic_https,omitempty"`

	// If true, will require that a request's Host header match
	// the value of the ServerName sent by the client's TLS
	// ClientHello; often a necessary safeguard when using TLS
	// client authentication.
	StrictSNIHost *bool `json:"strict_sni_host,omitempty"`

	// Enables access logging and configures how access logs are handled
	// in this server. To minimally enable access logs, simply set this
	// to a non-null, empty struct.
	Logs *ServerLogConfig `json:"logs,omitempty"`

	// Enable experimental HTTP/3 support. Note that HTTP/3 is not a
	// finished standard and has extremely limited client support.
	// This field is not subject to compatibility promises.
	ExperimentalHTTP3 bool `json:"experimental_http3,omitempty"`

	// Enables H2C ("Cleartext HTTP/2" or "H2 over TCP") support,
	// which will serve HTTP/2 over plaintext TCP connections if
	// a client support it. Because this is not implemented by the
	// Go standard library, using H2C is incompatible with most
	// of the other options for this server. Do not enable this
	// only to achieve maximum client compatibility. In practice,
	// very few clients implement H2C, and even fewer require it.
	// This setting applies only to unencrypted HTTP listeners.
	// ⚠️ Experimental feature; subject to change or removal.
	AllowH2C bool `json:"allow_h2c,omitempty"`

	// name identifies this server; presumably the key in the app's
	// servers map — TODO confirm in provisioning code.
	name string

	// compiled middleware chains; presumably built from Routes and
	// Errors.Routes respectively during provisioning — confirm there.
	primaryHandlerChain Handler
	errorHandlerChain   Handler

	// decoded listener wrapper modules (from ListenerWrappersRaw)
	listenerWrappers []caddy.ListenerWrapper

	// the TLS app, used to handle ACME HTTP challenges in ServeHTTP
	tlsApp *caddytls.TLS

	// loggers: general, access (nil disables access logging per
	// shouldLogRequest), and handler-chain errors
	logger       *zap.Logger
	accessLogger *zap.Logger
	errorLogger  *zap.Logger

	// non-nil when experimental HTTP/3 is active; used to advertise
	// Alt-Svc in ServeHTTP
	h3server *http3.Server
}
// ServeHTTP is the entry point for all HTTP requests.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Server", "Caddy")

	if s.h3server != nil {
		// advertise HTTP/3 availability via the Alt-Svc header
		err := s.h3server.SetQuicHeaders(w.Header())
		if err != nil {
			s.logger.Error("setting HTTP/3 Alt-Svc header", zap.Error(err))
		}
	}

	repl := caddy.NewReplacer()
	r = PrepareRequest(r, repl, w, s)

	// encode the request for logging purposes before
	// it enters any handler chain; this is necessary
	// to capture the original request in case it gets
	// modified during handling
	loggableReq := zap.Object("request", LoggableHTTPRequest{r})
	errLog := s.errorLogger.With(loggableReq)

	var duration time.Duration

	if s.shouldLogRequest(r) {
		// record status/size so they can be logged after handling
		wrec := NewResponseRecorder(w, nil, nil)
		w = wrec

		// capture the original version of the request
		accLog := s.accessLogger.With(loggableReq)

		// emit the access log entry when this function returns,
		// once duration and the recorded response are known
		defer func() {
			repl.Set("http.response.status", wrec.Status())
			repl.Set("http.response.size", wrec.Size())
			repl.Set("http.response.duration", duration)

			logger := accLog
			if s.Logs != nil {
				logger = s.Logs.wrapLogger(logger, r.Host)
			}

			// 4xx and 5xx responses are logged at error level
			log := logger.Info
			if wrec.Status() >= 400 {
				log = logger.Error
			}

			log("handled request",
				zap.String("common_log", repl.ReplaceAll(commonLogFormat, commonLogEmptyValue)),
				zap.Duration("duration", duration),
				zap.Int("size", wrec.Size()),
				zap.Int("status", wrec.Status()),
				zap.Object("resp_headers", LoggableHTTPHeader(wrec.Header())),
			)
		}()
	}

	start := time.Now()

	// guarantee ACME HTTP challenges; handle them
	// separately from any user-defined handlers
	if s.tlsApp.HandleHTTPChallenge(w, r) {
		duration = time.Since(start)
		return
	}

	// execute the primary handler chain
	err := s.primaryHandlerChain.ServeHTTP(w, r)
	duration = time.Since(start)

	// if no errors, we're done!
	if err == nil {
		return
	}

	// restore original request before invoking error handler chain (issue #3717)
	// TODO: this does not restore original headers, if modified (for efficiency)
	origReq := r.Context().Value(OriginalRequestCtxKey).(http.Request)
	r.Method = origReq.Method
	r.RemoteAddr = origReq.RemoteAddr
	r.RequestURI = origReq.RequestURI
	cloneURL(origReq.URL, r.URL)

	// prepare the error log
	logger := errLog
	if s.Logs != nil {
		logger = s.Logs.wrapLogger(logger, r.Host)
	}
	logger = logger.With(zap.Duration("duration", duration))

	// get the values that will be used to log the error
	errStatus, errMsg, errFields := errLogValues(err)

	// add HTTP error information to request context
	// (safe even if s.Errors is nil: WithError never
	// dereferences its receiver)
	r = s.Errors.WithError(r, err)

	if s.Errors != nil && len(s.Errors.Routes) > 0 {
		// execute user-defined error handling route
		err2 := s.errorHandlerChain.ServeHTTP(w, r)
		if err2 == nil {
			// user's error route handled the error response
			// successfully, so now just log the error
			if errStatus >= 500 {
				logger.Error(errMsg, errFields...)
			}
		} else {
			// well... this is awkward
			errFields = append([]zapcore.Field{
				zap.String("error", err2.Error()),
				zap.Namespace("first_error"),
				zap.String("msg", errMsg),
			}, errFields...)
			logger.Error("error handling handler error", errFields...)
		}
	} else {
		// no error routes configured: log server errors and write
		// the recommended status code with an empty body
		if errStatus >= 500 {
			logger.Error(errMsg, errFields...)
		}
		w.WriteHeader(errStatus)
	}
}
// wrapPrimaryRoute wraps stack (a compiled middleware handler chain)
// in s.enforcementHandler which performs crucial security checks, etc.
func (s *Server) wrapPrimaryRoute(stack Handler) Handler {
	wrapped := func(w http.ResponseWriter, r *http.Request) error {
		return s.enforcementHandler(w, r, stack)
	}
	return HandlerFunc(wrapped)
}
// enforcementHandler is an implicit middleware which performs
// standard checks before executing the HTTP middleware chain.
func (s *Server) enforcementHandler(w http.ResponseWriter, r *http.Request, next Handler) error {
	// enforce strict host matching, which ensures that the SNI
	// value (if any), matches the Host header; essential for
	// servers that rely on TLS ClientAuth sharing a listener
	// with servers that do not; if not enforced, client could
	// bypass by sending benign SNI then restricted Host header
	if s.StrictSNIHost != nil && *s.StrictSNIHost && r.TLS != nil {
		hostname, _, splitErr := net.SplitHostPort(r.Host)
		if splitErr != nil {
			// OK; the Host value probably lacked a port
			hostname = r.Host
		}
		if !strings.EqualFold(r.TLS.ServerName, hostname) {
			r.Close = true
			return Error(http.StatusForbidden,
				fmt.Errorf("strict host matching: TLS ServerName (%s) and HTTP Host (%s) values differ",
					r.TLS.ServerName, hostname))
		}
	}
	return next.ServeHTTP(w, r)
}
// listenersUseAnyPortOtherThan returns true if there are any
// listeners in s that use a port which is not otherPort.
func (s *Server) listenersUseAnyPortOtherThan(otherPort int) bool {
	port := uint(otherPort)
	for _, lnAddr := range s.Listen {
		laddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			// skip unparseable listener addresses
			continue
		}
		if port < laddrs.StartPort || port > laddrs.EndPort {
			return true
		}
	}
	return false
}
// hasListenerAddress returns true if s has a listener
// at the given address fullAddr. Currently, fullAddr
// must represent exactly one socket address (port
// ranges are not supported)
func (s *Server) hasListenerAddress(fullAddr string) bool {
	laddrs, err := caddy.ParseNetworkAddress(fullAddr)
	if err != nil {
		// unparseable input cannot match anything
		return false
	}
	if laddrs.PortRangeSize() != 1 {
		return false // TODO: support port ranges
	}

	for _, lnAddr := range s.Listen {
		thisAddrs, err := caddy.ParseNetworkAddress(lnAddr)
		if err != nil {
			// skip unparseable configured addresses
			continue
		}
		if thisAddrs.Network != laddrs.Network {
			continue
		}

		// Apparently, Linux requires all bound ports to be distinct
		// *regardless of host interface* even if the addresses are
		// in fact different; binding "192.168.0.1:9000" and then
		// ":9000" will fail for ":9000" because "address is already
		// in use" even though it's not, and the same bindings work
		// fine on macOS. I also found on Linux that listening on
		// "[::]:9000" would fail with a similar error, except with
		// the address "0.0.0.0:9000", as if deliberately ignoring
		// that I specified the IPv6 interface explicitly. This seems
		// to be a major bug in the Linux network stack and I don't
		// know why it hasn't been fixed yet, so for now we have to
		// special-case ourselves around Linux like a doting parent.
		// The second issue seems very similar to a discussion here:
		// https://github.com/nodejs/node/issues/9390
		//
		// This is very easy to reproduce by creating an HTTP server
		// that listens to both addresses or just one with a host
		// interface; or for a more confusing reproduction, try
		// listening on "127.0.0.1:80" and ":443" and you'll see
		// the error, if you take away the GOOS condition below.
		//
		// So, an address is equivalent if the port is in the port
		// range, and if not on Linux, the host is the same... sigh.
		// (On Linux the host comparison is skipped entirely.)
		if (runtime.GOOS == "linux" || thisAddrs.Host == laddrs.Host) &&
			(laddrs.StartPort <= thisAddrs.EndPort) &&
			(laddrs.StartPort >= thisAddrs.StartPort) {
			return true
		}
	}
	return false
}
// hasTLSClientAuth reports whether any of this server's TLS
// connection policies has client authentication enabled.
func (s *Server) hasTLSClientAuth() bool {
	for _, pol := range s.TLSConnPolicies {
		if pol.ClientAuthentication == nil {
			continue
		}
		if pol.ClientAuthentication.Active() {
			return true
		}
	}
	return false
}
// HTTPErrorConfig determines how to handle errors
// from the HTTP handlers. If no error routes are
// configured, the server writes the recommended
// status code with an empty body instead.
type HTTPErrorConfig struct {
	// The routes to evaluate after the primary handler
	// chain returns an error. In an error route, extra
	// placeholders are available:
	//
	// Placeholder | Description
	// ------------|---------------
	// `{http.error.status_code}` | The recommended HTTP status code
	// `{http.error.status_text}` | The status text associated with the recommended status code
	// `{http.error.message}` | The error message
	// `{http.error.trace}` | The origin of the error
	// `{http.error.id}` | An identifier for this occurrence of the error
	Routes RouteList `json:"routes,omitempty"`
}
// WithError makes a shallow copy of r to add the error to its
// context, and sets placeholders on the request's replacer
// related to err. It returns the modified request which has
// the error information in its context and replacer. It
// overwrites any existing error values that are stored.
func (*HTTPErrorConfig) WithError(r *http.Request, err error) *http.Request {
	// stash the raw error value in the request context so
	// error handlers can retrieve it later
	r = r.WithContext(context.WithValue(r.Context(), ErrorCtxKey, err))

	// expose the error through the request's placeholders
	repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
	repl.Set("http.error", err)
	handlerErr, isHandlerErr := err.(HandlerError)
	if isHandlerErr {
		// richer placeholders are available for structured errors
		repl.Set("http.error.status_code", handlerErr.StatusCode)
		repl.Set("http.error.status_text", http.StatusText(handlerErr.StatusCode))
		repl.Set("http.error.trace", handlerErr.Trace)
		repl.Set("http.error.id", handlerErr.ID)
	}

	return r
}
// shouldLogRequest returns true if this request should be logged.
func (s *Server) shouldLogRequest(r *http.Request) bool {
	if s.accessLogger == nil || s.Logs == nil {
		// access logging is entirely disabled for this server
		return false
	}
	for _, skipped := range s.Logs.SkipHosts {
		if skipped == r.Host {
			// logging is disabled for this particular host
			return false
		}
	}
	if _, mapped := s.Logs.LoggerNames[r.Host]; mapped {
		// this host is mapped to a particular logger name
		return true
	}
	// unmapped hosts are logged unless configured otherwise
	return !s.Logs.SkipUnmappedHosts
}
// ServerLogConfig describes a server's logging configuration. If
// enabled without customization, all requests to this server are
// logged to the default logger; logger destinations may be
// customized per-request-host.
type ServerLogConfig struct {
	// The default logger name for all logs emitted by this server for
	// hostnames that are not in the LoggerNames (logger_names) map.
	DefaultLoggerName string `json:"default_logger_name,omitempty"`

	// LoggerNames maps request hostnames to a custom logger name.
	// For example, a mapping of "example.com" to "example" would
	// cause access logs from requests with a Host of example.com
	// to be emitted by a logger named "http.log.access.example".
	LoggerNames map[string]string `json:"logger_names,omitempty"`

	// By default, all requests to this server will be logged if
	// access logging is enabled. This field lists the request
	// hosts for which access logging should be disabled.
	// Entries are compared exactly against the request Host value.
	SkipHosts []string `json:"skip_hosts,omitempty"`

	// If true, requests to any host not appearing in the
	// LoggerNames (logger_names) map will not be logged.
	SkipUnmappedHosts bool `json:"skip_unmapped_hosts,omitempty"`
}
// wrapLogger wraps logger in a logger named according to user preferences for the given host.
func (slc ServerLogConfig) wrapLogger(logger *zap.Logger, host string) *zap.Logger {
	name := slc.getLoggerName(host)
	if name == "" {
		// no custom name applies; use the logger as-is
		return logger
	}
	return logger.Named(name)
}
// getLoggerName returns the custom logger name configured for host,
// or DefaultLoggerName if no mapping matches. Lookups try the exact
// host, then the host without its port, then increasingly generic
// wildcard forms of the host.
func (slc ServerLogConfig) getLoggerName(host string) string {
	tryHost := func(key string) (string, bool) {
		// first try exact match
		if loggerName, ok := slc.LoggerNames[key]; ok {
			return loggerName, ok
		}
		// strip port and try again (i.e. Host header of "example.com:1234" should
		// match "example.com" if there is no "example.com:1234" in the map)
		hostOnly, _, err := net.SplitHostPort(key)
		if err != nil {
			// no port to strip; nothing more to try for this key
			return "", false
		}
		loggerName, ok := slc.LoggerNames[hostOnly]
		return loggerName, ok
	}

	// try the exact hostname first
	if loggerName, ok := tryHost(host); ok {
		return loggerName
	}

	// try matching wildcard domains if other non-specific loggers exist
	// NOTE: labels are wildcarded cumulatively (they are never restored),
	// so "a.b.com" is tried as "*.b.com", then "*.*.com", then "*.*.*"
	labels := strings.Split(host, ".")
	for i := range labels {
		if labels[i] == "" {
			continue
		}
		labels[i] = "*"
		wildcardHost := strings.Join(labels, ".")
		if loggerName, ok := tryHost(wildcardHost); ok {
			return loggerName
		}
	}

	return slc.DefaultLoggerName
}
// PrepareRequest fills the request r for use in a Caddy HTTP handler chain. w and s can
// be nil, but the handlers will lose response placeholders and access to the server.
func PrepareRequest(r *http.Request, repl *caddy.Replacer, w http.ResponseWriter, s *Server) *http.Request {
	// set up the context for the request: the replacer, server
	// instance, variable table, and route-group tracking map
	ctx := context.WithValue(r.Context(), caddy.ReplacerCtxKey, repl)
	ctx = context.WithValue(ctx, ServerCtxKey, s)
	ctx = context.WithValue(ctx, VarsCtxKey, make(map[string]interface{}))
	ctx = context.WithValue(ctx, routeGroupCtxKey, make(map[string]struct{}))
	var url2 url.URL // avoid letting this escape to the heap
	// snapshot the unmodified request so error handlers and loggers
	// can later recover its original method/URI/remote address
	ctx = context.WithValue(ctx, OriginalRequestCtxKey, originalRequest(r, &url2))
	r = r.WithContext(ctx)

	// once the pointer to the request won't change
	// anymore, finish setting up the replacer
	addHTTPVarsToReplacer(repl, r, w)

	return r
}
// errLogValues inspects err and returns the status code
// to use, the error log message, and any extra fields.
// If err is a HandlerError, the returned values will
// have richer information.
func errLogValues(err error) (status int, msg string, fields []zapcore.Field) {
	handlerErr, isHandlerErr := err.(HandlerError)
	if !isHandlerErr {
		// plain error: report it as an internal server error
		return http.StatusInternalServerError, err.Error(), nil
	}
	status = handlerErr.StatusCode
	// prefer the wrapped error's message when one exists
	msg = err.Error()
	if handlerErr.Err != nil {
		msg = handlerErr.Err.Error()
	}
	fields = []zapcore.Field{
		zap.Int("status", handlerErr.StatusCode),
		zap.String("err_id", handlerErr.ID),
		zap.String("err_trace", handlerErr.Trace),
	}
	return
}
// originalRequest returns a partial, shallow copy of
// req, including: req.Method, deep copy of req.URL
// (into the urlCopy parameter, which should be on the
// stack), req.RequestURI, and req.RemoteAddr. Notably,
// headers are not copied. This function is designed to
// be very fast and efficient, and useful primarily for
// read-only/logging purposes.
func originalRequest(req *http.Request, urlCopy *url.URL) http.Request {
cloneURL(req.URL, urlCopy)
return http.Request{
Method: req.Method,
RemoteAddr: req.RemoteAddr,
RequestURI: req.RequestURI,
URL: urlCopy,
}
}
// cloneURL makes a copy of r.URL and returns a
// new value that doesn't reference the original.
func cloneURL(from, to *url.URL) {
*to = *from
if from.User != nil {
userInfo := new(url.Userinfo)
*userInfo = *from.User
to.User = userInfo
}
}
const (
	// commonLogFormat is the common log format. https://en.wikipedia.org/wiki/Common_Log_Format
	// The second field (identd) is always the empty value.
	commonLogFormat = `{http.request.remote.host} ` + commonLogEmptyValue + ` {http.auth.user.id} [{time.now.common_log}] "{http.request.orig_method} {http.request.orig_uri} {http.request.proto}" {http.response.status} {http.response.size}`

	// commonLogEmptyValue is the common empty log value.
	commonLogEmptyValue = "-"
)

// Context keys for HTTP request context values.
const (
	// For referencing the server instance
	ServerCtxKey caddy.CtxKey = "server"

	// For the request's variable table
	VarsCtxKey caddy.CtxKey = "vars"

	// For a partial copy of the unmodified request that
	// originally came into the server's entry handler
	OriginalRequestCtxKey caddy.CtxKey = "original_request"
)
|
/*
Copyright 2021 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookv1 "github.com/rook/rook/pkg/apis/rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/operator/test"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
"github.com/tevino/abool"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
// infof logs through the package logger with the current test's name
// prefixed, helping identify logs from unit test helpers versus from
// runtime code.
func infof(t *testing.T, format string, args ...interface{}) {
	prefixed := t.Name() + ": " + format
	logger.Infof(prefixed, args...)
}
// The definition for this test is a wrapper for the test function that adds a timeout
func TestOSDsOnPVC(t *testing.T) {
	// save and restore global logger state so the TRACE level
	// set below doesn't leak into other tests
	oldLogger := *logger
	defer func() { logger = &oldLogger }() // reset logger to default after this test
	logger.SetLevel(capnslog.TRACE)        // want more log info for this test if it fails

	oldOpportunisticDuration := osdOpportunisticUpdateDuration
	defer func() { osdOpportunisticUpdateDuration = oldOpportunisticDuration }()
	// lower the opportunistic update check duration for unit tests to speed them up
	osdOpportunisticUpdateDuration = 1 * time.Millisecond

	// This test runs in less than 150 milliseconds on a 6-core CPU w/ 16GB RAM
	// GitHub CI runner can be a lot slower (~2.5 seconds)
	// 50x the fast-machine runtime (7.5s) before declaring a hang
	done := make(chan bool)
	timeout := time.After(50 * 150 * time.Millisecond)
	go func() {
		// use defer because t.Fatal will kill this goroutine, and we always want done set if the
		// func stops running
		defer func() { done <- true }()
		// run the actual test
		testOSDsOnPVC(t)
	}()
	select {
	case <-timeout:
		t.Fatal("Test timed out. This is a test failure.")
	case <-done:
	}
}
// This is the actual test. If it hangs, we should consider that an error. Writing a timeout in
// tests requires running the test in a goroutine, so there is a timeout wrapper above
func testOSDsOnPVC(t *testing.T) {
namespace := "osd-on-pvc"
ctx := context.TODO()
// we don't need to create actual nodes for this test, but this is the set of nodes which should
// we will use to create fake placements for OSD prepare job pods
osdIDGenerator := newOSDIDGenerator()
// set up a fake k8s client set and watcher to generate events that the operator will listen to
clientset := test.NewComplexClientset(t)
test.AddSomeReadyNodes(t, clientset, 3)
assignPodToNode := true
test.PrependComplexJobReactor(t, clientset, assignPodToNode)
test.SetFakeKubernetesVersion(clientset, "v1.13.2") // v1.13 or higher is required for OSDs on PVC
os.Setenv(k8sutil.PodNamespaceEnvVar, namespace)
defer os.Unsetenv(k8sutil.PodNamespaceEnvVar)
statusMapWatcher := watch.NewRaceFreeFake()
clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(statusMapWatcher, nil))
// Helper methods to set "completed" status on "starting" ConfigMaps.
setStatusConfigMapToCompleted := func(cm *corev1.ConfigMap) {
// can't use mockNodeOrchestrationCompleted here b/c it uses a context.CoreV1() method that
// is mutex locked when a reactor is processing
status := parseOrchestrationStatus(cm.Data)
infof(t, "configmap reactor: updating configmap %q status to completed", cm.Name)
// configmap names are deterministic can be mapped indirectly to an OSD ID, and since the
// configmaps are used to report completion status of OSD provisioning, we use this property in
// thse unit tests
osdID := osdIDGenerator.osdID(t, cm.Name)
status.Status = OrchestrationStatusCompleted
status.PvcBackedOSD = true
status.OSDs = []OSDInfo{
{
ID: osdID,
UUID: fmt.Sprintf("%032d", osdID),
BlockPath: "/dev/path/to/block",
},
}
s, _ := json.Marshal(status)
cm.Data[orchestrationStatusKey] = string(s)
}
createConfigMapWithStatusStartingCallback := func(cm *corev1.ConfigMap) {
// placeholder to be defined later in tests
}
deleteConfigMapWithStatusAlreadyExistsCallback := func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// placeholder to be defined later in tests
}
var cmReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
switch action := action.(type) {
case k8stesting.CreateActionImpl:
obj := action.GetObject()
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
status := parseOrchestrationStatus(cm.Data)
if status.Status == OrchestrationStatusStarting {
// allow tests to specify a custom callback for this case
createConfigMapWithStatusStartingCallback(cm)
}
case k8stesting.DeleteActionImpl:
// get the CM being deleted to figure out some info about it
obj, err := clientset.Tracker().Get(action.GetResource(), action.GetNamespace(), action.Name)
if err != nil {
t.Fatalf("err! could not get configmap %q", action.Name)
}
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
status := parseOrchestrationStatus(cm.Data)
if status.Status == OrchestrationStatusAlreadyExists {
infof(t, "configmap reactor: delete: OSD for configmap %q was updated", cm.Name)
// allow tests to specify a custom callback for this case
deleteConfigMapWithStatusAlreadyExistsCallback(cm, action)
} else if status.Status == OrchestrationStatusCompleted {
infof(t, "configmap reactor: delete: OSD for configmap %q was created", cm.Name)
}
}
// modify it in-place and allow it to be created later with these changes
return false, nil, nil
}
clientset.PrependReactor("*", "configmaps", cmReactor)
deploymentOps := newResourceOperationList()
// make a very simple reactor to record when deployments were created
var deploymentReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
createAction, ok := action.(k8stesting.CreateAction)
if !ok {
t.Fatal("err! action is not a create action")
return false, nil, nil
}
obj := createAction.GetObject()
d, ok := obj.(*appsv1.Deployment)
if !ok {
t.Fatal("err! action not a deployment")
return false, nil, nil
}
if o, _ := clientset.Tracker().Get(action.GetResource(), d.Namespace, d.Name); o != nil {
// deployment already exists, so this isn't be a valid create
return false, nil, nil
}
infof(t, "creating deployment %q", d.Name)
deploymentOps.Add(d.Name, "create")
return false, nil, nil
}
clientset.PrependReactor("create", "deployments", deploymentReactor)
// patch the updateDeploymentAndWait function to always report success and record when
// deployments are updated
oldUDAW := updateDeploymentAndWait
defer func() {
updateDeploymentAndWait = oldUDAW
}()
updateDeploymentAndWait = func(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, deployment *appsv1.Deployment, daemonType string, daemonName string, skipUpgradeChecks bool, continueUpgradeAfterChecksEvenIfNotHealthy bool) error {
infof(t, "updating deployment %q", deployment.Name)
deploymentOps.Add(deployment.Name, "update")
return nil
}
// wait for a number of deployments to be updated
waitForDeploymentOps := func(count int) {
for {
if deploymentOps.Len() >= count {
return
}
<-time.After(1 * time.Millisecond)
}
}
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
CephVersion: cephver.Nautilus,
}
executor := osdPVCTestExecutor(t, clientset, namespace)
context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: executor, RequestCancelOrchestration: abool.New()}
storageClassName := "test-storage-class"
volumeMode := corev1.PersistentVolumeBlock
spec := cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
Image: "ceph/ceph:v14.2.2",
},
DataDirHostPath: context.ConfigDir,
Storage: rookv1.StorageScopeSpec{
StorageClassDeviceSets: []rookv1.StorageClassDeviceSet{
{
Name: "set1",
Count: 5,
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "data",
},
Spec: corev1.PersistentVolumeClaimSpec{
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("10Gi"),
},
},
StorageClassName: &storageClassName,
VolumeMode: &volumeMode,
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
},
},
},
},
},
},
}
// =============================================================================================
infof(t, "Step 1: create new PVCs")
// when creating new configmaps with status "starting", simulate them becoming "completed"
// before any opportunistic updates can happen by changing the status to "completed" before
// the config map gets created by the fake k8s clientset.
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
setStatusConfigMapToCompleted(cm)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// do nothing on CM deletes
}
var c *Cluster
run := func() {
// kick off the start of the orchestration in a goroutine so we can watch the results
// and manipulate confimaps in the test if needed
c = New(context, clusterInfo, spec, "myversion")
go func() {
provisionConfig := c.newProvisionConfig()
c.startProvisioningOverPVCs(provisionConfig)
}()
}
run()
numExpectedPVCs := 5
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
// 5 deployments should have been created
assert.Equal(t, 5, deploymentOps.Len())
// all 5 should be create operations
assert.Len(t, deploymentOps.ResourcesWithOperation("create"), 5)
infof(t, "deployments successfully created for new PVCs")
// =============================================================================================
infof(t, "Step 2: verify deployments are updated when run again")
// clean the create times maps
reset := func() {
deploymentOps = newResourceOperationList()
// fake 'watcher' can close the channel for long tests, so reset when we can
statusMapWatcher.Reset()
}
reset()
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
// 5 deployments should have been operated on
waitForDeploymentOps(numExpectedPVCs)
// all 5 should be update operations
updatedDeployments := deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 5)
// use later to ensure existing deployments are updated
existingDeployments := updatedDeployments
// =============================================================================================
infof(t, "Step 3: verify new deployments are created before existing ones are updated")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 8
numExpectedPVCs = 8
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
// the same deployments from before should be updated here also
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 5)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
createdDeployments := deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 3)
for i, do := range deploymentOps.List() {
if i < 3 {
// first 3 ops should be create ops
assert.Equal(t, "create", do.operation)
} else {
// final 5 ops should be update ops
assert.Equal(t, "update", do.operation)
}
}
existingDeployments = append(createdDeployments, updatedDeployments...)
// =============================================================================================
infof(t, "Step 4: verify updates can happen opportunistically")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 10
numExpectedPVCs = 10
// In this test we carefully control the configmaps. When a configmap with status
// "alreadyExisting" is deleted, we know an OSD deployment just finished updating. We then
// immediately set one of the configmaps in "starting" state to "completed" so that it should
// be the next status configmap to be processed; a new OSD should be created for it. We
// therefore know that the first operation should be an update and the second a create. Then on
// in update-then-create fashion until all creates are done, followed by all updates.
configMapsThatNeedUpdatedToCompleted := []string{}
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
infof(t, "configmap reactor: create: marking that configmap %q needs to be completed later", cm.Name)
configMapsThatNeedUpdatedToCompleted = append(configMapsThatNeedUpdatedToCompleted, cm.Name)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
if len(configMapsThatNeedUpdatedToCompleted) > 0 {
cmName := configMapsThatNeedUpdatedToCompleted[0]
obj, err := clientset.Tracker().Get(action.GetResource(), action.GetNamespace(), cmName)
if err != nil {
t.Fatalf("err! could not get configmap %q", cmName)
}
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
setStatusConfigMapToCompleted(cm)
err = clientset.Tracker().Update(action.GetResource(), cm, action.GetNamespace())
if err != nil {
t.Fatalf("err! failed to update configmap to completed. %v", err)
}
statusMapWatcher.Modify(cm) // MUST inform the fake watcher we made a change
configMapsThatNeedUpdatedToCompleted = configMapsThatNeedUpdatedToCompleted[1:]
}
}
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 8)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
createdDeployments = deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 2)
assert.Equal(t,
[]string{
"update",
"create",
"update",
"create",
"update",
"update",
"update",
"update",
"update",
"update",
}, deploymentOps.OperationsInOrder())
existingDeployments = append(createdDeployments, updatedDeployments...)
// =============================================================================================
infof(t, "Step 5: verify opportunistic updates can all happen before creates")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 12
numExpectedPVCs = 12
// In this test, we stop all configmaps from being moved from "starting" to "completed" status
// in the configmap reactor so that all opportunistic updates should happen before new OSDs
// get created.
configMapsThatNeedUpdatedToCompleted = []string{}
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
// re-define this behavior as a reminder for readers of the test
infof(t, "configmap reactor: create: marking that configmap %q needs to be completed later", cm.Name)
configMapsThatNeedUpdatedToCompleted = append(configMapsThatNeedUpdatedToCompleted, cm.Name)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// do NOT automatically move configmaps from "starting" to "completed"
}
run()
waitForDeploymentOps(10) // wait for 10 updates
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 10)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
// update configmaps from "starting" to "completed"
for _, cmName := range configMapsThatNeedUpdatedToCompleted {
cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, cmName, metav1.GetOptions{})
assert.NoError(t, err)
setStatusConfigMapToCompleted(cm)
cm, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
assert.NoError(t, err)
statusMapWatcher.Modify(cm) // MUST inform the fake watcher we made a change
}
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
// should be 2 more create operations
createdDeployments = deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 2)
for i, do := range deploymentOps.List() {
if i < 10 {
// first 10 ops should be update ops
assert.Equal(t, "update", do.operation)
} else {
// final 2 ops should be update ops
assert.Equal(t, "create", do.operation)
}
}
infof(t, "success")
}
/*
* mock executor to handle ceph commands
*/
// osdPVCTestExecutor builds a mock executor that answers the ceph CLI calls
// issued by the OSD-on-PVC orchestration code during this test.
func osdPVCTestExecutor(t *testing.T, clientset *fake.Clientset, namespace string) *exectest.MockExecutor {
	return &exectest.MockExecutor{
		MockExecuteCommandWithOutputFile: func(command string, outFileArg string, args ...string) (string, error) {
			infof(t, "command: %s %v", command, args)
			if command != "ceph" {
				return "", errors.Errorf("unexpected command %q with args %v", command, args)
			}
			switch args[0] {
			case "auth":
				if args[1] == "get-or-create-key" {
					return `{"key": "does-not-matter"}`, nil
				}
			case "osd":
				switch args[1] {
				case "ok-to-stop":
					// callers only inspect the exit status, so no output text is needed
					return "", nil
				case "ls":
					// ceph osd ls returns an array of osd IDs like [0,1,2]
					// build this based on the number of deployments since they should be equal
					// for this test
					deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
					if err != nil {
						t.Fatalf("failed to build 'ceph osd ls' output. %v", err)
					}
					ids := make([]string, 0, len(deployments.Items))
					for i := range deployments.Items {
						ids = append(ids, strconv.Itoa(i))
					}
					return fmt.Sprintf("[%s]", strings.Join(ids, ",")), nil
				case "tree":
					return `{"nodes":[{"id":-1,"name":"default","type":"root","type_id":11,"children":[-3]},{"id":-3,"name":"master","type":"host","type_id":1,"pool_weights":{},"children":[2,1,0]},{"id":0,"device_class":"hdd","name":"osd.0","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1},{"id":1,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1},{"id":2,"device_class":"hdd","name":"osd.2","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1}],"stray":[]}`, nil
				}
			case "versions":
				// the update deploy code only cares about the mons from the ceph version command results
				return `{"mon":{"ceph version 14.2.2 (somehash) nautilus (stable)":3}}`, nil
			}
			return "", errors.Errorf("unexpected ceph command %q", args)
		},
	}
}
/*
* basic helper functions
*/
// node names for OSDs on PVC end up being the name of the PVC
// waitForNumPVCs blocks until at least count PVCs exist in the namespace,
// polling the fake clientset every millisecond.
func waitForNumPVCs(t *testing.T, clientset *fake.Clientset, namespace string, count int) {
	for {
		pvcs, err := clientset.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), metav1.ListOptions{})
		assert.NoError(t, err)
		if len(pvcs.Items) < count {
			time.Sleep(1 * time.Millisecond)
			continue
		}
		infof(t, "PVCs for OSDs on PVC all exist")
		return
	}
}
// waitForNumDeployments blocks until at least count Deployments exist in the
// namespace, polling the fake clientset every millisecond.
func waitForNumDeployments(t *testing.T, clientset *fake.Clientset, namespace string, count int) {
	for {
		deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
		assert.NoError(t, err)
		if len(deployments.Items) < count {
			time.Sleep(1 * time.Millisecond)
			continue
		}
		infof(t, "Deployments for OSDs on PVC all exist")
		return
	}
}
/*
* Unique and consistent OSD ID generator
*/
// osdIDGenerator hands out unique, monotonically increasing OSD IDs and
// remembers which named resource (e.g., a configmap name) received which ID,
// so repeated lookups for the same resource are stable across the test.
type osdIDGenerator struct {
	nextOSDID int            // next unassigned ID, starting at 0
	osdIDMap  map[string]int // resource name -> previously assigned OSD ID
}

func newOSDIDGenerator() osdIDGenerator {
	return osdIDGenerator{
		nextOSDID: 0,
		osdIDMap:  map[string]int{},
	}
}

// osdID returns the OSD ID already assigned to namedResource, or assigns and
// records a fresh one on first use.
func (g *osdIDGenerator) osdID(t *testing.T, namedResource string) int {
	id, found := g.osdIDMap[namedResource]
	if !found {
		id = g.nextOSDID
		g.osdIDMap[namedResource] = id
		g.nextOSDID++
		infof(t, "generated new OSD ID %d for resource %q", id, namedResource)
		return id
	}
	infof(t, "resource %q has existing OSD ID %d", namedResource, id)
	return id
}
/*
* resourceOperationList
* We want to keep track of the order in which some resources (notably deployments) are created and
* updated, and the tracker should be thread safe since there could be operations occurring in
* parallel.
*/
// resourceOperation records a single operation (e.g., "create", "update")
// performed on a named resource.
type resourceOperation struct {
	resourceName string
	operation    string // e.g., "create", "update"
}

func newResourceOperation(resourceName, operation string) resourceOperation {
	return resourceOperation{resourceName, operation}
}

// resourceOperationList is a thread-safe, ordered record of operations on
// resources (notably deployments), since operations can occur in parallel.
type resourceOperationList struct {
	sync.Mutex
	resourceOps []resourceOperation
}

func newResourceOperationList() *resourceOperationList {
	return &resourceOperationList{
		sync.Mutex{},
		[]resourceOperation{},
	}
}

// Add appends an operation record to the list.
func (r *resourceOperationList) Add(resourceName, operation string) {
	r.Lock()
	defer r.Unlock()
	r.resourceOps = append(r.resourceOps, newResourceOperation(resourceName, operation))
}

// Len returns the number of recorded operations.
func (r *resourceOperationList) Len() int {
	r.Lock()
	defer r.Unlock()
	return len(r.resourceOps)
}

// List returns a snapshot copy of the recorded operations in order.
// The copy is taken under the lock: previously this returned the internal
// slice without locking, which was a data race with concurrent Add calls.
func (r *resourceOperationList) List() []resourceOperation {
	r.Lock()
	defer r.Unlock()
	ops := make([]resourceOperation, len(r.resourceOps))
	copy(ops, r.resourceOps)
	return ops
}

// ResourcesWithOperation returns only the resource names which have the given
// operation, in recorded order.
func (r *resourceOperationList) ResourcesWithOperation(operation string) []string {
	resources := []string{}
	for _, ro := range r.List() {
		if ro.operation == operation {
			resources = append(resources, ro.resourceName)
		}
	}
	return resources
}

// OperationsInOrder returns all operations in recorded order without resource names.
func (r *resourceOperationList) OperationsInOrder() []string {
	ops := []string{}
	for _, ro := range r.List() {
		ops = append(ops, ro.operation)
	}
	return ops
}
ceph: fix flaky OSD on PVC unit test
This was a race condition where the test would sometimes finish before a
deferred watcher.Stop() command was called and sometimes would not.
Signed-off-by: Blaine Gardner <135b86f2968bdbcc730c10d8ababd343bb675986@redhat.com>
/*
Copyright 2021 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package osd
import (
"context"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
rookv1 "github.com/rook/rook/pkg/apis/rook.io/v1"
"github.com/rook/rook/pkg/clusterd"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
cephver "github.com/rook/rook/pkg/operator/ceph/version"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/operator/test"
exectest "github.com/rook/rook/pkg/util/exec/test"
"github.com/stretchr/testify/assert"
"github.com/tevino/abool"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
)
// make infof function for helping identify logs from unit test helpers versus from runtime code.
// infof logs through the package logger with the test name prefixed, so output
// from unit-test helpers is easy to tell apart from runtime code output.
func infof(t *testing.T, format string, args ...interface{}) {
	prefixed := t.Name() + ": " + format
	logger.Infof(prefixed, args...)
}
// The definition for this test is a wrapper for the test function that adds a timeout
func TestOSDsOnPVC(t *testing.T) {
	oldLogger := *logger
	defer func() { logger = &oldLogger }() // reset logger to default after this test
	logger.SetLevel(capnslog.TRACE)        // want more log info for this test if it fails

	oldOpportunisticDuration := osdOpportunisticUpdateDuration
	defer func() { osdOpportunisticUpdateDuration = oldOpportunisticDuration }()
	// lower the opportunistic update check duration for unit tests to speed them up
	osdOpportunisticUpdateDuration = 1 * time.Millisecond

	// This test runs in less than 150 milliseconds on a 6-core CPU w/ 16GB RAM
	// GitHub CI runner can be a lot slower (~2.5 seconds)
	// The channel is buffered so the deferred send below can never block: if the
	// timeout fires first, nothing receives from done, and an unbuffered send
	// would leak the test goroutine forever.
	done := make(chan bool, 1)
	timeout := time.After(50 * 150 * time.Millisecond)
	go func() {
		// use defer because t.Fatal will kill this goroutine, and we always want done set if the
		// func stops running
		defer func() { done <- true }()
		// run the actual test
		testOSDsOnPVC(t)
	}()
	select {
	case <-timeout:
		t.Fatal("Test timed out. This is a test failure.")
	case <-done:
	}
}
// This is the actual test. If it hangs, we should consider that an error. Writing a timeout in
// tests requires running the test in a goroutine, so there is a timeout wrapper above
func testOSDsOnPVC(t *testing.T) {
namespace := "osd-on-pvc"
ctx := context.TODO()
// we don't need to create actual nodes for this test, but this is the set of nodes which should
// we will use to create fake placements for OSD prepare job pods
osdIDGenerator := newOSDIDGenerator()
// set up a fake k8s client set and watcher to generate events that the operator will listen to
clientset := test.NewComplexClientset(t)
test.AddSomeReadyNodes(t, clientset, 3)
assignPodToNode := true
test.PrependComplexJobReactor(t, clientset, assignPodToNode)
test.SetFakeKubernetesVersion(clientset, "v1.13.2") // v1.13 or higher is required for OSDs on PVC
os.Setenv(k8sutil.PodNamespaceEnvVar, namespace)
defer os.Unsetenv(k8sutil.PodNamespaceEnvVar)
statusMapWatcher := watch.NewRaceFreeFake()
clientset.PrependWatchReactor("configmaps", k8stesting.DefaultWatchReactor(statusMapWatcher, nil))
// Helper methods to set "completed" status on "starting" ConfigMaps.
setStatusConfigMapToCompleted := func(cm *corev1.ConfigMap) {
// can't use mockNodeOrchestrationCompleted here b/c it uses a context.CoreV1() method that
// is mutex locked when a reactor is processing
status := parseOrchestrationStatus(cm.Data)
infof(t, "configmap reactor: updating configmap %q status to completed", cm.Name)
// configmap names are deterministic can be mapped indirectly to an OSD ID, and since the
// configmaps are used to report completion status of OSD provisioning, we use this property in
// thse unit tests
osdID := osdIDGenerator.osdID(t, cm.Name)
status.Status = OrchestrationStatusCompleted
status.PvcBackedOSD = true
status.OSDs = []OSDInfo{
{
ID: osdID,
UUID: fmt.Sprintf("%032d", osdID),
BlockPath: "/dev/path/to/block",
},
}
s, _ := json.Marshal(status)
cm.Data[orchestrationStatusKey] = string(s)
}
createConfigMapWithStatusStartingCallback := func(cm *corev1.ConfigMap) {
// placeholder to be defined later in tests
}
deleteConfigMapWithStatusAlreadyExistsCallback := func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// placeholder to be defined later in tests
}
var cmReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
switch action := action.(type) {
case k8stesting.CreateActionImpl:
obj := action.GetObject()
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
status := parseOrchestrationStatus(cm.Data)
if status.Status == OrchestrationStatusStarting {
// allow tests to specify a custom callback for this case
createConfigMapWithStatusStartingCallback(cm)
}
case k8stesting.DeleteActionImpl:
// get the CM being deleted to figure out some info about it
obj, err := clientset.Tracker().Get(action.GetResource(), action.GetNamespace(), action.Name)
if err != nil {
t.Fatalf("err! could not get configmap %q", action.Name)
}
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
status := parseOrchestrationStatus(cm.Data)
if status.Status == OrchestrationStatusAlreadyExists {
infof(t, "configmap reactor: delete: OSD for configmap %q was updated", cm.Name)
// allow tests to specify a custom callback for this case
deleteConfigMapWithStatusAlreadyExistsCallback(cm, action)
} else if status.Status == OrchestrationStatusCompleted {
infof(t, "configmap reactor: delete: OSD for configmap %q was created", cm.Name)
}
}
// modify it in-place and allow it to be created later with these changes
return false, nil, nil
}
clientset.PrependReactor("*", "configmaps", cmReactor)
deploymentOps := newResourceOperationList()
// make a very simple reactor to record when deployments were created
var deploymentReactor k8stesting.ReactionFunc = func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
createAction, ok := action.(k8stesting.CreateAction)
if !ok {
t.Fatal("err! action is not a create action")
return false, nil, nil
}
obj := createAction.GetObject()
d, ok := obj.(*appsv1.Deployment)
if !ok {
t.Fatal("err! action not a deployment")
return false, nil, nil
}
if o, _ := clientset.Tracker().Get(action.GetResource(), d.Namespace, d.Name); o != nil {
// deployment already exists, so this isn't be a valid create
return false, nil, nil
}
infof(t, "creating deployment %q", d.Name)
deploymentOps.Add(d.Name, "create")
return false, nil, nil
}
clientset.PrependReactor("create", "deployments", deploymentReactor)
// patch the updateDeploymentAndWait function to always report success and record when
// deployments are updated
oldUDAW := updateDeploymentAndWait
defer func() {
updateDeploymentAndWait = oldUDAW
}()
updateDeploymentAndWait = func(context *clusterd.Context, clusterInfo *cephclient.ClusterInfo, deployment *appsv1.Deployment, daemonType string, daemonName string, skipUpgradeChecks bool, continueUpgradeAfterChecksEvenIfNotHealthy bool) error {
infof(t, "updating deployment %q", deployment.Name)
deploymentOps.Add(deployment.Name, "update")
return nil
}
// wait for a number of deployments to be updated
waitForDeploymentOps := func(count int) {
for {
if deploymentOps.Len() >= count {
return
}
<-time.After(1 * time.Millisecond)
}
}
clusterInfo := &cephclient.ClusterInfo{
Namespace: namespace,
CephVersion: cephver.Nautilus,
}
executor := osdPVCTestExecutor(t, clientset, namespace)
context := &clusterd.Context{Clientset: clientset, ConfigDir: "/var/lib/rook", Executor: executor, RequestCancelOrchestration: abool.New()}
storageClassName := "test-storage-class"
volumeMode := corev1.PersistentVolumeBlock
spec := cephv1.ClusterSpec{
CephVersion: cephv1.CephVersionSpec{
Image: "ceph/ceph:v14.2.2",
},
DataDirHostPath: context.ConfigDir,
Storage: rookv1.StorageScopeSpec{
StorageClassDeviceSets: []rookv1.StorageClassDeviceSet{
{
Name: "set1",
Count: 5,
VolumeClaimTemplates: []corev1.PersistentVolumeClaim{
{
ObjectMeta: metav1.ObjectMeta{
Name: "data",
},
Spec: corev1.PersistentVolumeClaimSpec{
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("10Gi"),
},
},
StorageClassName: &storageClassName,
VolumeMode: &volumeMode,
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
},
},
},
},
},
},
}
// =============================================================================================
infof(t, "Step 1: create new PVCs")
// when creating new configmaps with status "starting", simulate them becoming "completed"
// before any opportunistic updates can happen by changing the status to "completed" before
// the config map gets created by the fake k8s clientset.
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
setStatusConfigMapToCompleted(cm)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// do nothing on CM deletes
}
var c *Cluster
var provisionDone bool = false
waitForDone := func() {
for {
if provisionDone {
infof(t, "provisioning done")
break
}
infof(t, "provisioning not done. waiting...")
time.Sleep(time.Millisecond)
}
}
run := func() {
// reset
deploymentOps = newResourceOperationList()
statusMapWatcher.Reset()
// kick off the start of the orchestration in a goroutine so we can watch the results
// and manipulate confimaps in the test if needed
c = New(context, clusterInfo, spec, "myversion")
provisionDone = false
go func() {
provisionConfig := c.newProvisionConfig()
c.startProvisioningOverPVCs(provisionConfig)
provisionDone = true
}()
}
run()
numExpectedPVCs := 5
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
// 5 deployments should have been created
assert.Equal(t, 5, deploymentOps.Len())
// all 5 should be create operations
assert.Len(t, deploymentOps.ResourcesWithOperation("create"), 5)
infof(t, "deployments successfully created for new PVCs")
waitForDone()
// =============================================================================================
infof(t, "Step 2: verify deployments are updated when run again")
// clean the create times maps
reset := func() {
deploymentOps = newResourceOperationList()
// fake 'watcher' can close the channel for long tests, so reset when we can
statusMapWatcher.Reset()
}
reset()
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
// 5 deployments should have been operated on
waitForDeploymentOps(numExpectedPVCs)
// all 5 should be update operations
updatedDeployments := deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 5)
// use later to ensure existing deployments are updated
existingDeployments := updatedDeployments
waitForDone()
// =============================================================================================
infof(t, "Step 3: verify new deployments are created before existing ones are updated")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 8
numExpectedPVCs = 8
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
// the same deployments from before should be updated here also
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 5)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
createdDeployments := deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 3)
for i, do := range deploymentOps.List() {
if i < 3 {
// first 3 ops should be create ops
assert.Equal(t, "create", do.operation)
} else {
// final 5 ops should be update ops
assert.Equal(t, "update", do.operation)
}
}
existingDeployments = append(createdDeployments, updatedDeployments...)
waitForDone()
// =============================================================================================
infof(t, "Step 4: verify updates can happen opportunistically")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 10
numExpectedPVCs = 10
// In this test we carefully control the configmaps. When a configmap with status
// "alreadyExisting" is deleted, we know an OSD deployment just finished updating. We then
// immediately set one of the configmaps in "starting" state to "completed" so that it should
// be the next status configmap to be processed; a new OSD should be created for it. We
// therefore know that the first operation should be an update and the second a create. Then on
// in update-then-create fashion until all creates are done, followed by all updates.
configMapsThatNeedUpdatedToCompleted := []string{}
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
infof(t, "configmap reactor: create: marking that configmap %q needs to be completed later", cm.Name)
configMapsThatNeedUpdatedToCompleted = append(configMapsThatNeedUpdatedToCompleted, cm.Name)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
if len(configMapsThatNeedUpdatedToCompleted) > 0 {
cmName := configMapsThatNeedUpdatedToCompleted[0]
obj, err := clientset.Tracker().Get(action.GetResource(), action.GetNamespace(), cmName)
if err != nil {
t.Fatalf("err! could not get configmap %q", cmName)
}
cm, ok := obj.(*corev1.ConfigMap)
if !ok {
t.Fatal("err! action not a configmap")
}
setStatusConfigMapToCompleted(cm)
err = clientset.Tracker().Update(action.GetResource(), cm, action.GetNamespace())
if err != nil {
t.Fatalf("err! failed to update configmap to completed. %v", err)
}
statusMapWatcher.Modify(cm) // MUST inform the fake watcher we made a change
configMapsThatNeedUpdatedToCompleted = configMapsThatNeedUpdatedToCompleted[1:]
}
}
run()
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 8)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
createdDeployments = deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 2)
assert.Equal(t,
[]string{
"update",
"create",
"update",
"create",
"update",
"update",
"update",
"update",
"update",
"update",
}, deploymentOps.OperationsInOrder())
existingDeployments = append(createdDeployments, updatedDeployments...)
waitForDone()
// =============================================================================================
infof(t, "Step 5: verify opportunistic updates can all happen before creates")
reset()
spec.Storage.StorageClassDeviceSets[0].Count = 12
numExpectedPVCs = 12
// In this test, we stop all configmaps from being moved from "starting" to "completed" status
// in the configmap reactor so that all opportunistic updates should happen before new OSDs
// get created.
configMapsThatNeedUpdatedToCompleted = []string{}
createConfigMapWithStatusStartingCallback = func(cm *corev1.ConfigMap) {
// re-define this behavior as a reminder for readers of the test
infof(t, "configmap reactor: create: marking that configmap %q needs to be completed later", cm.Name)
configMapsThatNeedUpdatedToCompleted = append(configMapsThatNeedUpdatedToCompleted, cm.Name)
}
deleteConfigMapWithStatusAlreadyExistsCallback = func(cm *corev1.ConfigMap, action k8stesting.DeleteActionImpl) {
// do NOT automatically move configmaps from "starting" to "completed"
}
run()
waitForDeploymentOps(10) // wait for 10 updates
updatedDeployments = deploymentOps.ResourcesWithOperation("update")
assert.Len(t, updatedDeployments, 10)
assert.ElementsMatch(t, existingDeployments, updatedDeployments)
// update configmaps from "starting" to "completed"
for _, cmName := range configMapsThatNeedUpdatedToCompleted {
cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, cmName, metav1.GetOptions{})
assert.NoError(t, err)
setStatusConfigMapToCompleted(cm)
cm, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{})
assert.NoError(t, err)
statusMapWatcher.Modify(cm) // MUST inform the fake watcher we made a change
}
waitForNumPVCs(t, clientset, namespace, numExpectedPVCs)
waitForNumDeployments(t, clientset, namespace, numExpectedPVCs)
waitForDeploymentOps(numExpectedPVCs)
// should be 2 more create operations
createdDeployments = deploymentOps.ResourcesWithOperation("create")
assert.Len(t, createdDeployments, 2)
for i, do := range deploymentOps.List() {
if i < 10 {
// first 10 ops should be update ops
assert.Equal(t, "update", do.operation)
} else {
// final 2 ops should be update ops
assert.Equal(t, "create", do.operation)
}
}
waitForDone()
infof(t, "success")
}
/*
 * mock executor to handle ceph commands
 */

// osdPVCTestExecutor returns a mock executor that answers the ceph commands
// issued by the OSD-on-PVC reconcile path during these tests.
func osdPVCTestExecutor(t *testing.T, clientset *fake.Clientset, namespace string) *exectest.MockExecutor {
	return &exectest.MockExecutor{
		MockExecuteCommandWithOutputFile: func(command string, outFileArg string, args ...string) (string, error) {
			infof(t, "command: %s %v", command, args)
			if command != "ceph" {
				return "", errors.Errorf("unexpected command %q with args %v", command, args)
			}
			switch args[0] {
			case "auth":
				if args[1] == "get-or-create-key" {
					return "{\"key\": \"does-not-matter\"}", nil
				}
			case "osd":
				switch args[1] {
				case "ok-to-stop":
					return "", nil // no need to return text, only output status is used
				case "ls":
					// ceph osd ls returns an array of osd IDs like [0,1,2]
					// build this based on the number of deployments since they should be equal
					// for this test
					deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
					if err != nil {
						t.Fatalf("failed to build 'ceph osd ls' output. %v", err)
					}
					ids := make([]string, 0, len(deployments.Items))
					for i := range deployments.Items {
						ids = append(ids, strconv.Itoa(i))
					}
					return fmt.Sprintf("[%s]", strings.Join(ids, ",")), nil
				case "tree":
					return `{"nodes":[{"id":-1,"name":"default","type":"root","type_id":11,"children":[-3]},{"id":-3,"name":"master","type":"host","type_id":1,"pool_weights":{},"children":[2,1,0]},{"id":0,"device_class":"hdd","name":"osd.0","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1},{"id":1,"device_class":"hdd","name":"osd.1","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1},{"id":2,"device_class":"hdd","name":"osd.2","type":"osd","type_id":0,"crush_weight":0.009796142578125,"depth":2,"pool_weights":{},"exists":1,"status":"up","reweight":1,"primary_affinity":1}],"stray":[]}`, nil
				}
			case "versions":
				// the update deploy code only cares about the mons from the ceph version command results
				return `{"mon":{"ceph version 14.2.2 (somehash) nautilus (stable)":3}}`, nil
			}
			return "", errors.Errorf("unexpected ceph command %q", args)
		},
	}
}
/*
* basic helper functions
*/
// node names for OSDs on PVC end up being the name of the PVC

// waitForNumPVCs blocks until at least count PVCs exist in the namespace,
// polling the fake clientset once per millisecond.
func waitForNumPVCs(t *testing.T, clientset *fake.Clientset, namespace string, count int) {
	for {
		pvcs, err := clientset.CoreV1().PersistentVolumeClaims(namespace).List(context.TODO(), metav1.ListOptions{})
		assert.NoError(t, err)
		if len(pvcs.Items) >= count {
			infof(t, "PVCs for OSDs on PVC all exist")
			return
		}
		<-time.After(1 * time.Millisecond)
	}
}
// waitForNumDeployments blocks until at least count Deployments exist in the
// namespace, polling the fake clientset once per millisecond.
func waitForNumDeployments(t *testing.T, clientset *fake.Clientset, namespace string, count int) {
	for {
		deployments, err := clientset.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
		assert.NoError(t, err)
		if len(deployments.Items) >= count {
			infof(t, "Deployments for OSDs on PVC all exist")
			return
		}
		<-time.After(1 * time.Millisecond)
	}
}
/*
 * Unique and consistent OSD ID generator
 */

// osdIDGenerator hands out OSD IDs: the same named resource always gets the
// same ID, and each new resource gets the next sequential ID.
type osdIDGenerator struct {
	nextOSDID int
	osdIDMap  map[string]int
}

func newOSDIDGenerator() osdIDGenerator {
	return osdIDGenerator{
		osdIDMap: map[string]int{},
	}
}

// osdID returns the OSD ID memoized for namedResource, generating and
// recording a fresh sequential ID on first use.
func (g *osdIDGenerator) osdID(t *testing.T, namedResource string) int {
	if id, found := g.osdIDMap[namedResource]; found {
		infof(t, "resource %q has existing OSD ID %d", namedResource, id)
		return id
	}
	id := g.nextOSDID
	g.nextOSDID++
	g.osdIDMap[namedResource] = id
	infof(t, "generated new OSD ID %d for resource %q", id, namedResource)
	return id
}
/*
 * resourceOperationList
 * We want to keep track of the order in which some resources (notably deployments) are created and
 * updated, and the tracker should be thread safe since there could be operations occurring in
 * parallel.
 */

// resourceOperation records a single operation applied to a named resource.
type resourceOperation struct {
	resourceName string
	operation    string // e.g., "create", "update"
}

// newResourceOperation builds a resourceOperation record.
func newResourceOperation(resourceName, operation string) resourceOperation {
	return resourceOperation{resourceName, operation}
}

// resourceOperationList is a thread-safe, ordered log of operations applied
// to resources. It may be appended to from multiple goroutines in parallel.
type resourceOperationList struct {
	sync.Mutex
	resourceOps []resourceOperation
}

// newResourceOperationList returns an empty, ready-to-use operation log.
func newResourceOperationList() *resourceOperationList {
	return &resourceOperationList{
		sync.Mutex{},
		[]resourceOperation{},
	}
}

// Add appends an operation record to the log.
func (r *resourceOperationList) Add(resourceName, operation string) {
	r.Lock()
	defer r.Unlock()
	r.resourceOps = append(r.resourceOps, newResourceOperation(resourceName, operation))
}

// Len returns the number of recorded operations.
func (r *resourceOperationList) Len() int {
	r.Lock()
	defer r.Unlock()
	return len(r.resourceOps)
}

// List returns a snapshot of the recorded operations in order.
// BUG FIX: this used to return the live slice without taking the lock, which
// was a data race with concurrent Add calls (append may reallocate or write
// the backing array mid-read). Take the lock and return a copy.
func (r *resourceOperationList) List() []resourceOperation {
	r.Lock()
	defer r.Unlock()
	snapshot := make([]resourceOperation, len(r.resourceOps))
	copy(snapshot, r.resourceOps)
	return snapshot
}

// ResourcesWithOperation returns only the names of resources recorded with
// the given operation, in the order the operations occurred.
func (r *resourceOperationList) ResourcesWithOperation(operation string) []string {
	resources := []string{}
	for _, ro := range r.List() {
		if ro.operation == operation {
			resources = append(resources, ro.resourceName)
		}
	}
	return resources
}

// OperationsInOrder returns all operations in order without resource names.
func (r *resourceOperationList) OperationsInOrder() []string {
	ops := []string{}
	for _, ro := range r.List() {
		ops = append(ops, ro.operation)
	}
	return ops
}
|
package etcd
import (
"context"
"errors"
"strings"
kerrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/kubernetes/pkg/printers"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
usergroup "github.com/openshift/api/user"
printersinternal "github.com/openshift/origin/pkg/printers/internalversion"
userapi "github.com/openshift/origin/pkg/user/apis/user"
"github.com/openshift/origin/pkg/user/apis/user/validation"
"github.com/openshift/origin/pkg/user/apiserver/registry/user"
)
// rest implements a RESTStorage for users against etcd.
// REST embeds the generic registry.Store, inheriting the standard storage
// verbs; Get is customized in this file to support the "~" alias.
type REST struct {
	*registry.Store
}

// compile-time assertion that REST provides the full standard storage interface
var _ rest.StandardStorage = &REST{}
// NewREST returns a RESTStorage object that will work against users
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := ®istry.Store{
NewFunc: func() runtime.Object { return &userapi.User{} },
NewListFunc: func() runtime.Object { return &userapi.UserList{} },
DefaultQualifiedResource: usergroup.Resource("users"),
TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)},
CreateStrategy: user.Strategy,
UpdateStrategy: user.Strategy,
DeleteStrategy: user.Strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
return &REST{store}, nil
}
// Get retrieves the item from etcd.
//
// The special name "~" resolves to the currently authenticated user from the
// request context. For any other name, an invalid user name is rejected with
// a proper kubernetes status error so the API server does not log these
// expected failures as unrecognized errors.
func (r *REST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	// "~" means the currently authenticated user
	if name == "~" {
		user, ok := apirequest.UserFrom(ctx)
		if !ok || user.GetName() == "" {
			return nil, kerrs.NewForbidden(usergroup.Resource("user"), "~", errors.New("requests to ~ must be authenticated"))
		}
		name = user.GetName()
		contextGroups := sets.NewString(user.GetGroups()...).List() // sort and deduplicate
		// build a virtual user object using the context data
		virtualUser := &userapi.User{ObjectMeta: metav1.ObjectMeta{Name: name, UID: types.UID(user.GetUID())}, Groups: contextGroups}
		if reasons := validation.ValidateUserName(name, false); len(reasons) != 0 {
			// The user the authentication layer has identified cannot be a valid persisted user
			// Return an API representation of the virtual user
			return virtualUser, nil
		}
		// see if the context user exists in storage
		obj, err := r.Store.Get(ctx, name, options)
		// valid persisted user
		if err == nil {
			// copy persisted user
			persistedUser := obj.(*userapi.User).DeepCopy()
			// and mutate it to include the complete list of groups from the request context
			persistedUser.Groups = virtualUser.Groups
			// favor the UID on the request since that is what we actually base decisions on
			if len(virtualUser.UID) != 0 {
				persistedUser.UID = virtualUser.UID
			}
			return persistedUser, nil
		}
		// server is broken
		if !kerrs.IsNotFound(err) {
			return nil, kerrs.NewInternalError(err)
		}
		// impersonation, remote token authn, etc
		return virtualUser, nil
	}
	// do not bother looking up users that cannot be persisted.
	// BUG FIX: wrap the field.Error in a status error (NewInvalid); returning
	// the bare field.Error made the API server log "apiserver received an
	// error that is not an metav1.Status" for names like "kube:admin".
	if reasons := validation.ValidateUserName(name, false); len(reasons) != 0 {
		err := field.Invalid(field.NewPath("metadata", "name"), name, strings.Join(reasons, ", "))
		return nil, kerrs.NewInvalid(usergroup.Kind("User"), name, field.ErrorList{err})
	}
	return r.Store.Get(ctx, name, options)
}
Return a status error on GET user with invalid name
The use of the kube:admin user against the API causes lookups
against the API that will result in "validation" errors:
apiserver received an error that is not an metav1.Status:
&field.Error{Type:"FieldValueInvalid", Field:"metadata.name",
BadValue:"kube:admin", Detail:"may not contain \":\""}
By returning a proper status error, we make sure that the API server
does not attempt to log these expected "failures."
Bug 1697206
Fixes #21632
Signed-off-by: Monis Khan <19f7b2d00144930eab99c4cfd2a8d2d2a225ef67@redhat.com>
package etcd
import (
"context"
"errors"
"strings"
kerrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/apiserver/pkg/registry/rest"
"k8s.io/kubernetes/pkg/printers"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
usergroup "github.com/openshift/api/user"
printersinternal "github.com/openshift/origin/pkg/printers/internalversion"
userapi "github.com/openshift/origin/pkg/user/apis/user"
"github.com/openshift/origin/pkg/user/apis/user/validation"
"github.com/openshift/origin/pkg/user/apiserver/registry/user"
)
// rest implements a RESTStorage for users against etcd.
// REST embeds the generic registry.Store, inheriting the standard storage
// verbs; Get is customized in this file to support the "~" alias.
type REST struct {
	*registry.Store
}

// compile-time assertion that REST provides the full standard storage interface
var _ rest.StandardStorage = &REST{}
// NewREST returns a RESTStorage object that will work against users
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := ®istry.Store{
NewFunc: func() runtime.Object { return &userapi.User{} },
NewListFunc: func() runtime.Object { return &userapi.UserList{} },
DefaultQualifiedResource: usergroup.Resource("users"),
TableConvertor: printerstorage.TableConvertor{TablePrinter: printers.NewTablePrinter().With(printersinternal.AddHandlers)},
CreateStrategy: user.Strategy,
UpdateStrategy: user.Strategy,
DeleteStrategy: user.Strategy,
}
options := &generic.StoreOptions{RESTOptions: optsGetter}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
return &REST{store}, nil
}
// Get retrieves the item from etcd.
//
// The special name "~" resolves to the currently authenticated user taken
// from the request context. For other names, invalid user names are rejected
// with a proper status error (not a bare field.Error) so the API server does
// not log these expected failures.
func (r *REST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	// "~" means the currently authenticated user
	if name == "~" {
		user, ok := apirequest.UserFrom(ctx)
		if !ok || user.GetName() == "" {
			return nil, kerrs.NewForbidden(usergroup.Resource("user"), "~", errors.New("requests to ~ must be authenticated"))
		}
		name = user.GetName()
		contextGroups := sets.NewString(user.GetGroups()...).List() // sort and deduplicate
		// build a virtual user object using the context data
		virtualUser := &userapi.User{ObjectMeta: metav1.ObjectMeta{Name: name, UID: types.UID(user.GetUID())}, Groups: contextGroups}
		if reasons := validation.ValidateUserName(name, false); len(reasons) != 0 {
			// The user the authentication layer has identified cannot be a valid persisted user
			// Return an API representation of the virtual user
			return virtualUser, nil
		}
		// see if the context user exists in storage
		obj, err := r.Store.Get(ctx, name, options)
		// valid persisted user
		if err == nil {
			// copy persisted user
			persistedUser := obj.(*userapi.User).DeepCopy()
			// and mutate it to include the complete list of groups from the request context
			persistedUser.Groups = virtualUser.Groups
			// favor the UID on the request since that is what we actually base decisions on
			if len(virtualUser.UID) != 0 {
				persistedUser.UID = virtualUser.UID
			}
			return persistedUser, nil
		}
		// server is broken
		if !kerrs.IsNotFound(err) {
			return nil, kerrs.NewInternalError(err)
		}
		// impersonation, remote token authn, etc
		return virtualUser, nil
	}
	// do not bother looking up users that cannot be persisted
	// make sure we return a status error otherwise the API server will complain
	if reasons := validation.ValidateUserName(name, false); len(reasons) != 0 {
		err := field.Invalid(field.NewPath("metadata", "name"), name, strings.Join(reasons, ", "))
		return nil, kerrs.NewInvalid(usergroup.Kind("User"), name, field.ErrorList{err})
	}
	return r.Store.Get(ctx, name, options)
}
|
package godless
import (
"crypto/sha256"
"github.com/pkg/errors"
)
// SemiLattice is a value that can be merged (joined) with another value of
// the same concrete kind; implementations return an error on kind mismatch.
type SemiLattice interface {
	Join(other SemiLattice) (SemiLattice, error)
}
// Semi-lattice type that implements our storage.
// Namespace maps string keys to Objects; joining two Namespaces joins the
// Objects stored under matching keys.
type Namespace struct {
	Objects map[string]Object
}

// MakeNamespace returns an empty, ready-to-use Namespace.
func MakeNamespace() *Namespace {
	return &Namespace{
		Objects: map[string]Object{},
	}
}
// JoinNamespace merges two Namespaces key by key: Objects present in both are
// joined, the rest are copied. Neither input Namespace is mutated.
func (ns *Namespace) JoinNamespace(other *Namespace) (*Namespace, error) {
	merged := map[string]Object{}
	for key, obj := range ns.Objects {
		merged[key] = obj
	}
	for key, theirs := range other.Objects {
		ours, found := ns.Objects[key]
		if !found {
			merged[key] = theirs
			continue
		}
		joined, err := ours.JoinObject(theirs)
		if err != nil {
			return nil, errors.Wrap(err, "Error in Namespace join")
		}
		merged[key] = joined
	}
	return &Namespace{Objects: merged}, nil
}
// Join implements SemiLattice; the other value must also be a *Namespace.
func (ns *Namespace) Join(other SemiLattice) (SemiLattice, error) {
	ons, ok := other.(*Namespace)
	if !ok {
		return nil, errors.New("Expected *Namespace in Join")
	}
	return ns.JoinNamespace(ons)
}
// JoinObject joins obj into the Object stored at key, updating the Namespace
// in place. A missing key joins obj against the zero Object.
func (ns *Namespace) JoinObject(key string, obj Object) error {
	merged, err := obj.JoinObject(ns.Objects[key])
	if err == nil {
		ns.Objects[key] = merged
		return nil
	}
	return errors.Wrap(err, "Namespace JoinObject failed")
}
// type ObjType uint8
//
// const (
// SET = ObjType(iota)
// MAP
// )
// TODO improved type validation

// Object wraps an arbitrary SemiLattice value so different lattice kinds
// (Set, Map, nested Namespace) can be stored together in a Namespace.
type Object struct {
	// Type ObjType
	Obj SemiLattice
}
// JoinObject merges another Object into this one, returning the join.
// The zero Object (no wrapped lattice) acts as the identity element.
func (o Object) JoinObject(other Object) (Object, error) {
	// BUG FIX: the zero-value check used to be `other.Obj == Object{}`, which
	// compares the wrapped SemiLattice interface against an Object value and
	// is false for the actual zero Object (whose Obj field is a nil
	// interface); joining with the zero value then failed inside Join.
	// Check the interface against nil instead.
	if other.Obj == nil {
		return o, nil
	}
	// Symmetrically, a zero receiver joins to the other value.
	if o.Obj == nil {
		return other, nil
	}
	joined, err := o.Obj.Join(other.Obj)
	if err != nil {
		return Object{}, err
	}
	return Object{Obj: joined}, nil
}
// Join implements SemiLattice; the other value must also be an Object.
func (o Object) Join(other SemiLattice) (SemiLattice, error) {
	if otherobj, ok := other.(Object); ok {
		return o.JoinObject(otherobj)
	}
	return nil, errors.New("Expected Object in Join")
}
// Set is a grow-only set of strings; join is deduplicated union.
type Set struct {
	Members []string
}

// JoinSet returns the union of two Sets with duplicates removed.
func (set Set) JoinSet(other Set) Set {
	// Handle zero value
	if len(other.Members) == 0 {
		return set
	}
	// BUG FIX: `append(set.Members, other.Members...)` could write into the
	// spare capacity of set.Members' backing array, mutating any other slice
	// that shares it. Build the union in a freshly allocated slice.
	members := make([]string, 0, len(set.Members)+len(other.Members))
	members = append(members, set.Members...)
	members = append(members, other.Members...)
	build := Set{Members: members}
	return build.uniq()
}

// Join implements SemiLattice; the other value must also be a Set.
func (set Set) Join(other SemiLattice) (SemiLattice, error) {
	if os, ok := other.(Set); ok {
		return set.JoinSet(os), nil
	}
	return nil, errors.New("Expected Set in Join")
}

// uniq returns a copy of the set with duplicate members removed.
func (set Set) uniq() Set {
	return Set{Members: uniq256(set.Members)}
}
// Map is a multimap from string keys to lists of string values; join is the
// per-key deduplicated union of the value lists.
type Map struct {
	Members map[string][]string
}

// JoinMap returns the join of two Maps: the union of their keys, with value
// lists concatenated and deduplicated per key.
func (m Map) JoinMap(other Map) Map {
	// Handle zero value
	if len(other.Members) == 0 {
		return m
	}
	build := map[string][]string{}
	for k, v := range m.Members {
		build[k] = v
	}
	for k, v := range other.Members {
		initv, present := m.Members[k]
		if present {
			// BUG FIX: `append(initv, v...)` could write into the spare
			// capacity of m.Members[k]'s backing array, mutating the
			// receiver. Concatenate into a fresh slice instead.
			merged := make([]string, 0, len(initv)+len(v))
			merged = append(merged, initv...)
			merged = append(merged, v...)
			build[k] = merged
		} else {
			build[k] = v
		}
	}
	ret := Map{Members: build}
	return ret.uniq()
}

// Join implements SemiLattice; the other value must also be a Map.
func (m Map) Join(other SemiLattice) (SemiLattice, error) {
	if om, ok := other.(Map); ok {
		return m.JoinMap(om), nil
	}
	return nil, errors.New("Expected Map in Join")
}

// uniq returns a copy of the map with duplicates removed from each value list.
func (m Map) uniq() Map {
	build := map[string][]string{}
	for k, vs := range m.Members {
		build[k] = uniq256(vs)
	}
	return Map{Members: build}
}
// uniq256 deduplicates a slice of strings, comparing by sha256 digest.
// FIX: the previous version collected survivors by iterating a map, so the
// output order was random on every call; keep the first occurrence of each
// string and preserve input order so results are deterministic.
func uniq256(dups []string) []string {
	seen := map[[sha256.Size]byte]struct{}{}
	out := make([]string, 0, len(dups))
	for _, s := range dups {
		k := sha256.Sum256([]byte(s))
		if _, present := seen[k]; present {
			continue
		}
		seen[k] = struct{}{}
		out = append(out, s)
	}
	return out
}
tidy up commented out stuff
package godless
import (
"crypto/sha256"
"github.com/pkg/errors"
)
// SemiLattice is a value that can be merged (joined) with another value of
// the same concrete kind; implementations return an error on kind mismatch.
type SemiLattice interface {
	Join(other SemiLattice) (SemiLattice, error)
}
// Semi-lattice type that implements our storage.
// Namespace maps string keys to Objects; joining two Namespaces joins the
// Objects stored under matching keys.
type Namespace struct {
	Objects map[string]Object
}

// MakeNamespace returns an empty, ready-to-use Namespace.
func MakeNamespace() *Namespace {
	return &Namespace{
		Objects: map[string]Object{},
	}
}
// JoinNamespace merges two Namespaces key by key: Objects present in both are
// joined, the rest are copied. Neither input Namespace is mutated.
func (ns *Namespace) JoinNamespace(other *Namespace) (*Namespace, error) {
	merged := map[string]Object{}
	for key, obj := range ns.Objects {
		merged[key] = obj
	}
	for key, theirs := range other.Objects {
		ours, found := ns.Objects[key]
		if !found {
			merged[key] = theirs
			continue
		}
		joined, err := ours.JoinObject(theirs)
		if err != nil {
			return nil, errors.Wrap(err, "Error in Namespace join")
		}
		merged[key] = joined
	}
	return &Namespace{Objects: merged}, nil
}
// Join implements SemiLattice; the other value must also be a *Namespace.
func (ns *Namespace) Join(other SemiLattice) (SemiLattice, error) {
	ons, ok := other.(*Namespace)
	if !ok {
		return nil, errors.New("Expected *Namespace in Join")
	}
	return ns.JoinNamespace(ons)
}
// JoinObject joins obj into the Object stored at key, updating the Namespace
// in place. A missing key joins obj against the zero Object.
func (ns *Namespace) JoinObject(key string, obj Object) error {
	merged, err := obj.JoinObject(ns.Objects[key])
	if err == nil {
		ns.Objects[key] = merged
		return nil
	}
	return errors.Wrap(err, "Namespace JoinObject failed")
}
// TODO improved type validation

// Object wraps an arbitrary SemiLattice value so different lattice kinds
// (Set, Map, nested Namespace) can be stored together in a Namespace.
type Object struct {
	Obj SemiLattice
}
// JoinObject merges another Object into this one, returning the join.
// The zero Object (no wrapped lattice) acts as the identity element.
func (o Object) JoinObject(other Object) (Object, error) {
	// BUG FIX: the zero-value check used to be `other.Obj == Object{}`, which
	// compares the wrapped SemiLattice interface against an Object value and
	// is false for the actual zero Object (whose Obj field is a nil
	// interface); joining with the zero value then failed inside Join.
	// Check the interface against nil instead.
	if other.Obj == nil {
		return o, nil
	}
	// Symmetrically, a zero receiver joins to the other value.
	if o.Obj == nil {
		return other, nil
	}
	joined, err := o.Obj.Join(other.Obj)
	if err != nil {
		return Object{}, err
	}
	return Object{Obj: joined}, nil
}
// Join implements SemiLattice; the other value must also be an Object.
func (o Object) Join(other SemiLattice) (SemiLattice, error) {
	if otherobj, ok := other.(Object); ok {
		return o.JoinObject(otherobj)
	}
	return nil, errors.New("Expected Object in Join")
}
// Set is a grow-only set of strings; join is deduplicated union.
type Set struct {
	Members []string
}

// JoinSet returns the union of two Sets with duplicates removed.
func (set Set) JoinSet(other Set) Set {
	// Handle zero value
	if len(other.Members) == 0 {
		return set
	}
	// BUG FIX: `append(set.Members, other.Members...)` could write into the
	// spare capacity of set.Members' backing array, mutating any other slice
	// that shares it. Build the union in a freshly allocated slice.
	members := make([]string, 0, len(set.Members)+len(other.Members))
	members = append(members, set.Members...)
	members = append(members, other.Members...)
	build := Set{Members: members}
	return build.uniq()
}

// Join implements SemiLattice; the other value must also be a Set.
func (set Set) Join(other SemiLattice) (SemiLattice, error) {
	if os, ok := other.(Set); ok {
		return set.JoinSet(os), nil
	}
	return nil, errors.New("Expected Set in Join")
}

// uniq returns a copy of the set with duplicate members removed.
func (set Set) uniq() Set {
	return Set{Members: uniq256(set.Members)}
}
// Map is a multimap from string keys to lists of string values; join is the
// per-key deduplicated union of the value lists.
type Map struct {
	Members map[string][]string
}

// JoinMap returns the join of two Maps: the union of their keys, with value
// lists concatenated and deduplicated per key.
func (m Map) JoinMap(other Map) Map {
	// Handle zero value
	if len(other.Members) == 0 {
		return m
	}
	build := map[string][]string{}
	for k, v := range m.Members {
		build[k] = v
	}
	for k, v := range other.Members {
		initv, present := m.Members[k]
		if present {
			// BUG FIX: `append(initv, v...)` could write into the spare
			// capacity of m.Members[k]'s backing array, mutating the
			// receiver. Concatenate into a fresh slice instead.
			merged := make([]string, 0, len(initv)+len(v))
			merged = append(merged, initv...)
			merged = append(merged, v...)
			build[k] = merged
		} else {
			build[k] = v
		}
	}
	ret := Map{Members: build}
	return ret.uniq()
}

// Join implements SemiLattice; the other value must also be a Map.
func (m Map) Join(other SemiLattice) (SemiLattice, error) {
	if om, ok := other.(Map); ok {
		return m.JoinMap(om), nil
	}
	return nil, errors.New("Expected Map in Join")
}

// uniq returns a copy of the map with duplicates removed from each value list.
func (m Map) uniq() Map {
	build := map[string][]string{}
	for k, vs := range m.Members {
		build[k] = uniq256(vs)
	}
	return Map{Members: build}
}
// uniq256 deduplicates a slice of strings, comparing by sha256 digest.
// FIX: the previous version collected survivors by iterating a map, so the
// output order was random on every call; keep the first occurrence of each
// string and preserve input order so results are deterministic.
func uniq256(dups []string) []string {
	seen := map[[sha256.Size]byte]struct{}{}
	out := make([]string, 0, len(dups))
	for _, s := range dups {
		k := sha256.Sum256([]byte(s))
		if _, present := seen[k]; present {
			continue
		}
		seen[k] = struct{}{}
		out = append(out, s)
	}
	return out
}
|
package nests
import (
"math"
"math/rand"
"github.com/freignat91/mlearning/network"
)
// visionNb is the number of direction sectors per sensory band.
var visionNb = 8

// inNb is the size of the network input vector (four bands of visionNb).
var inNb = 8 * 4 //8 for ants, 8 for foods, 8 for pheromones, 8 for hostile

// outNb is the number of output directions the network can choose from.
var outNb = 8

// modes names the Ant.mode values 0-4.
var modes = []string{"free", "spread", "found food", "piste up food", "hostile"}

var deltaBase = 1 // number of train to do for the first partition 0 or 1
var deltaCoef = 1

// updateNetwork enables copying the best ant's network (see setNetwork/update).
var updateNetwork = true
// Ant .
// Ant is one simulated ant. Exported fields are serialized to JSON for the
// front end; unexported fields hold navigation, sensing and learning state.
type Ant struct {
	ID        int     `json:"id"`
	X         float64 `json:"x"`
	Y         float64 `json:"y"`
	Direction int     `json:"direction"`
	Fight     bool    `json:"fight"`
	AntType   int     `json:"type"` //worker=0, soldier=1
	Life      int     `json:"life"`
	//
	// owning nest, happiness and movement state
	nest               *Nest
	happiness          float64
	lastHappiness      float64
	regularSpeed       float64
	maxSpeed           float64
	speed              float64
	vision             float64
	dx                 float64
	dy                 float64
	soldierInitCounter int
	// neural network, its input/output vectors, and decision bookkeeping
	network            *network.MLNetwork
	entries            []float64
	lastEntries        []float64
	outs               []float64
	lastDecision       int
	lastLastDecision   int
	lastResult         bool
	lastDecisionRandom bool
	panic              bool
	// per-ant statistics
	statDecision     *Stats
	statGoodDecision *Stats
	statReinforce    *Stats
	statFade         *Stats
	gRate            float64
	dirMap           []int
	dirCount         int
	// foraging / combat state
	food               *Food
	pheromoneDelay     int
	pheromoneCount     int
	lastPheromone      int
	lastPheromoneCount int
	lastEntryMode      int
	timeWithoutHostile int
	trained            bool
	pursue             *Ant
	mode               int
	lastMode           int
	//
	decTmp []int
	//happinnessDeltaMax float64
	//happinnessDeltaMaxTmp float64
	// happiness-delta accumulators, indexed by mode (0-4)
	happinessDeltaSum     [5]float64
	happinessDeltaNumber  [5]int
	averageHappinessDelta [5]float64
	//debug
	index   int
	tmpFood *Food
	lastx   float64
	lasty   float64
}
// newAnt creates an ant of the given type at a random position near the nest
// center, wires up its per-ant stats, and assigns it a neural network.
func newAnt(ns *Nests, n *Nest, antType int) *Ant {
	n.antIDCounter++
	ant := &Ant{
		ID:               n.antIDCounter,
		X:                n.x + 20.0 - rand.Float64()*40,
		Y:                n.y + 20.0 - rand.Float64()*40,
		AntType:          antType,
		nest:             n,
		vision:           30,
		Direction:        int(rand.Int31n(int32(outNb))),
		entries:          make([]float64, inNb),
		lastEntries:      make([]float64, inNb),
		outs:             make([]float64, outNb),
		lastDecision:     -1,
		statDecision:     newStats(n.statDecision),
		statGoodDecision: newStats(n.statGoodDecision),
		statReinforce:    newStats(n.statReinforce),
		statFade:         newStats(n.statFade),
		dirMap:           make([]int, outNb),
		decTmp:           make([]int, outNb/2),
	}
	ant.setNetwork(ns)
	return ant
}
// newAntWorker creates a worker ant (type 0) at the given position and
// heading, with worker life/speed parameters from the nest.
func newAntWorker(ns *Nests, n *Nest, x float64, y float64, direction int) *Ant {
	ant := newAnt(ns, n, 0)
	ant.X, ant.Y = x, y
	ant.Life = n.parameters.workerLife
	ant.regularSpeed = n.parameters.workerMinSpeed + rand.Float64()*.1
	ant.maxSpeed = n.parameters.workerMaxSpeed + rand.Float64()*.1
	ant.speed = ant.regularSpeed
	ant.Direction = direction
	return ant
}
// newAntSoldier creates a soldier ant (type 1) at the given position with an
// optional initial movement vector (dx, dy).
func newAntSoldier(ns *Nests, n *Nest, x float64, y float64, dx float64, dy float64, direction int) *Ant {
	ant := newAnt(ns, n, 1)
	ant.X, ant.Y = x, y
	ant.dx, ant.dy = dx, dy
	// NOTE(review): this arms the init counter only when BOTH components are
	// non-zero — confirm it should not be `dx != 0 || dy != 0`.
	if dx != 0 && dy != 0 {
		ant.soldierInitCounter = n.parameters.soldierInitCounter
	}
	ant.Life = n.parameters.soldierLife
	ant.regularSpeed = n.parameters.soldierMinSpeed + rand.Float64()*.1
	ant.maxSpeed = n.parameters.soldierMaxSpeed + rand.Float64()*.1
	ant.speed = ant.regularSpeed
	ant.Direction = direction
	return ant
}
// decrLife subtracts val from the ant's life, clamping at zero; already-dead
// ants (life <= 0) are left untouched.
func (a *Ant) decrLife(ns *Nests, val int) {
	if a.Life <= 0 {
		return
	}
	a.Life -= val
	if a.Life < 0 {
		a.Life = 0
	}
}
// setNetwork assigns a neural network to the ant: with some probability it
// clones the nest's current best performer of the same type; otherwise it
// builds a fresh network with a randomly sized hidden layer.
func (a *Ant) setNetwork(ns *Nests) {
	if updateNetwork {
		if a.AntType == 0 {
			// workers may inherit a copy of the best worker's network
			if a.nest.bestWorker != nil && rand.Float64() < a.nest.parameters.chanceToGetTheBestNetworkCopy {
				net, err := a.nest.bestWorker.network.Copy()
				if err == nil {
					a.network = net
					return
				}
			}
		} else {
			// soldiers may inherit a copy of the best soldier's network
			if a.nest.bestSoldier != nil && rand.Float64() < a.nest.parameters.chanceToGetTheBestNetworkCopy {
				net, err := a.nest.bestSoldier.network.Copy()
				if err == nil {
					a.network = net
					return
				}
			}
		}
	}
	var defnet []int
	// NOTE(review): "|| true" forces the 3-layer topology, making the 4-layer
	// branch below dead code — confirm whether this is intentional. (Removing
	// the rand.Float64() call would also shift the RNG stream.)
	if rand.Float64() < 0.5 || true {
		// 3 layers: input, one hidden layer of 5-49 neurons, output
		defnet = make([]int, 3, 3)
		defnet[0] = inNb
		defnet[1] = int(5 + rand.Int31n(45))
		defnet[2] = outNb
	} else {
		// 4 layers: input, two hidden layers, output (currently unreachable)
		defnet = make([]int, 4, 4)
		defnet[0] = inNb
		defnet[1] = int(5 + rand.Int31n(45))
		defnet[2] = int(5 + rand.Int31n(25))
		defnet[3] = outNb
	}
	net, _ := network.NewNetwork(defnet)
	a.network = net
}
// nextTime advances the ant by one simulation tick: life decay, movement,
// sensing, and happiness evaluation. Depending on the happiness trend it
// keeps the current direction, re-decides (via the network or randomly), or
// reinforces the last good decision by training the network.
func (a *Ant) nextTime(ns *Nests, update bool) {
	if !a.tickLife(ns) {
		return
	}
	a.lastLastDecision = a.lastDecision
	a.lastDecision = a.Direction
	a.displayInfo1(ns)
	a.moveOnOut(ns)
	if a.panic {
		return
	}
	a.updateEntries(ns)
	a.computeHappiness(ns)
	a.displayInfo2(ns)
	if ns.log && a == ns.selected {
		a.printf(ns, "mode: %d entries: %s\n", a.mode, a.displayList(ns, a.entries, "%.2f"))
		a.printf(ns, "happiness=%.3f\n", a.happiness)
	}
	if a.happiness == a.lastHappiness && a.happiness >= 0 {
		// happiness stable and non-negative: keep the current direction
		a.printf(ns, "decision: no need\n")
	} else if a.happiness < a.lastHappiness {
		// happiness dropped: last decision was bad; pick a new direction,
		// preferring the network more often as the good-decision rate grows
		a.printf(ns, "bad last decision: %d\n", a.lastDecision)
		if rand.Float64() > 1-(a.gRate-5)/100 && (a.lastResult || a.lastDecision != a.lastLastDecision) {
			a.decide(ns)
			a.printf(ns, "decision using network: %d\n", a.Direction)
			a.lastDecisionRandom = false
			a.statDecision.incr()
		} else {
			// fall back to a uniformly random direction
			a.Direction = int(rand.Int31n(int32(outNb)))
			//a.Direction = a.index + 4
			//if a.Direction >= outNb {
			//a.Direction = a.Direction - outNb
			//}
			a.dirMap[a.Direction]++
			a.lastDecisionRandom = true
			a.statDecision.incr()
			a.printf(ns, "decision random: %d\n", a.Direction)
		}
		a.lastResult = false
	} else if a.lastDecision != -1 && a.mode == a.lastMode {
		// happiness improved within the same mode: count it as a good
		// decision and reinforce it by training the network
		a.statGoodDecision.incr()
		a.lastResult = true
		a.printf(ns, "decision: no need\n")
		delta := (a.happiness - a.lastHappiness)
		// accumulate the positive delta per mode, ignoring outliers (>3x avg)
		if delta > 0 && (delta < a.averageHappinessDelta[a.mode]*3 || a.averageHappinessDelta[a.mode] == 0) {
			a.happinessDeltaSum[a.mode] += delta
			a.happinessDeltaNumber[a.mode]++
		}
		// NOTE(review): averageHappinessDelta is a [5]float64 array, so %.4f
		// formats every element — confirm whether [a.mode] was intended.
		a.printf(ns, "positive decision %d, delta=%.4f average=%.4f\n", a.lastDecision, delta, a.averageHappinessDelta)
		_, ok := a.train(ns)
		if ok {
			a.printf(ns, "positive decision reinforced\n")
			a.statReinforce.incr()
			a.lastDecision = -1
		}
	}
	if update {
		a.update(ns)
	}
}
// tickLife applies the periodic life decay for this ant's type and reports
// whether the ant is still alive.
func (a *Ant) tickLife(ns *Nests) bool {
	if a.Life < 0 {
		return false
	}
	period := a.nest.parameters.soldierLifeDecrPeriod
	if a.AntType == 0 {
		period = a.nest.parameters.workerLifeDecrPeriod
	}
	if ns.timeRef%period == 0 {
		a.decrLife(ns, 1)
		if a.Life <= 0 {
			return false
		}
	}
	return true
}
// update periodically (every 5000 ticks) replaces this ant's network with a
// copy of the nest's best performer of the same type when this ant is clearly
// doing worse (fewer distinct directions used, or similar coverage but a much
// lower good-decision rate).
func (a *Ant) update(ns *Nests) {
	if !updateNetwork || ns.timeRef%5000 != 0 {
		return
	}
	bestAnt := a.nest.bestWorker
	if a.AntType == 1 {
		bestAnt = a.nest.bestSoldier
	}
	// BUG FIX: guard against a nil best ant (none elected yet), which would
	// panic on the bestAnt.dirCount dereference below.
	if bestAnt == nil {
		return
	}
	if a.dirCount < bestAnt.dirCount-3 || (a.dirCount <= bestAnt.dirCount && a.gRate < bestAnt.gRate-10) {
		net, err := bestAnt.network.Copy()
		if err == nil {
			a.network = net
			a.lastDecision = -1
			a.printf(ns, "update network with the best one: %v\n", a.network.Getdef())
		}
	}
}
// decide runs the ant's network on the current entries and sets Direction to
// the output with the highest activation. Returns false when the ant is
// carrying food or the entries are not usable for a decision.
func (a *Ant) decide(ns *Nests) bool {
	if a.food != nil {
		return false
	}
	ins, ok := a.preparedEntries(a.entries)
	if !ok {
		a.printf(ns, "bad entries: %s\n", a.displayList(ns, ins, "%.3f"))
		return false
	}
	// NOTE(review): propagation uses a.entries, not the prepared `ins` (which
	// is only logged above) — confirm this is intentional.
	a.outs = a.network.Propagate(a.entries, true)
	if ns.log {
		a.printf(ns, "Compute decision, propagation: %s\n", a.displayList(ns, a.outs, "%.3f"))
	}
	// argmax over the output activations
	direction := 0
	max := 0.0
	for ii, out := range a.outs {
		if out > max {
			max = out
			direction = ii
		}
	}
	a.Direction = direction
	a.dirMap[direction]++
	return true
}
// commitPeriodStats closes out a statistics period: records how many distinct
// directions the ant used, refreshes the per-mode average happiness deltas,
// and recomputes the good-decision rate (gRate) from cumulative counters.
func (a *Ant) commitPeriodStats(ns *Nests) {
	if ns.stopped {
		return
	}
	// count the distinct directions used during the period, then reset the map
	nb := 0
	for _, val := range a.dirMap {
		if val > 0 {
			nb++
		}
	}
	a.dirCount = nb
	a.dirMap = make([]int, outNb, outNb)
	//
	// refresh the per-mode average happiness delta and reset the accumulators;
	// index 0 (mode "free") is skipped — TODO confirm this is intentional.
	for ii := 1; ii < 5; ii++ {
		a.averageHappinessDelta[ii] = 0
		if a.happinessDeltaNumber[ii] > 0 {
			a.averageHappinessDelta[ii] = a.happinessDeltaSum[ii] / float64(a.happinessDeltaNumber[ii])
		}
		a.happinessDeltaSum[ii] = 0
		a.happinessDeltaNumber[ii] = 0
	}
	if a.statDecision.value != 0 {
		a.statDecision.push()
		a.statGoodDecision.push()
		a.statReinforce.push()
		a.statFade.push()
		// gRate = percentage of decisions that were good, capped at 100
		a.gRate = float64(a.statGoodDecision.cumul) * 100.0 / float64(a.statDecision.cumul)
		if a.gRate > 100 {
			a.gRate = 100
		}
	}
}
// updateEntries rebuilds the ant's network input vector for this tick and
// selects the behavior mode by priority: hostile ants first, then (workers
// only) food and pheromones, falling back to spreading with friendly ants.
func (a *Ant) updateEntries(ns *Nests) {
	// periodically clear the fight flag so old fights don't linger
	if ns.timeRef%100 == 0 {
		a.Fight = false
	}
	// shift the current entries into lastEntries and zero the vector
	for ii := range a.entries {
		a.lastEntries[ii] = a.entries[ii]
		a.entries[ii] = 0
	}
	if a.food != nil {
		// carrying food: no sensory input, mode forced to 0.
		// NOTE(review): lastMode is not updated on this path — confirm.
		a.mode = 0
		a.printf(ns, "Carry food no entries\n")
		return
	}
	a.lastMode = a.mode
	a.mode = 0
	if a.updateEntriesForHostileAnts(ns) {
		a.mode = 4
		return
	}
	if a.AntType == 0 {
		if a.updateEntriesForFoods(ns) {
			a.mode = 2
			return
		}
		if a.updateEntriesForPheromones(ns) {
			a.mode = 3
			return
		}
	}
	a.updateEntriesForFriendAnts(ns)
	a.mode = 1
}
// updateEntriesForFoods scans for the closest uncarried food within vision
// range: on contact it is picked up, otherwise its direction/proximity is
// written into the food band of the entries vector. Returns true when any
// food was found in range.
func (a *Ant) updateEntriesForFoods(ns *Nests) bool {
	maxDist2 := a.vision * a.vision
	closestDist2 := maxDist2
	var closest *Food
	for _, food := range ns.foods {
		if food.carried {
			continue
		}
		d2 := (food.X-a.X)*(food.X-a.X) + (food.Y-a.Y)*(food.Y-a.Y)
		if d2 < closestDist2 {
			closest = food
			closestDist2 = d2
		}
	}
	a.printf(ns, "closest food: %+v\n", closest)
	if closest == nil {
		return false
	}
	if closestDist2 < 5 {
		// contact: pick the food up
		a.carryFood(closest)
		a.pheromoneCount = 0
		return true
	}
	a.tmpFood = closest
	index := a.getDirection(ns, closest.X, closest.Y)
	a.entries[visionNb+index] = ((maxDist2 - closestDist2) / maxDist2)
	return true
}
// updateEntriesForPheromones writes pheromone stimuli into the third band of
// the entries vector, scanning within 3x the vision radius. If the ant has
// been stuck on the same pheromone for too long, the whole band is cleared to
// break the loop. Returns true when at least one pheromone was considered.
func (a *Ant) updateEntriesForPheromones(ns *Nests) bool {
	//minLevel := a.nest.parameters.pheromoneLevel + 1
	// NOTE(review): despite its name, minLevel tracks phe.id, not phe.Level —
	// confirm whether filtering by lowest id is the intended behavior.
	minLevel := 1000000
	dist2Max := a.vision * a.vision * 9
	dist2m := dist2Max
	var pheMin *Pheromone
	for _, phe := range a.nest.pheromones {
		if phe.Level > 0 {
			dist2 := (phe.X-a.X)*(phe.X-a.X) + (phe.Y-a.Y)*(phe.Y-a.Y)
			if dist2 < dist2Max && phe.id < minLevel {
				minLevel = phe.id
				index := a.getDirection(ns, phe.X, phe.Y)
				//if ns.selected == a {
				//	fmt.Printf("Pheromone direction: %d\n", index)
				//}
				pheMin = phe
				// weight by (configured level - current level) and proximity
				a.entries[visionNb*2+index] += (a.nest.parameters.pheromoneLevel - float64(phe.Level)) * ((dist2Max - dist2) / dist2Max)
				// also track the closest qualifying pheromone
				if dist2 < dist2m {
					pheMin = phe
					dist2m = dist2
				}
			}
		}
	}
	a.printf(ns, "pheromone: %+v\n", pheMin)
	if pheMin != nil {
		// count consecutive ticks spent following the same pheromone
		if a.lastPheromone == pheMin.id {
			a.lastPheromoneCount++
		} else {
			a.lastPheromoneCount = 0
		}
		a.lastPheromone = pheMin.id
		if a.lastPheromoneCount > 200 {
			a.printf(ns, "same pheromone too much time: ignored\n")
			for ii := visionNb * 2; ii < visionNb*3; ii++ {
				a.entries[ii] = 0
			}
		}
		return true
	}
	return false
}
// updateEntriesForHostileAnts writes hostile-ant stimuli into the fourth band
// of the entries vector and applies combat side effects (pursuit, fighting,
// life loss, worker panic). Returns true when a hostile ant influenced this
// ant's inputs. Workers only react when the global panicMode is set; soldiers
// always scan, with a 16x larger squared detection radius.
func (a *Ant) updateEntriesForHostileAnts(ns *Nests) bool {
	if a.AntType == 0 && !panicMode {
		return false
	}
	dist2Max := a.vision * a.vision
	dist2Contact := dist2Max / 8
	if a.AntType == 1 {
		dist2Max = dist2Max * 16
	}
	//pursue mode
	// soldiers keep chasing a previously acquired target until it dies or
	// escapes beyond 4x the (extended) detection radius
	if a.AntType == 1 && a.pursue != nil {
		if a.pursue.Life > 0 {
			a.printf(ns, "Pursue ant: %d\n", a.pursue.ID)
			dist2m := a.distAnt2(a.pursue)
			if dist2m > dist2Max*4 {
				a.pursue = nil
			} else if dist2m < dist2Contact {
				// contact while pursuing: damage the pursued ant
				a.pursue.decrLife(ns, 2)
				index := a.getDirection(ns, a.pursue.X, a.pursue.Y)
				a.entries[visionNb*3+index] = ((dist2Max - dist2m) / dist2Max)
				return true
			} else {
				index := a.getDirection(ns, a.pursue.X, a.pursue.Y)
				a.entries[visionNb*3+index] = ((dist2Max*4 - dist2m) / dist2Max / 4)
				return true
			}
		} else {
			a.pursue = nil
		}
	}
	a.speed = a.regularSpeed
	a.timeWithoutHostile++
	// scan all other nests for the closest living hostile ant
	var antMin *Ant
	dist2m := dist2Max
	for _, nest := range ns.nests {
		if nest.id != a.nest.id {
			for _, ant := range nest.ants {
				if ant.Life > 0 {
					dist2 := a.distAnt2(ant)
					if dist2 < dist2m {
						antMin = ant
						dist2m = dist2
					}
				}
			}
		}
	}
	a.printf(ns, "closest hostile: %+v\n", antMin)
	if antMin != nil {
		a.soldierInitCounter = 0
		a.timeWithoutHostile = 0
		if a.AntType == 1 {
			// soldiers lock onto the target and sprint
			a.pursue = antMin
			a.speed = a.maxSpeed
		}
		if dist2m < dist2Contact {
			a.Fight = true
			antMin.Fight = true
			if a.AntType == 1 {
				// soldier melee: enemy loses 2 life, this ant loses 1
				antMin.decrLife(ns, 2)
				a.decrLife(ns, 1)
			}
		}
		if a.AntType == 0 {
			// workers far from home may panic, dropping any carried food
			if rand.Float64() < 0.1 && (a.X-a.nest.x)*(a.X-a.nest.x)+(a.Y-a.nest.y)*(a.Y-a.nest.y) > 4000 {
				a.printf(ns, "current ant panic mode\n")
				a.panic = true
				a.Fight = false
				if a.food != nil {
					a.dropFood(ns)
				}
			}
		}
		index := a.getDirection(ns, antMin.X, antMin.Y)
		a.entries[visionNb*3+index] = ((dist2Max - dist2m) / dist2Max)
		return true
	}
	return false
}
// updateEntriesForFriendAnts fills the friend-direction entries
// (indices [0, visionNb)) from the closest same-nest ant. Workers only
// consider other food-free workers; idle soldiers (no hostile sensed for
// more than 5000 ticks) additionally accumulate an entry for every friend
// in range. Returns true when a friend was found within vision.
func (a *Ant) updateEntriesForFriendAnts(ns *Nests) bool {
    dist2Max := a.vision * a.vision
    dist2m := dist2Max
    var antMin *Ant
    if a.AntType == 0 {
        // workers: track the nearest living, food-free fellow worker.
        // (A dead empty "if dist2 < dist2Max/16 {}" check was removed here.)
        for _, ant := range a.nest.ants {
            if ant.Life > 0 && ant.AntType == 0 && ant != a && ant.food == nil {
                dist2 := a.distAnt2(ant)
                if dist2 < dist2m {
                    antMin = ant
                    dist2m = dist2
                }
            }
        }
    }
    if a.AntType == 1 && a.timeWithoutHostile > 5000 {
        // idle soldiers: first find the nearest friend of any type...
        for _, ant := range a.nest.ants {
            if ant.Life > 0 && ant != a && ant.food == nil {
                dist2 := a.distAnt2(ant)
                if dist2 < dist2m {
                    antMin = ant
                    dist2m = dist2
                }
            }
        }
        // ...then accumulate an entry for every friend within vision.
        // NOTE(review): this second loop overwrites antMin/dist2m with the
        // last in-range friend rather than the closest — confirm intended.
        for _, ant := range a.nest.ants {
            if ant.Life > 0 && ant != a && ant.food == nil {
                dist2 := a.distAnt2(ant)
                if dist2 < dist2Max {
                    antMin = ant
                    dist2m = dist2
                    index := a.getDirection(ns, antMin.X, antMin.Y)
                    a.entries[index] += ((dist2Max - dist2m) / dist2Max)
                }
            }
        }
    }
    a.printf(ns, "closest friend: %+v\n", antMin)
    if antMin != nil {
        index := a.getDirection(ns, antMin.X, antMin.Y)
        a.index = index
        a.entries[index] = ((dist2Max - dist2m) / dist2Max)
        return true
    }
    return false
}
// getDirection maps the bearing from the ant to (x, y) onto one of the
// outNb discrete direction sectors and returns the sector index in
// [0, outNb). Note the Atan2(dx, dy) argument order matches the
// Sin/Cos convention used by moveOnOut.
func (a *Ant) getDirection(ns *Nests, x float64, y float64) int {
    angle := math.Atan2(x-a.X, y-a.Y)
    if angle < 0 {
        angle = 2*math.Pi + angle
    }
    sectorWidth := math.Pi * 2.0 / float64(outNb)
    sector := int(angle/sectorWidth) + 1
    if sector >= outNb {
        sector -= outNb
    }
    return sector
}
// computeHappiness recomputes the ant's happiness from its sensory
// entries: friend-direction entries (indices [0, visionNb)) lower
// happiness, all other groups (food, pheromone, hostile) raise it.
// The previous value is preserved in lastHappiness so callers can
// measure the delta.
func (a *Ant) computeHappiness(ns *Nests) {
    a.lastHappiness = a.happiness
    a.happiness = 0
    for ii, val := range a.entries {
        // range indices are never negative, so only the upper bound matters
        // (the original redundant "ii >= 0 &&" check was removed)
        if ii < visionNb {
            a.happiness -= val
        } else {
            a.happiness += val
        }
    }
}
// getDirIndex wraps a direction index into the valid range [0, outNb).
// Inputs are expected to be at most one period out of range.
func (a *Ant) getDirIndex(nn int) int {
    switch {
    case nn >= outNb:
        return nn - outNb
    case nn < 0:
        return nn + outNb
    default:
        return nn
    }
}
// moveOnOut advances the ant one simulation step. Food carriers and
// panicked ants head straight home; freshly spawned soldiers keep their
// launch vector while soldierInitCounter runs down; everyone else moves
// along the discrete Direction sector. Ants bouncing off the (2x scaled)
// world border get a randomized inward direction.
func (a *Ant) moveOnOut(ns *Nests) {
    a.lastx = a.X
    a.lasty = a.Y
    //for now the nest return is hard coded
    if a.food != nil || a.panic {
        a.moveToNest(ns)
        return
    }
    if a.AntType == 1 && a.soldierInitCounter > 0 {
        // launch phase: follow the initial (dx, dy) vector
        a.soldierInitCounter--
        a.X += a.dx * a.speed
        a.Y += a.dy * a.speed
    } else {
        // normal move: convert the sector index to an angle
        angle := (math.Pi * 2 * float64(a.Direction)) / float64(outNb) //+ math.Pi/2
        a.X += math.Sin(angle) * a.speed
        a.Y += math.Cos(angle) * a.speed
    }
    max := 2.0 // world border scale factor
    if a.X < ns.xmin*max {
        //a.X = ns.xmax
        a.X = ns.xmin * max
        a.Direction = 1 + int(rand.Intn(outNb/4+1))
    } else if a.Y < ns.ymin*max {
        //a.Y = ns.ymax
        a.Y = ns.ymin * max
        a.Direction = 7 + int(rand.Intn(outNb/4+1))
    } else if a.X > ns.xmax*max {
        //a.X = ns.xmin
        a.X = ns.xmax * max
        a.Direction = 5 + int(rand.Intn(outNb/4+1))
    } else if a.Y > ns.ymax*max {
        //a.Y = ns.ymin
        a.Y = ns.ymax * max
        a.Direction = 3 + int(rand.Intn(outNb/4+1))
    }
    // the random bounce above can overflow the sector range: wrap it
    if a.Direction >= outNb {
        a.Direction = a.Direction - outNb
    }
}
// moveToNest walks the ant straight back to its nest (doubled speed when
// panicking), dragging any carried food along and periodically dropping
// food pheromones. On arrival (squared distance < 1600): a panicking ant
// spawns a soldier aimed back the way it came, and a food carrier banks
// the food, spawns a worker, and turns around.
func (a *Ant) moveToNest(ns *Nests) {
    speed := a.speed
    if a.panic {
        a.Fight = false
        speed = speed * 2
    }
    // normalized vector toward the nest
    dd := math.Sqrt(float64((a.nest.x-a.X)*(a.nest.x-a.X) + (a.nest.y-a.Y)*(a.nest.y-a.Y)))
    dx := (a.nest.x - a.X) / dd
    dy := (a.nest.y - a.Y) / dd
    a.Direction = a.getDirection(ns, a.nest.x, a.nest.y)
    a.X += dx * speed
    a.Y += dy * speed
    if a.food != nil {
        // carried food follows the ant (offset by 1 for non-primary nests,
        // presumably for rendering — NOTE(review): confirm)
        if a.nest.id == 1 {
            a.food.X = a.X
            a.food.Y = a.Y
        } else {
            a.food.X = a.X + 1
            a.food.Y = a.Y + 1
        }
        a.pheromoneDelay--
        if a.pheromoneDelay <= 0 {
            a.printf(ns, "add food pheromone\n")
            a.pheromoneCount++
            a.nest.addPheromone(a.X, a.Y, a.pheromoneCount)
            a.pheromoneDelay = a.nest.parameters.pheromoneAntDelay
        }
    }
    if (a.nest.x-a.X)*(a.nest.x-a.X)+(a.nest.y-a.Y)*(a.nest.y-a.Y) < 1600 {
        // arrived: compute the opposite (outbound) direction
        direc := a.Direction + outNb/2
        if direc >= outNb {
            direc = direc - outNb
        }
        if a.panic {
            a.nest.addSoldier(ns, a.X, a.Y, -dx, -dy, direc)
            a.panic = false
        }
        if a.food != nil {
            a.nest.ressource += 4
            if len(ns.foodGroups) > 0 {
                if foodRenew {
                    a.food.renew()
                }
            }
            a.Direction = direc
            a.nest.addWorker(ns, a.X, a.Y, direc)
            a.dropFood(ns)
        }
    }
}
// train reinforces the network on the last decision when that decision was
// random and produced a happiness gain. The number of back-propagation
// passes (nb) scales with the gain via getNbTrain. Returns the number of
// passes run and whether the decision counts as handled.
func (a *Ant) train(ns *Nests) (int, bool) {
    if a.lastDecision < 0 {
        // no recorded decision to learn from
        return 0, false
    }
    if a.trained {
        return 0, true
    }
    nb := a.getNbTrain(ns)
    if nb == 0 {
        return 0, false
    }
    ins, ok := a.preparedEntries(a.lastEntries)
    if !ok {
        // all-zero entries carry no signal: nothing to train on
        return 0, false
    }
    if !a.lastDecisionRandom {
        // network-made decisions are not reinforced, only random ones
        return 0, true
    }
    //train a much as the decision appears good concidering delta happiness stats
    for ii := 0; ii < nb; ii++ {
        a.network.Propagate(ins, false)
        a.setOuts(a.lastDecision)
        if a == ns.selected {
            ns.addSample(ins, a.outs)
        }
        a.network.BackPropagate(a.outs)
    }
    return nb, true
}
// getNbTrain derives how many training passes the last decision deserves,
// from the happiness delta relative to the running average for the current
// mode. Returns 0 when no positive average exists yet.
// NOTE(review): the first two delta branches both yield deltaBase and the
// last two both yield deltaBase+1 — the partition looks like a leftover of
// a finer-grained scheme; confirm before simplifying.
func (a *Ant) getNbTrain(ns *Nests) int {
    delta := a.happiness - a.lastHappiness
    var ret int
    if a.averageHappinessDelta[a.mode] <= 0 {
        ret = 0
    } else if delta < a.averageHappinessDelta[a.mode]/2 {
        ret = deltaBase
    } else if delta < a.averageHappinessDelta[a.mode] {
        ret = deltaBase
    } else if delta < a.averageHappinessDelta[a.mode]*1.5 {
        ret = deltaBase + 1
    } else {
        ret = deltaBase + 1
    }
    //a.printf(ns, "Positive decision, delta=%.3f average=%.3f max=%.3f nbTrain=%d\n", delta, a.averageHappinessDelta, a.happinnessDeltaMax, ret)
    a.printf(ns, "Positive decision, delta=%.3f average=%.3f nbTrain=%d\n", delta, a.averageHappinessDelta, ret)
    return ret * deltaCoef
}
// fadeLastDecision punishes the last decision by training the network
// toward a "faded" target (0.2 everywhere, 0 at the chosen direction).
// Returns true when a back-propagation pass was applied.
func (a *Ant) fadeLastDecision(ns *Nests) bool {
    if a.lastDecision == -1 || a.trained {
        return false
    }
    ins, ok := a.preparedEntries(a.lastEntries)
    if !ok {
        return false
    }
    outs := a.network.Propagate(ins, true)
    a.setOutsFaded(a.lastDecision)
    if ns.log && a == ns.selected {
        // debug instrumentation for the selected ant only
        a.computeTrainResult(a.outs, outs)
        a.printf(ns, "fade last decision: %d\n", a.lastDecision)
        ns.addSample(ins, a.outs)
    }
    a.network.BackPropagate(a.outs)
    return true
}
// preparedEntries binarizes the given entry vector: every strictly
// positive value becomes 1, all other values are kept as-is. The second
// result is false when no entry was positive (nothing to learn from).
func (a *Ant) preparedEntries(list []float64) ([]float64, bool) {
    prepared := make([]float64, len(list))
    hasSignal := false
    for ii, val := range list {
        if val > 0 {
            prepared[ii] = 1 //val
            hasSignal = true
        } else {
            prepared[ii] = val
        }
    }
    return prepared, hasSignal
}
// setOuts rewrites the output vector as a one-hot encoding of direction.
func (a *Ant) setOuts(direction int) {
    for ii := 0; ii < len(a.outs); ii++ {
        a.outs[ii] = 0
    }
    a.outs[direction] = 1
}
// setOutsFaded builds the punishment target: a flat 0.2 everywhere with
// 0 at the direction that is being discouraged.
func (a *Ant) setOutsFaded(lastDecision int) {
    for ii := 0; ii < len(a.outs); ii++ {
        a.outs[ii] = 0.2
    }
    a.outs[lastDecision] = 0
}
// dropFood releases the carried food, if any. The food is only marked
// available again when foodRenew is enabled — otherwise it stays flagged
// as carried, presumably meaning "consumed" (NOTE(review): confirm).
func (a *Ant) dropFood(ns *Nests) {
    if a.food != nil {
        if foodRenew {
            a.food.carried = false
        }
        a.food = nil
    }
}
// carryFood attaches the given food to this ant unless another ant is
// already carrying it.
func (a *Ant) carryFood(f *Food) {
    if f.carried {
        return
    }
    f.carried = true
    a.food = f
}
// displayInfo1 logs a per-tick debug header for the currently selected
// ant: identity, reinforcement/decision counters, period and global good
// rates, network shape, happiness and last decisions. No-op unless
// logging is on and this ant is selected.
func (a *Ant) displayInfo1(ns *Nests) {
    if ns.log && ns.selected == a {
        a.printf(ns, "-----------------------------------------------------\n")
        // NOTE(review): yields +Inf/NaN when statDecision.scumul is 0, and the
        // stray ")" after the %% verbs looks like a leftover — confirm.
        ggRate := float64(a.statReinforce.scumul) * 100.0 / float64(a.statDecision.scumul)
        a.printf(ns, "%d:[%d] type=%d reinforce:%d decision:%d period:good=%.2f%%) global:good=%.2f%%)\n", ns.timeRef, a.ID, a.AntType, a.statReinforce.cumul, a.statDecision.cumul, a.gRate, ggRate)
        // fixed typo in log message: "hapiness" -> "happiness"
        a.printf(ns, "network=%v happiness=%.5f direction: %d last decision: %d (%d) result: %t\n", a.network.Getdef(), a.happiness, a.Direction, a.lastDecision, a.lastLastDecision, a.lastResult)
    }
}
// displayInfo2 logs the happiness delta and the per-mode delta averages
// for the selected ant. No-op unless logging is on and this ant is selected.
func (a *Ant) displayInfo2(ns *Nests) {
    if ns.log && ns.selected == a {
        // %.4f on the [5]float64 array prints each element with 4 decimals
        a.printf(ns, "delta=%.4f average=%.4f\n", a.happiness-a.lastHappiness, a.averageHappinessDelta)
    }
}
// getModeToString returns a human-readable label for the ant's current
// state. Transient states (panic, carrying food, fighting, pursuing)
// take precedence over the generic sensing-mode label.
func (a *Ant) getModeToString() string {
    if a.panic {
        return "panic"
    }
    if a.food != nil {
        return "carry food"
    }
    if a.AntType == 0 {
        if a.Fight {
            return "attacked"
        }
        return modes[a.mode]
    }
    if a.Fight {
        return "attack"
    }
    if a.pursue != nil {
        return "pursue"
    }
    return modes[a.mode]
}
Update mode 3.
package nests
import (
"math"
"math/rand"
"github.com/freignat91/mlearning/network"
)
// Package-level tuning knobs shared by every ant in the simulation.
var visionNb = 8 // number of direction sectors per sense group
var inNb = 8 * 4 //8 for ants, 8 for foods, 8 for pheromones, 8 for hostile
var outNb = 8 // number of possible movement directions (network outputs)
var modes = []string{"free", "spread", "found food", "piste up food", "hostile"} // labels for Ant.mode values 0..4
var deltaBase = 1 // number of train to do for the first partition 0 or 1
var deltaCoef = 1 // multiplier applied to the computed training-pass count
var updateNetwork = true // allow ants to clone the nest's best network
// Ant is one simulated agent. Its JSON-exported fields are what the UI
// renders; the unexported fields hold the neural network, sensory entries,
// reinforcement-learning bookkeeping and per-period statistics.
type Ant struct {
    ID        int     `json:"id"`
    X         float64 `json:"x"`
    Y         float64 `json:"y"`
    Direction int     `json:"direction"` // current movement sector in [0, outNb)
    Fight     bool    `json:"fight"`
    AntType   int     `json:"type"` //worker=0, soldier=1
    Life      int     `json:"life"`
    //
    nest               *Nest
    happiness          float64 // current satisfaction score (see computeHappiness)
    lastHappiness      float64 // previous-tick happiness, used for deltas
    regularSpeed       float64
    maxSpeed           float64 // sprint speed used by pursuing soldiers
    speed              float64 // effective speed this tick
    vision             float64 // vision radius (distances compared squared)
    dx                 float64 // launch vector for freshly spawned soldiers
    dy                 float64
    soldierInitCounter int // ticks left in the soldier launch phase
    network            *network.MLNetwork
    entries            []float64 // sensory inputs, inNb values in 4 groups of visionNb
    lastEntries        []float64 // previous-tick entries, used for training
    outs               []float64 // network output / training target buffer
    lastDecision       int       // direction chosen last tick, -1 when none
    lastLastDecision   int
    lastResult         bool
    lastDecisionRandom bool // whether lastDecision came from random exploration
    panic              bool // fleeing home after meeting a hostile
    statDecision       *Stats
    statGoodDecision   *Stats
    statReinforce      *Stats
    statFade           *Stats
    gRate              float64 // period good-decision rate, percent
    dirMap             []int   // per-direction decision counts for the period
    dirCount           int     // number of distinct directions used last period
    food               *Food   // food currently carried, nil when none
    pheromoneDelay     int
    pheromoneCount     int
    lastPheromone      int // id of the last nearest pheromone sensed
    lastPheromoneCount int // consecutive ticks on that same pheromone
    lastEntryMode      int
    timeWithoutHostile int
    trained            bool
    pursue             *Ant // hostile target locked by a soldier
    mode               int  // current sensing mode, index into modes
    lastMode           int
    //
    decTmp []int
    //happinnessDeltaMax    float64
    //happinnessDeltaMaxTmp float64
    happinessDeltaSum     [5]float64 // per-mode accumulators for the period
    happinessDeltaNumber  [5]int
    averageHappinessDelta [5]float64
    //debug
    index   int
    tmpFood *Food
    lastx   float64
    lasty   float64
}
// newAnt allocates a new ant of the given type near its nest center
// (random offset within +/-20 on both axes), wires its statistics
// objects and assigns it a network via setNetwork.
func newAnt(ns *Nests, n *Nest, antType int) *Ant {
    n.antIDCounter++
    ant := &Ant{
        ID:               n.antIDCounter,
        X:                n.x + 20.0 - rand.Float64()*40,
        Y:                n.y + 20.0 - rand.Float64()*40,
        AntType:          antType,
        nest:             n,
        vision:           30,
        Direction:        int(rand.Int31n(int32(outNb))),
        entries:          make([]float64, inNb, inNb),
        lastEntries:      make([]float64, inNb, inNb),
        outs:             make([]float64, outNb, outNb),
        lastDecision:     -1, // no decision recorded yet
        statDecision:     newStats(n.statDecision),
        statGoodDecision: newStats(n.statGoodDecision),
        statReinforce:    newStats(n.statReinforce),
        statFade:         newStats(n.statFade),
        dirMap:           make([]int, outNb, outNb),
        decTmp:           make([]int, outNb/2, outNb/2),
    }
    ant.setNetwork(ns)
    return ant
}
// newAntWorker creates a worker ant at (x, y) heading in the given
// direction, with nest-configured life and slightly randomized speeds.
func newAntWorker(ns *Nests, n *Nest, x float64, y float64, direction int) *Ant {
    ant := newAnt(ns, n, 0)
    ant.X = x
    ant.Y = y
    ant.Life = n.parameters.workerLife
    ant.regularSpeed = n.parameters.workerMinSpeed + rand.Float64()*.1
    ant.maxSpeed = n.parameters.workerMaxSpeed + rand.Float64()*.1
    ant.speed = ant.regularSpeed
    ant.Direction = direction
    return ant
}
// newAntSoldier creates a soldier ant at (x, y). When a non-zero launch
// vector (dx, dy) is given, the soldier follows it for
// soldierInitCounter ticks before normal steering takes over.
func newAntSoldier(ns *Nests, n *Nest, x float64, y float64, dx float64, dy float64, direction int) *Ant {
    ant := newAnt(ns, n, 1)
    ant.X = x
    ant.Y = y
    ant.dx = dx
    ant.dy = dy
    if dx != 0 && dy != 0 {
        ant.soldierInitCounter = n.parameters.soldierInitCounter
    }
    ant.Life = n.parameters.soldierLife
    ant.regularSpeed = n.parameters.soldierMinSpeed + rand.Float64()*.1
    ant.maxSpeed = n.parameters.soldierMaxSpeed + rand.Float64()*.1
    ant.speed = ant.regularSpeed
    ant.Direction = direction
    return ant
}
// decrLife removes val life points from the ant, clamping at zero.
// Already-dead ants (Life <= 0) are left untouched.
func (a *Ant) decrLife(ns *Nests, val int) {
    if a.Life <= 0 {
        return
    }
    a.Life -= val
    if a.Life < 0 {
        a.Life = 0
    }
}
// setNetwork gives the ant its neural network: with some probability it
// clones the nest's current best network for its type (when
// updateNetwork is on), otherwise it builds a fresh random network of
// shape inNb -> hidden(5..49) -> outNb.
func (a *Ant) setNetwork(ns *Nests) {
    if updateNetwork {
        if a.AntType == 0 {
            if a.nest.bestWorker != nil && rand.Float64() < a.nest.parameters.chanceToGetTheBestNetworkCopy {
                net, err := a.nest.bestWorker.network.Copy()
                if err == nil {
                    a.network = net
                    return
                }
                // on copy failure, fall through to a fresh random network
            }
        } else {
            if a.nest.bestSoldier != nil && rand.Float64() < a.nest.parameters.chanceToGetTheBestNetworkCopy {
                net, err := a.nest.bestSoldier.network.Copy()
                if err == nil {
                    a.network = net
                    return
                }
            }
        }
    }
    var defnet []int
    // NOTE(review): "|| true" forces the 3-layer branch; the 4-layer
    // alternative below is currently dead code — confirm intended.
    if rand.Float64() < 0.5 || true {
        defnet = make([]int, 3, 3)
        defnet[0] = inNb
        defnet[1] = int(5 + rand.Int31n(45))
        defnet[2] = outNb
    } else {
        defnet = make([]int, 4, 4)
        defnet[0] = inNb
        defnet[1] = int(5 + rand.Int31n(45))
        defnet[2] = int(5 + rand.Int31n(25))
        defnet[3] = outNb
    }
    net, _ := network.NewNetwork(defnet)
    a.network = net
}
// nextTime runs one full simulation tick for the ant: life decay, move,
// sense, happiness update, then the learning step. A happiness drop
// triggers a new decision (network-driven or random exploration,
// depending on the current good rate); a happiness gain reinforces the
// previous decision via train.
func (a *Ant) nextTime(ns *Nests, update bool) {
    if !a.tickLife(ns) {
        // ant died this tick
        return
    }
    a.lastLastDecision = a.lastDecision
    a.lastDecision = a.Direction
    a.displayInfo1(ns)
    a.moveOnOut(ns)
    if a.panic {
        // panicked ants skip sensing and learning entirely
        return
    }
    a.updateEntries(ns)
    a.computeHappiness(ns)
    a.displayInfo2(ns)
    if ns.log && a == ns.selected {
        a.printf(ns, "mode: %d entries: %s\n", a.mode, a.displayList(ns, a.entries, "%.2f"))
        a.printf(ns, "happiness=%.3f\n", a.happiness)
    }
    if a.happiness == a.lastHappiness && a.happiness >= 0 {
        a.printf(ns, "decision: no need\n")
    } else if a.happiness < a.lastHappiness {
        // situation worsened: pick a new direction
        a.printf(ns, "bad last decision: %d\n", a.lastDecision)
        // exploit the network more often as the good rate rises,
        // otherwise explore with a random direction
        if rand.Float64() > 1-(a.gRate-5)/100 && (a.lastResult || a.lastDecision != a.lastLastDecision) {
            a.decide(ns)
            a.printf(ns, "decision using network: %d\n", a.Direction)
            a.lastDecisionRandom = false
            a.statDecision.incr()
        } else {
            a.Direction = int(rand.Int31n(int32(outNb)))
            //a.Direction = a.index + 4
            //if a.Direction >= outNb {
            //a.Direction = a.Direction - outNb
            //}
            a.dirMap[a.Direction]++
            a.lastDecisionRandom = true
            a.statDecision.incr()
            a.printf(ns, "decision random: %d\n", a.Direction)
        }
        a.lastResult = false
    } else if a.lastDecision != -1 && a.mode == a.lastMode {
        // situation improved within the same mode: reinforce
        a.statGoodDecision.incr()
        a.lastResult = true
        a.printf(ns, "decision: no need\n")
        delta := (a.happiness - a.lastHappiness)
        // feed the per-mode delta average, rejecting outliers (> 3x average)
        if delta > 0 && (delta < a.averageHappinessDelta[a.mode]*3 || a.averageHappinessDelta[a.mode] == 0) {
            a.happinessDeltaSum[a.mode] += delta
            a.happinessDeltaNumber[a.mode]++
        }
        a.printf(ns, "positive decision %d, delta=%.4f average=%.4f\n", a.lastDecision, delta, a.averageHappinessDelta)
        _, ok := a.train(ns)
        if ok {
            a.printf(ns, "positive decision reinforced\n")
            a.statReinforce.incr()
            a.lastDecision = -1
        }
    }
    if update {
        a.update(ns)
    }
}
// tickLife applies periodic life decay and reports whether the ant is
// still alive. Workers and soldiers share the same decay logic and only
// differ by their nest-configured decay period, so the duplicated
// branches of the original are folded into one.
// NOTE(review): the entry guard is Life < 0, so an ant already at
// exactly 0 still passes until the next decay tick — confirm intended.
func (a *Ant) tickLife(ns *Nests) bool {
    if a.Life < 0 {
        return false
    }
    period := a.nest.parameters.workerLifeDecrPeriod
    if a.AntType != 0 {
        period = a.nest.parameters.soldierLifeDecrPeriod
    }
    if ns.timeRef%period == 0 {
        a.decrLife(ns, 1)
        if a.Life <= 0 {
            return false
        }
    }
    return true
}
// update, every 5000 ticks, lets an under-performing ant replace its
// network with a copy of the nest's best network for its type. The
// comparison uses direction diversity (dirCount) and the good rate.
func (a *Ant) update(ns *Nests) {
    if updateNetwork && ns.timeRef%5000 == 0 {
        bestAnt := a.nest.bestWorker
        if a.AntType == 1 {
            bestAnt = a.nest.bestSoldier
        }
        if a.dirCount < bestAnt.dirCount-3 || (a.dirCount <= bestAnt.dirCount && a.gRate < bestAnt.gRate-10) {
            net, err := bestAnt.network.Copy()
            if err == nil {
                a.network = net
                a.lastDecision = -1 // forget pending learning on the old network
                a.printf(ns, "update network with the best one: %v\n", a.network.Getdef())
            }
        }
    }
}
// decide asks the network for a direction: propagate the current entries
// and take the argmax of the outputs. Returns false when the ant carries
// food (no decision needed) or the entries carry no signal.
func (a *Ant) decide(ns *Nests) bool {
    if a.food != nil {
        return false
    }
    ins, ok := a.preparedEntries(a.entries)
    if !ok {
        a.printf(ns, "bad entries: %s\n", a.displayList(ns, ins, "%.3f"))
        return false
    }
    // NOTE(review): `ins` (binarized) is computed but the raw a.entries
    // are propagated instead — training uses the binarized form, so this
    // asymmetry looks like a bug; confirm.
    a.outs = a.network.Propagate(a.entries, true)
    if ns.log {
        a.printf(ns, "Compute decision, propagation: %s\n", a.displayList(ns, a.outs, "%.3f"))
    }
    // pick the output with the highest activation
    direction := 0
    max := 0.0
    for ii, out := range a.outs {
        if out > max {
            max = out
            direction = ii
        }
    }
    a.Direction = direction
    a.dirMap[direction]++
    return true
}
// commitPeriodStats closes a statistics period: it snapshots the number
// of distinct directions used, finalizes the per-mode happiness-delta
// averages, pushes all counters and recomputes the capped good rate.
// No-op while the simulation is stopped.
func (a *Ant) commitPeriodStats(ns *Nests) {
    if ns.stopped {
        return
    }
    // count how many distinct directions were chosen this period
    nb := 0
    for _, val := range a.dirMap {
        if val > 0 {
            nb++
        }
    }
    a.dirCount = nb
    a.dirMap = make([]int, outNb, outNb)
    //
    // fold the per-mode accumulators into averages (mode 0 is skipped)
    for ii := 1; ii < 5; ii++ {
        a.averageHappinessDelta[ii] = 0
        if a.happinessDeltaNumber[ii] > 0 {
            a.averageHappinessDelta[ii] = a.happinessDeltaSum[ii] / float64(a.happinessDeltaNumber[ii])
        }
        a.happinessDeltaSum[ii] = 0
        a.happinessDeltaNumber[ii] = 0
    }
    if a.statDecision.value != 0 {
        a.statDecision.push()
        a.statGoodDecision.push()
        a.statReinforce.push()
        a.statFade.push()
        a.gRate = float64(a.statGoodDecision.cumul) * 100.0 / float64(a.statDecision.cumul)
        if a.gRate > 100 {
            a.gRate = 100
        }
    }
}
// updateEntries refreshes the sensory input vector and sets the current
// mode by priority: hostiles (4), then for workers food (2) and
// pheromones (3), finally friends (1). Food carriers keep empty entries
// (mode 0). Previous entries are saved for training.
func (a *Ant) updateEntries(ns *Nests) {
    if ns.timeRef%100 == 0 {
        // fights time out every 100 ticks
        a.Fight = false
    }
    for ii := range a.entries {
        a.lastEntries[ii] = a.entries[ii]
        a.entries[ii] = 0
    }
    if a.food != nil {
        a.mode = 0
        a.printf(ns, "Carry food no entries\n")
        return
    }
    a.lastMode = a.mode
    a.mode = 0
    if a.updateEntriesForHostileAnts(ns) {
        a.mode = 4
        return
    }
    if a.AntType == 0 {
        if a.updateEntriesForFoods(ns) {
            a.mode = 2
            return
        }
        if a.updateEntriesForPheromones(ns) {
            a.mode = 3
            return
        }
    }
    a.updateEntriesForFriendAnts(ns)
    a.mode = 1
}
// updateEntriesForFoods scans all foods for the closest one not already
// carried, inside the ant's squared vision radius. Within pickup range
// (squared distance < 5) the ant grabs the food; otherwise the matching
// food-direction entry is set proportionally to proximity.
// Returns true when any food was found within vision.
func (a *Ant) updateEntriesForFoods(ns *Nests) bool {
    dist2Max := a.vision * a.vision // squared vision radius
    dist2m := dist2Max
    var foodMin *Food
    // keep the nearest uncarried food
    for _, food := range ns.foods {
        if !food.carried {
            dist2 := (food.X-a.X)*(food.X-a.X) + (food.Y-a.Y)*(food.Y-a.Y)
            if dist2 < dist2m {
                foodMin = food
                dist2m = dist2
            }
        }
    }
    a.printf(ns, "closest food: %+v\n", foodMin)
    if foodMin != nil {
        if dist2m < 5 {
            // close enough: pick it up and restart the pheromone counter
            a.carryFood(foodMin)
            a.pheromoneCount = 0
            return true
        }
        a.tmpFood = foodMin
        index := a.getDirection(ns, foodMin.X, foodMin.Y)
        // signal strength: 1 at distance 0, fading to 0 at the vision edge
        a.entries[visionNb+index] = ((dist2Max - dist2m) / dist2Max)
        return true
    }
    return false
}
// updateEntriesForPheromones accumulates pheromone-direction entries
// (indices [2*visionNb, 3*visionNb)) from active pheromones in an extended
// radius (9x squared vision), considered in increasing id order via
// minLevel. If the same nearest pheromone is sensed too long, the entries
// are zeroed to break loops. Returns true when any pheromone was sensed.
func (a *Ant) updateEntriesForPheromones(ns *Nests) bool {
    //minLevel := a.nest.parameters.pheromoneLevel + 1
    minLevel := 1000000 // sentinel: larger than any real pheromone id
    dist2Max := a.vision * a.vision * 9
    dist2m := dist2Max
    var pheMin *Pheromone
    for _, phe := range a.nest.pheromones {
        if phe.Level > 0 {
            dist2 := (phe.X-a.X)*(phe.X-a.X) + (phe.Y-a.Y)*(phe.Y-a.Y)
            if dist2 < dist2Max && phe.id < minLevel {
                minLevel = phe.id
                index := a.getDirection(ns, phe.X, phe.Y)
                //if ns.selected == a {
                //	fmt.Printf("Pheromone direction: %d\n", index)
                //}
                pheMin = phe
                // weight: stronger for fresher marks (low Level) and closer ones
                a.entries[visionNb*2+index] += (a.nest.parameters.pheromoneLevel - float64(phe.Level)) * ((dist2Max - dist2) / dist2Max)
                if dist2 < dist2m {
                    pheMin = phe
                    dist2m = dist2
                }
            }
        }
    }
    a.printf(ns, "pheromone: %+v\n", pheMin)
    if pheMin != nil {
        // count consecutive ticks spent on the same nearest pheromone
        if a.lastPheromone == pheMin.id {
            a.lastPheromoneCount++
        } else {
            a.lastPheromoneCount = 0
        }
        a.lastPheromone = pheMin.id
        if a.lastPheromoneCount > 200 {
            // stuck on the same mark: clear pheromone entries to escape
            a.printf(ns, "same pheromone too much time: ignored\n")
            for ii := visionNb * 2; ii < visionNb*3; ii++ {
                a.entries[ii] = 0
            }
        }
        return true
    }
    return false
}
// updateEntriesForHostileAnts fills the hostile-direction entries
// (indices [3*visionNb, 4*visionNb)). Soldiers (AntType 1) get a 16x
// extended radius and a pursue mode; workers only react under the global
// panicMode flag. Close contact triggers fights and life loss; workers
// may panic (5% chance here vs 10% in the earlier revision) and run home.
// Returns true when a hostile ant influenced the entries.
func (a *Ant) updateEntriesForHostileAnts(ns *Nests) bool {
    if a.AntType == 0 && !panicMode {
        return false
    }
    dist2Max := a.vision * a.vision
    dist2Contact := dist2Max / 8 // squared distance considered "in contact"
    if a.AntType == 1 {
        dist2Max = dist2Max * 16 // soldiers see hostiles much farther
    }
    //pursue mode
    if a.AntType == 1 && a.pursue != nil {
        if a.pursue.Life > 0 {
            a.printf(ns, "Pursue ant: %d\n", a.pursue.ID)
            dist2m := a.distAnt2(a.pursue)
            if dist2m > dist2Max*4 {
                // target escaped: give up the pursuit
                a.pursue = nil
            } else if dist2m < dist2Contact {
                // caught: damage the target and keep heading toward it
                a.pursue.decrLife(ns, 2)
                index := a.getDirection(ns, a.pursue.X, a.pursue.Y)
                a.entries[visionNb*3+index] = ((dist2Max - dist2m) / dist2Max)
                return true
            } else {
                index := a.getDirection(ns, a.pursue.X, a.pursue.Y)
                a.entries[visionNb*3+index] = ((dist2Max*4 - dist2m) / dist2Max / 4)
                return true
            }
        } else {
            // target died: drop the pursuit
            a.pursue = nil
        }
    }
    a.speed = a.regularSpeed
    a.timeWithoutHostile++
    var antMin *Ant
    dist2m := dist2Max
    // find the nearest living ant from any other nest
    for _, nest := range ns.nests {
        if nest.id != a.nest.id {
            for _, ant := range nest.ants {
                if ant.Life > 0 {
                    dist2 := a.distAnt2(ant)
                    if dist2 < dist2m {
                        antMin = ant
                        dist2m = dist2
                    }
                }
            }
        }
    }
    a.printf(ns, "closest hostile: %+v\n", antMin)
    if antMin != nil {
        a.soldierInitCounter = 0
        a.timeWithoutHostile = 0
        if a.AntType == 1 {
            // soldier: lock on and sprint
            a.pursue = antMin
            a.speed = a.maxSpeed
        }
        if dist2m < dist2Contact {
            a.Fight = true
            antMin.Fight = true
            if a.AntType == 1 {
                antMin.decrLife(ns, 2)
                a.decrLife(ns, 1)
            }
        }
        if a.AntType == 0 {
            // workers far from home may panic and flee, dropping any food
            if rand.Float64() < 0.05 && (a.X-a.nest.x)*(a.X-a.nest.x)+(a.Y-a.nest.y)*(a.Y-a.nest.y) > 4000 {
                a.printf(ns, "current ant panic mode\n")
                a.panic = true
                a.Fight = false
                if a.food != nil {
                    a.dropFood(ns)
                }
            }
        }
        index := a.getDirection(ns, antMin.X, antMin.Y)
        a.entries[visionNb*3+index] = ((dist2Max - dist2m) / dist2Max)
        return true
    }
    return false
}
// updateEntriesForFriendAnts finds the closest same-nest ant (workers
// only consider other food-free workers; soldiers only when idle for more
// than 5000 ticks). In this revision the friend-direction entry writes
// are commented out, so the function only reports whether a friend is
// within vision — the entries are left untouched.
func (a *Ant) updateEntriesForFriendAnts(ns *Nests) bool {
    dist2Max := a.vision * a.vision
    dist2m := dist2Max
    var antMin *Ant
    if a.AntType == 0 {
        // workers: track the nearest living, food-free fellow worker
        for _, ant := range a.nest.ants {
            if ant.Life > 0 && ant.AntType == 0 && ant != a && ant.food == nil {
                dist2 := a.distAnt2(ant)
                if dist2 < dist2m {
                    antMin = ant
                    dist2m = dist2
                }
            }
        }
        /*
        	for _, ant := range a.nest.ants {
        		if ant.Life > 0 && ant.AntType == 0 && ant != a && ant.food == nil {
        			dist2 := a.distAnt2(ant)
        			if dist2 < dist2Max {
        				antMin = ant
        				dist2m = dist2
        				index := a.getDirection(ns, antMin.X, antMin.Y)
        				a.entries[index] += ((dist2Max - dist2m) / dist2Max)
        			}
        		}
        	}
        */
    }
    if a.AntType == 1 && a.timeWithoutHostile > 5000 {
        // idle soldiers: nearest friend of any type
        for _, ant := range a.nest.ants {
            if ant.Life > 0 && ant != a && ant.food == nil {
                dist2 := a.distAnt2(ant)
                if dist2 < dist2m {
                    antMin = ant
                    dist2m = dist2
                }
            }
        }
        /*
        	for _, ant := range a.nest.ants {
        		if ant.Life > 0 && ant != a && ant.food == nil {
        			dist2 := a.distAnt2(ant)
        			if dist2 < dist2Max {
        				antMin = ant
        				dist2m = dist2
        				index := a.getDirection(ns, antMin.X, antMin.Y)
        				a.entries[index] += ((dist2Max - dist2m) / dist2Max)
        			}
        		}
        	}
        */
    }
    a.printf(ns, "closest friend: %+v\n", antMin)
    if antMin != nil {
        //index := a.getDirection(ns, antMin.X, antMin.Y)
        //a.index = index
        //a.entries[index] = ((dist2Max - dist2m) / dist2Max)
        return true
    }
    return false
}
// getDirection maps the bearing from the ant to (x, y) onto one of the
// outNb discrete direction sectors and returns the sector index in
// [0, outNb). The Atan2(dx, dy) argument order matches the Sin/Cos
// convention used by moveOnOut.
func (a *Ant) getDirection(ns *Nests, x float64, y float64) int {
    ang := math.Atan2(x-a.X, y-a.Y)
    if ang < 0 {
        // normalize into [0, 2*pi)
        ang = 2*math.Pi + ang
    }
    index := int(ang/(math.Pi*2.0/float64(outNb))) + 1
    if index >= outNb {
        return index - outNb
    }
    return index
}
// computeHappiness recomputes the ant's happiness from its sensory
// entries: friend-direction entries (indices [0, visionNb)) lower
// happiness, all other groups (food, pheromone, hostile) raise it.
// The previous value is preserved in lastHappiness for delta tracking.
func (a *Ant) computeHappiness(ns *Nests) {
    a.lastHappiness = a.happiness
    a.happiness = 0
    for ii, val := range a.entries {
        // range indices are never negative, so only the upper bound matters
        // (the original redundant "ii >= 0 &&" check was removed)
        if ii < visionNb {
            a.happiness -= val
        } else {
            a.happiness += val
        }
    }
}
// getDirIndex wraps a direction index into the valid range [0, outNb).
// Inputs are expected to be at most one period out of range.
func (a *Ant) getDirIndex(nn int) int {
    switch {
    case nn >= outNb:
        return nn - outNb
    case nn < 0:
        return nn + outNb
    default:
        return nn
    }
}
// moveOnOut advances the ant one simulation step. Food carriers and
// panicked ants head straight home; freshly spawned soldiers keep their
// launch vector while soldierInitCounter runs down; everyone else moves
// along the discrete Direction sector. Ants bouncing off the (2x scaled)
// world border get a randomized inward direction.
func (a *Ant) moveOnOut(ns *Nests) {
    a.lastx = a.X
    a.lasty = a.Y
    //for now the nest return is hard coded
    if a.food != nil || a.panic {
        a.moveToNest(ns)
        return
    }
    if a.AntType == 1 && a.soldierInitCounter > 0 {
        // launch phase: follow the initial (dx, dy) vector
        a.soldierInitCounter--
        a.X += a.dx * a.speed
        a.Y += a.dy * a.speed
    } else {
        // normal move: convert the sector index to an angle
        angle := (math.Pi * 2 * float64(a.Direction)) / float64(outNb) //+ math.Pi/2
        a.X += math.Sin(angle) * a.speed
        a.Y += math.Cos(angle) * a.speed
    }
    max := 2.0 // world border scale factor
    if a.X < ns.xmin*max {
        //a.X = ns.xmax
        a.X = ns.xmin * max
        a.Direction = 1 + int(rand.Intn(outNb/4+1))
    } else if a.Y < ns.ymin*max {
        //a.Y = ns.ymax
        a.Y = ns.ymin * max
        a.Direction = 7 + int(rand.Intn(outNb/4+1))
    } else if a.X > ns.xmax*max {
        //a.X = ns.xmin
        a.X = ns.xmax * max
        a.Direction = 5 + int(rand.Intn(outNb/4+1))
    } else if a.Y > ns.ymax*max {
        //a.Y = ns.ymin
        a.Y = ns.ymax * max
        a.Direction = 3 + int(rand.Intn(outNb/4+1))
    }
    // the random bounce above can overflow the sector range: wrap it
    if a.Direction >= outNb {
        a.Direction = a.Direction - outNb
    }
}
// moveToNest walks the ant straight back to its nest (doubled speed when
// panicking), dragging any carried food along and periodically dropping
// food pheromones. On arrival (squared distance < 1600): a panicking ant
// spawns a soldier aimed back the way it came, and a food carrier banks
// the food, spawns a worker, and turns around.
func (a *Ant) moveToNest(ns *Nests) {
    speed := a.speed
    if a.panic {
        a.Fight = false
        speed = speed * 2
    }
    // normalized vector toward the nest
    dd := math.Sqrt(float64((a.nest.x-a.X)*(a.nest.x-a.X) + (a.nest.y-a.Y)*(a.nest.y-a.Y)))
    dx := (a.nest.x - a.X) / dd
    dy := (a.nest.y - a.Y) / dd
    a.Direction = a.getDirection(ns, a.nest.x, a.nest.y)
    a.X += dx * speed
    a.Y += dy * speed
    if a.food != nil {
        // carried food follows the ant (offset by 1 for non-primary nests,
        // presumably for rendering — NOTE(review): confirm)
        if a.nest.id == 1 {
            a.food.X = a.X
            a.food.Y = a.Y
        } else {
            a.food.X = a.X + 1
            a.food.Y = a.Y + 1
        }
        a.pheromoneDelay--
        if a.pheromoneDelay <= 0 {
            a.printf(ns, "add food pheromone\n")
            a.pheromoneCount++
            a.nest.addPheromone(a.X, a.Y, a.pheromoneCount)
            a.pheromoneDelay = a.nest.parameters.pheromoneAntDelay
        }
    }
    if (a.nest.x-a.X)*(a.nest.x-a.X)+(a.nest.y-a.Y)*(a.nest.y-a.Y) < 1600 {
        // arrived: compute the opposite (outbound) direction
        direc := a.Direction + outNb/2
        if direc >= outNb {
            direc = direc - outNb
        }
        if a.panic {
            a.nest.addSoldier(ns, a.X, a.Y, -dx, -dy, direc)
            a.panic = false
        }
        if a.food != nil {
            a.nest.ressource += 4
            if len(ns.foodGroups) > 0 {
                if foodRenew {
                    a.food.renew()
                }
            }
            a.Direction = direc
            a.nest.addWorker(ns, a.X, a.Y, direc)
            a.dropFood(ns)
        }
    }
}
// train reinforces the network on the last decision when that decision was
// random and produced a happiness gain. The number of back-propagation
// passes (nb) scales with the gain via getNbTrain. Returns the number of
// passes run and whether the decision counts as handled.
func (a *Ant) train(ns *Nests) (int, bool) {
    if a.lastDecision < 0 {
        // no recorded decision to learn from
        return 0, false
    }
    if a.trained {
        return 0, true
    }
    nb := a.getNbTrain(ns)
    if nb == 0 {
        return 0, false
    }
    ins, ok := a.preparedEntries(a.lastEntries)
    if !ok {
        // all-zero entries carry no signal: nothing to train on
        return 0, false
    }
    if !a.lastDecisionRandom {
        // network-made decisions are not reinforced, only random ones
        return 0, true
    }
    //train a much as the decision appears good concidering delta happiness stats
    for ii := 0; ii < nb; ii++ {
        a.network.Propagate(ins, false)
        a.setOuts(a.lastDecision)
        if a == ns.selected {
            ns.addSample(ins, a.outs)
        }
        a.network.BackPropagate(a.outs)
    }
    return nb, true
}
// getNbTrain derives how many training passes the last decision deserves,
// from the happiness delta relative to the running average for the current
// mode. Returns 0 when no positive average exists yet.
// NOTE(review): the first two delta branches both yield deltaBase and the
// last two both yield deltaBase+1 — looks like a leftover of a finer
// partition; confirm before simplifying.
func (a *Ant) getNbTrain(ns *Nests) int {
    delta := a.happiness - a.lastHappiness
    var ret int
    if a.averageHappinessDelta[a.mode] <= 0 {
        ret = 0
    } else if delta < a.averageHappinessDelta[a.mode]/2 {
        ret = deltaBase
    } else if delta < a.averageHappinessDelta[a.mode] {
        ret = deltaBase
    } else if delta < a.averageHappinessDelta[a.mode]*1.5 {
        ret = deltaBase + 1
    } else {
        ret = deltaBase + 1
    }
    //a.printf(ns, "Positive decision, delta=%.3f average=%.3f max=%.3f nbTrain=%d\n", delta, a.averageHappinessDelta, a.happinnessDeltaMax, ret)
    a.printf(ns, "Positive decision, delta=%.3f average=%.3f nbTrain=%d\n", delta, a.averageHappinessDelta, ret)
    return ret * deltaCoef
}
// fadeLastDecision punishes the last decision by training the network
// toward a "faded" target (0.2 everywhere, 0 at the chosen direction).
// Returns true when a back-propagation pass was applied.
func (a *Ant) fadeLastDecision(ns *Nests) bool {
    if a.lastDecision == -1 || a.trained {
        return false
    }
    ins, ok := a.preparedEntries(a.lastEntries)
    if !ok {
        return false
    }
    outs := a.network.Propagate(ins, true)
    a.setOutsFaded(a.lastDecision)
    if ns.log && a == ns.selected {
        // debug instrumentation for the selected ant only
        a.computeTrainResult(a.outs, outs)
        a.printf(ns, "fade last decision: %d\n", a.lastDecision)
        ns.addSample(ins, a.outs)
    }
    a.network.BackPropagate(a.outs)
    return true
}
// preparedEntries binarizes the given entry vector: every strictly
// positive value becomes 1, all other values are kept as-is. The second
// result is false when no entry was positive (nothing to learn from).
func (a *Ant) preparedEntries(list []float64) ([]float64, bool) {
    prepared := make([]float64, len(list))
    hasSignal := false
    for ii, val := range list {
        if val > 0 {
            prepared[ii] = 1 //val
            hasSignal = true
        } else {
            prepared[ii] = val
        }
    }
    return prepared, hasSignal
}
// setOuts rewrites the output vector as a one-hot encoding of direction.
func (a *Ant) setOuts(direction int) {
    for ii := 0; ii < len(a.outs); ii++ {
        a.outs[ii] = 0
    }
    a.outs[direction] = 1
}
// setOutsFaded builds the punishment target: a flat 0.2 everywhere with
// 0 at the direction that is being discouraged.
func (a *Ant) setOutsFaded(lastDecision int) {
    for ii := 0; ii < len(a.outs); ii++ {
        a.outs[ii] = 0.2
    }
    a.outs[lastDecision] = 0
}
// dropFood releases the carried food, if any. The food is only marked
// available again when foodRenew is enabled — otherwise it stays flagged
// as carried, presumably meaning "consumed" (NOTE(review): confirm).
func (a *Ant) dropFood(ns *Nests) {
    if a.food != nil {
        if foodRenew {
            a.food.carried = false
        }
        a.food = nil
    }
}
// carryFood attaches the given food to this ant unless another ant is
// already carrying it.
func (a *Ant) carryFood(f *Food) {
    if f.carried {
        return
    }
    f.carried = true
    a.food = f
}
// displayInfo1 logs a per-tick debug header for the currently selected
// ant: identity, reinforcement/decision counters, period and global good
// rates, network shape, happiness and last decisions. No-op unless
// logging is on and this ant is selected.
func (a *Ant) displayInfo1(ns *Nests) {
    if ns.log && ns.selected == a {
        a.printf(ns, "-----------------------------------------------------\n")
        // NOTE(review): yields +Inf/NaN when statDecision.scumul is 0, and the
        // stray ")" after the %% verbs looks like a leftover — confirm.
        ggRate := float64(a.statReinforce.scumul) * 100.0 / float64(a.statDecision.scumul)
        a.printf(ns, "%d:[%d] type=%d reinforce:%d decision:%d period:good=%.2f%%) global:good=%.2f%%)\n", ns.timeRef, a.ID, a.AntType, a.statReinforce.cumul, a.statDecision.cumul, a.gRate, ggRate)
        // fixed typo in log message: "hapiness" -> "happiness"
        a.printf(ns, "network=%v happiness=%.5f direction: %d last decision: %d (%d) result: %t\n", a.network.Getdef(), a.happiness, a.Direction, a.lastDecision, a.lastLastDecision, a.lastResult)
    }
}
// displayInfo2 logs the happiness delta and the per-mode delta averages
// for the selected ant. No-op unless logging is on and this ant is selected.
func (a *Ant) displayInfo2(ns *Nests) {
    if ns.log && ns.selected == a {
        // %.4f on the [5]float64 array prints each element with 4 decimals
        a.printf(ns, "delta=%.4f average=%.4f\n", a.happiness-a.lastHappiness, a.averageHappinessDelta)
    }
}
// getModeToString returns a human-readable label for the ant's current
// state. Transient states (panic, carrying food, fighting, pursuing)
// take precedence over the generic sensing-mode label.
func (a *Ant) getModeToString() string {
    if a.panic {
        return "panic"
    }
    if a.food != nil {
        return "carry food"
    }
    if a.AntType == 0 {
        if a.Fight {
            return "attacked"
        }
        return modes[a.mode]
    }
    if a.Fight {
        return "attack"
    }
    if a.pursue != nil {
        return "pursue"
    }
    return modes[a.mode]
}
|
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package sigmon
import (
"sync"
"syscall"
"testing"
"time"
)
// sigs lists every OS signal the monitor handles; the tests below raise
// each one against the current process.
var (
    sigs = []syscall.Signal{
        syscall.SIGHUP,
        syscall.SIGINT,
        syscall.SIGTERM,
        syscall.SIGUSR1,
        syscall.SIGUSR2,
    }
)
// checkable is a mutex-guarded test fixture whose handler copies id into
// val and counts invocations, letting tests verify a handler ran.
type checkable struct {
    sync.Mutex
    id  int // fixed identity assigned by the test
    val int // set to id each time handler runs
    ct  int // number of handler invocations
}
// handler is the SignalMonitor callback under test: it records that it
// ran by copying id into val and bumping the invocation count.
func (c *checkable) handler(sm *SignalMonitor) {
    c.Lock()
    defer c.Unlock()
    c.val = c.id
    c.ct++
}
// info returns a consistent snapshot of the fixture's state under lock.
func (c *checkable) info() (id, val, ct int) {
    c.Lock()
    defer c.Unlock()
    return c.id, c.val, c.ct
}
// TestUnitSignalJunctionConnect verifies that a connected junction
// receives every signal raised against the process.
func TestUnitSignalJunctionConnect(t *testing.T) {
    j := newSignalJunction()
    j.connect()
    for _, s := range sigs {
        if err := callOSSignal(s); err != nil {
            t.Errorf("unexpected error when calling %s: %s", s, err)
        }
    }
    if !receiveOnAll(j) {
        t.Fatal("should not wait forever")
    }
}
// TestUnitSignalJunctionDisconnect verifies that a disconnected junction
// delivers nothing: receiveOnAll must time out rather than receive.
func TestUnitSignalJunctionDisconnect(t *testing.T) {
    j := newSignalJunction()
    j.connect()
    j.disconnect()
    if receiveOnAll(j) {
        t.Fatal("should wait forever")
    }
}
// TestUnitSignalHandlerRegister checks that registering a second handler
// replaces the first: only c2's function should be waiting in the
// registry channel, so running it touches c2 but leaves c1 untouched.
func TestUnitSignalHandlerRegister(t *testing.T) {
    c1 := &checkable{id: 123}
    c2 := &checkable{id: 234}
    h := newSignalHandler(nil)
    h.register(c1.handler)
    h.register(c2.handler)
    select {
    case fn := <-h.registry:
        if fn == nil {
            t.Error("want function, got nil")
        }
        fn(&SignalMonitor{})
    case <-time.After(time.Millisecond):
        t.Error("should not wait forever")
    }
    // c1's handler must not have run
    _, c1Val, _ := c1.info()
    if 0 != c1Val {
        t.Errorf("want %d, got %d", 0, c1Val)
    }
    // c2's handler must have run (val == id)
    c2ID, c2Val, _ := c2.info()
    if c2ID != c2Val {
        t.Errorf("want %d, got %d", c2ID, c2Val)
    }
}
// TestUnitSignalHandlerSet checks that set installs a handler that runs
// when invoked directly.
func TestUnitSignalHandlerSet(t *testing.T) {
    c := &checkable{id: 123}
    h := newSignalHandler(nil)
    h.set(c.handler)
    h.handler(&SignalMonitor{})
    id, val, _ := c.info()
    if id != val {
        t.Errorf("want %d, got %d", id, val)
    }
}
// TestUnitSignalHandlerHandle checks that a handler passed to the
// constructor runs via handle.
func TestUnitSignalHandlerHandle(t *testing.T) {
    c := &checkable{id: 123}
    h := newSignalHandler(c.handler)
    h.handle(&SignalMonitor{})
    id, val, _ := c.info()
    if id != val {
        t.Errorf("want %d, got %d", id, val)
    }
}
// TestUnitSignalMonitorSet checks that Set queues a non-nil handler
// function on the monitor's registry channel.
func TestUnitSignalMonitorSet(t *testing.T) {
    c := &checkable{id: 123}
    m := New(nil)
    m.Set(c.handler)
    select {
    case fn := <-m.handler.registry:
        if fn == nil {
            t.Error("want function, got nil")
        }
    case <-time.After(time.Millisecond):
        t.Error("should not wait forever")
    }
}
// TestUnitSignalMonitorScan exercises scan: an off signal should make it
// return false, then a registry update plus five OS-signal deliveries
// should each return true.
// NOTE(review): the whole body runs inside time.AfterFunc(time.Second, ...),
// i.e. on another goroutine after the test function has returned — the
// assertions may never execute or may fire after completion; confirm this
// is intentional before relying on the test.
func TestUnitSignalMonitorScan(t *testing.T) {
    m := New(nil)
    time.AfterFunc(time.Second, func() {
        go func() {
            m.off <- struct{}{}
        }()
        want, got := false, m.scan()
        if want != got {
            t.Errorf("want %t, got %t", want, got)
        }
        go func() {
            m.handler.registry <- func(sm *SignalMonitor) {}
            m.junction.sighup <- syscall.SIGHUP
            m.junction.sigint <- syscall.SIGINT
            m.junction.sigterm <- syscall.SIGTERM
            m.junction.sigusr1 <- syscall.SIGUSR1
            m.junction.sigusr2 <- syscall.SIGUSR2
        }()
        for i := 0; i < 6; i++ {
            want, got := true, m.scan()
            if want != got {
                t.Errorf("want %t, got %t", want, got)
            }
        }
    })
}
// TestUnitSignalMonitorBiasedScan queues a signal, an off message and a
// registry update simultaneously, then runs biasedScan twice: the bias
// should consume off/registry first, leaving the sighup still pending.
func TestUnitSignalMonitorBiasedScan(t *testing.T) {
    m := New(nil)
    wg := sync.WaitGroup{}
    wg.Add(1)
    // all three senders are released together by wg.Done below
    go func() {
        wg.Wait()
        m.junction.sighup <- syscall.SIGHUP
    }()
    go func() {
        wg.Wait()
        m.off <- struct{}{}
    }()
    go func() {
        wg.Wait()
        m.handler.registry <- func(sm *SignalMonitor) {}
    }()
    wg.Done()
    delay()
    m.biasedScan()
    m.biasedScan()
    // the signal must still be pending if the bias is correct
    select {
    case <-m.junction.sighup:
    default:
        t.Error("bias may be wrong")
    }
}
// TestUnitSignalMonitorRun checks that Run flips the monitor on (and is
// idempotent when called twice), that a raised SIGHUP reaches the handler
// exactly once, and that Stop leaves the fixture consistent.
func TestUnitSignalMonitorRun(t *testing.T) {
    c := &checkable{id: 123}
    m := New(c.handler)
    if m.on {
        t.Errorf("want %t, got %t", false, m.on)
    }
    m.Run()
    m.Run() // second call must be a no-op
    if !m.on {
        t.Errorf("want %t, got %t", true, m.on)
    }
    s := syscall.SIGHUP
    if err := callOSSignal(s); err != nil {
        t.Errorf("unexpected error when calling %s: %s", s, err)
    }
    m.Stop()
    id, val, ct := c.info()
    if id != val {
        t.Errorf("want %d, got %d", id, val)
    }
    if ct > 1 {
        t.Error("signal possibly connected multiple times")
    }
}
// TestUnitSignalMonitorStop checks that Stop turns the monitor off and,
// via a second monitor raised after the stop, that the first monitor's
// handler is not invoked again (its count stays at 1).
func TestUnitSignalMonitorStop(t *testing.T) {
    c := &checkable{id: 123}
    m := New(c.handler)
    m.Run()
    s := syscall.SIGHUP
    if err := callOSSignal(s); err != nil {
        t.Errorf("unexpected error when calling %s: %s", s, err)
    }
    m.Stop()
    if m.on {
        t.Errorf("want %t, got %t", false, m.on)
    }
    // a fresh monitor absorbs the next signal; c must not see it
    mx := New(nil)
    mx.Run()
    if err := callOSSignal(s); err != nil {
        t.Errorf("unexpected error when calling %s: %s", s, err)
    }
    mx.Stop()
    _, _, ct := c.info()
    if 1 != ct {
        t.Errorf("want %d, got %d", 1, ct)
    }
}
// TestUnitSignalMonitorSig checks that Sig reports the stored signal value.
func TestUnitSignalMonitorSig(t *testing.T) {
    m := New(nil)
    m.sig = SIGHUP
    want, got := SIGHUP, m.Sig()
    if want != got {
        t.Errorf("want %s, got %s", want, got)
    }
}
// receiveOnAll tries to receive five deliveries from the junction's
// signal channels (any mix). Returns false as soon as one receive times
// out (100µs), true when all five arrive.
func receiveOnAll(j *signalJunction) bool {
    for i := 0; i < 5; i++ {
        select {
        case <-j.sighup:
        case <-j.sigint:
        case <-j.sigterm:
        case <-j.sigusr1:
        case <-j.sigusr2:
        case <-time.After(time.Microsecond * 100):
            return false
        }
    }
    return true
}
// callOSSignal raises the given signal against the current process and
// busy-waits briefly so the signal has time to propagate to receivers.
func callOSSignal(s syscall.Signal) error {
    if err := syscall.Kill(syscall.Getpid(), s); err != nil {
        return err
    }
    // delay for requested signal propagation
    delay()
    return nil
}
// delay busy-waits briefly so concurrent sends and OS signal delivery can
// settle before the caller continues.
func delay() {
	for n := 0; n < 1<<21; n++ {
	}
}
Increase delay/timeout durations in unit tests.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris
package sigmon
import (
"sync"
"syscall"
"testing"
"time"
)
var (
	// sigs lists every signal sigmon monitors; tests raise each one to
	// exercise the full junction fan-in.
	sigs = []syscall.Signal{
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGUSR1,
		syscall.SIGUSR2,
	}
)

// checkable is a test helper whose handler records that it was invoked.
type checkable struct {
	sync.Mutex
	id  int // fixed identity assigned by the test
	val int // set to id each time handler runs
	ct  int // number of handler invocations
}

// handler marks the checkable as invoked and counts the invocation.
func (c *checkable) handler(sm *SignalMonitor) {
	c.Lock()
	defer c.Unlock()
	c.val = c.id
	c.ct++
}

// info returns a consistent snapshot of the checkable's fields.
func (c *checkable) info() (id, val, ct int) {
	c.Lock()
	defer c.Unlock()
	return c.id, c.val, c.ct
}
// TestUnitSignalJunctionConnect checks that connect wires all monitored
// signals so each raised signal can be received from the junction.
func TestUnitSignalJunctionConnect(t *testing.T) {
	j := newSignalJunction()
	j.connect()
	for _, s := range sigs {
		if err := callOSSignal(s); err != nil {
			t.Errorf("unexpected error when calling %s: %s", s, err)
		}
	}
	if !receiveOnAll(j) {
		t.Fatal("should not wait forever")
	}
}

// TestUnitSignalJunctionDisconnect checks that disconnect stops delivery to
// the junction channels.
func TestUnitSignalJunctionDisconnect(t *testing.T) {
	j := newSignalJunction()
	j.connect()
	j.disconnect()
	if receiveOnAll(j) {
		t.Fatal("should wait forever")
	}
}

// TestUnitSignalHandlerRegister checks that register keeps only the most
// recently registered handler and that only that handler runs.
func TestUnitSignalHandlerRegister(t *testing.T) {
	c1 := &checkable{id: 123}
	c2 := &checkable{id: 234}
	h := newSignalHandler(nil)
	h.register(c1.handler)
	h.register(c2.handler)
	select {
	case fn := <-h.registry:
		if fn == nil {
			t.Error("want function, got nil")
		}
		fn(&SignalMonitor{})
	case <-time.After(time.Millisecond):
		t.Error("should not wait forever")
	}
	// c1 was displaced by c2 and must not have run
	_, c1Val, _ := c1.info()
	if 0 != c1Val {
		t.Errorf("want %d, got %d", 0, c1Val)
	}
	c2ID, c2Val, _ := c2.info()
	if c2ID != c2Val {
		t.Errorf("want %d, got %d", c2ID, c2Val)
	}
}

// TestUnitSignalHandlerSet checks that set installs a handler directly.
func TestUnitSignalHandlerSet(t *testing.T) {
	c := &checkable{id: 123}
	h := newSignalHandler(nil)
	h.set(c.handler)
	h.handler(&SignalMonitor{})
	id, val, _ := c.info()
	if id != val {
		t.Errorf("want %d, got %d", id, val)
	}
}

// TestUnitSignalHandlerHandle checks that handle invokes the stored handler.
func TestUnitSignalHandlerHandle(t *testing.T) {
	c := &checkable{id: 123}
	h := newSignalHandler(c.handler)
	h.handle(&SignalMonitor{})
	id, val, _ := c.info()
	if id != val {
		t.Errorf("want %d, got %d", id, val)
	}
}

// TestUnitSignalMonitorSet checks that Set queues a handler on the registry.
func TestUnitSignalMonitorSet(t *testing.T) {
	c := &checkable{id: 123}
	m := New(nil)
	m.Set(c.handler)
	select {
	case fn := <-m.handler.registry:
		if fn == nil {
			t.Error("want function, got nil")
		}
	case <-time.After(time.Millisecond):
		t.Error("should not wait forever")
	}
}
// TestUnitSignalMonitorScan verifies that scan returns false when the off
// channel fires and true for registry and signal channel receives.
//
// The body was previously wrapped in time.AfterFunc(time.Second*6, ...),
// which only schedules the closure: the test function returned immediately,
// the assertions ran (if at all) after the test had completed, and any
// t.Errorf at that point panics with "Log in goroutine after test has
// completed". The body now runs synchronously inside the test.
func TestUnitSignalMonitorScan(t *testing.T) {
	m := New(nil)
	go func() {
		m.off <- struct{}{}
	}()
	want, got := false, m.scan()
	if want != got {
		t.Errorf("want %t, got %t", want, got)
	}
	go func() {
		m.handler.registry <- func(sm *SignalMonitor) {}
		m.junction.sighup <- syscall.SIGHUP
		m.junction.sigint <- syscall.SIGINT
		m.junction.sigterm <- syscall.SIGTERM
		m.junction.sigusr1 <- syscall.SIGUSR1
		m.junction.sigusr2 <- syscall.SIGUSR2
	}()
	for i := 0; i < 6; i++ {
		want, got := true, m.scan()
		if want != got {
			t.Errorf("want %t, got %t", want, got)
		}
	}
}
// TestUnitSignalMonitorBiasedScan verifies that biasedScan prefers draining
// the off and registry channels over a pending OS signal.
func TestUnitSignalMonitorBiasedScan(t *testing.T) {
	m := New(nil)
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		wg.Wait()
		m.junction.sighup <- syscall.SIGHUP
	}()
	go func() {
		wg.Wait()
		m.off <- struct{}{}
	}()
	go func() {
		wg.Wait()
		m.handler.registry <- func(sm *SignalMonitor) {}
	}()
	wg.Done()
	// delay so that each channel is filled simultaneously
	delay()
	m.biasedScan()
	m.biasedScan()
	// after two biased scans the OS signal should still be pending; if it
	// was consumed first, the bias is wrong
	select {
	case <-m.junction.sighup:
	default:
		t.Error("bias may be wrong")
	}
}

// TestUnitSignalMonitorRun checks that Run is idempotent, turns the monitor
// on, and that a delivered SIGHUP invokes the handler exactly once.
func TestUnitSignalMonitorRun(t *testing.T) {
	c := &checkable{id: 123}
	m := New(c.handler)
	if m.on {
		t.Errorf("want %t, got %t", false, m.on)
	}
	m.Run()
	m.Run()
	if !m.on {
		t.Errorf("want %t, got %t", true, m.on)
	}
	s := syscall.SIGHUP
	if err := callOSSignal(s); err != nil {
		t.Errorf("unexpected error when calling %s: %s", s, err)
	}
	m.Stop()
	id, val, ct := c.info()
	if id != val {
		t.Errorf("want %d, got %d", id, val)
	}
	if ct > 1 {
		t.Error("signal possibly connected multiple times")
	}
}

// TestUnitSignalMonitorStop checks that Stop flips the monitor off and that
// a stopped monitor's handler no longer fires for later signals.
func TestUnitSignalMonitorStop(t *testing.T) {
	c := &checkable{id: 123}
	m := New(c.handler)
	m.Run()
	s := syscall.SIGHUP
	if err := callOSSignal(s); err != nil {
		t.Errorf("unexpected error when calling %s: %s", s, err)
	}
	m.Stop()
	if m.on {
		t.Errorf("want %t, got %t", false, m.on)
	}
	// a second monitor must not route signals to the stopped one's handler
	mx := New(nil)
	mx.Run()
	if err := callOSSignal(s); err != nil {
		t.Errorf("unexpected error when calling %s: %s", s, err)
	}
	mx.Stop()
	_, _, ct := c.info()
	if 1 != ct {
		t.Errorf("want %d, got %d", 1, ct)
	}
}

// TestUnitSignalMonitorSig checks that Sig reports the stored signal.
func TestUnitSignalMonitorSig(t *testing.T) {
	m := New(nil)
	m.sig = SIGHUP
	want, got := SIGHUP, m.Sig()
	if want != got {
		t.Errorf("want %s, got %s", want, got)
	}
}
// receiveOnAll tries to receive five values across the junction's signal
// channels, reporting false if any single receive times out.
func receiveOnAll(j *signalJunction) bool {
	for i := 0; i < 5; i++ {
		select {
		case <-j.sighup:
		case <-j.sigint:
		case <-j.sigterm:
		case <-j.sigusr1:
		case <-j.sigusr2:
		case <-time.After(time.Microsecond * 100):
			return false
		}
	}
	return true
}

// callOSSignal raises s against the current process and waits briefly so the
// runtime can deliver it before the caller asserts anything.
func callOSSignal(s syscall.Signal) error {
	if err := syscall.Kill(syscall.Getpid(), s); err != nil {
		return err
	}
	// delay for requested signal propagation
	delay()
	return nil
}
// delay busy-waits briefly so concurrent sends and OS signal delivery can
// settle before the caller continues.
func delay() {
	for n := 0; n < 1<<23; n++ {
	}
}
|
package keys
import (
"encoding/hex"
"errors"
)
// HexBytes is a byte slice that marshals to and from JSON as a quoted
// hexadecimal string.
type HexBytes []byte

// UnmarshalJSON decodes a quoted JSON hex string into the receiver.
func (b *HexBytes) UnmarshalJSON(data []byte) error {
	n := len(data)
	malformed := n < 2 || n%2 != 0 || data[0] != '"' || data[n-1] != '"'
	if malformed {
		return errors.New("tuf: invalid JSON hex bytes")
	}
	decoded := make([]byte, hex.DecodedLen(n-2))
	if _, err := hex.Decode(decoded, data[1:n-1]); err != nil {
		return err
	}
	*b = decoded
	return nil
}

// MarshalJSON encodes the bytes as a quoted JSON hex string.
func (b HexBytes) MarshalJSON() ([]byte, error) {
	out := make([]byte, hex.EncodedLen(len(b))+2)
	out[0], out[len(out)-1] = '"', '"'
	hex.Encode(out[1:], b)
	return out, nil
}

// String returns the plain (unquoted) hex encoding.
func (b HexBytes) String() string {
	return hex.EncodeToString(b)
}
Removing unused hex_bytes.go
Signed-off-by: Diogo Monica <47da36337c9140e2e9f1517a0ddeb0025e0c3310@docker.com>
|
package netengine
import (
"errors"
"fmt"
"net"
"sync"
"time"
)
// Init allocates the connection/listener tables and all request channels,
// then starts the manage goroutine that serializes access to them.
func (c *NetEngine) Init() error {
	c.conntion_list = make(map[int]*conntion)
	c.listener_list = make(map[int]*listener)
	c.id = 1
	c.lock = new(sync.Mutex)
	c.add_conntion_chan = make(chan add_conntion_msg)
	c.del_conntion_chan = make(chan int)
	c.stop_chan = make(chan stop_msg)
	c.get_remote_addr_chan = make(chan get_addr_msg)
	c.get_local_addr_chan = make(chan get_addr_msg)
	c.add_listen_chan = make(chan add_listen_msg)
	c.start_chan = make(chan start_msg)
	// buffered so asynchronous Send rarely blocks the caller
	c.send_chan = make(chan send_msg, 1024)
	c.get_sendfunc_chan = make(chan get_sendfunc_msg)
	c.close_chan = make(chan close_msg)
	go c.manage_run()
	return nil
}
// Stop asks the manage goroutine to shut down, waits for its acknowledgement,
// then closes every request channel. Callers racing with the close will panic
// on send; the public methods recover from that panic and report failure.
func (c *NetEngine) Stop() {
	var msg stop_msg
	msg.ch = make(chan int)
	c.stop_chan <- msg
	// block until the manage goroutine acknowledges the stop request
	// (bare receive replaces the non-idiomatic `_ = <-msg.ch`)
	<-msg.ch
	// close all chan
	close(c.get_remote_addr_chan)
	close(c.get_local_addr_chan)
	close(c.add_listen_chan)
	close(c.add_conntion_chan)
	close(c.del_conntion_chan)
	close(c.start_chan)
	close(c.send_chan)
	close(c.close_chan)
}
// GetRemoteAddr returns the peer address of connection id. The deferred
// recover absorbs the panic from sending on a channel closed by Stop, in
// which case the zero values are returned.
func (c *NetEngine) GetRemoteAddr(id int) (net.Addr, bool) {
	defer func() {
		recover()
	}()
	var msg get_addr_msg
	msg.ID = id
	msg.ch = make(chan net.Addr)
	c.get_remote_addr_chan <- msg
	addr, ok := <-msg.ch
	if !ok {
		return nil, false
	}
	return addr, true
}

// GetLocalAddr returns the local address of connection id; see GetRemoteAddr
// for the recover rationale.
func (c *NetEngine) GetLocalAddr(id int) (net.Addr, bool) {
	defer func() {
		recover()
	}()
	var msg get_addr_msg
	msg.ID = id
	msg.ch = make(chan net.Addr)
	c.get_local_addr_chan <- msg
	addr, ok := <-msg.ch
	if !ok {
		return nil, false
	}
	return addr, true
}
// AddListen registers lis with the engine and returns the id assigned to it.
// The recover guards against the engine having been stopped.
func (c *NetEngine) AddListen(lis net.Listener, notify NetNotify) (id int, err error) {
	defer func() {
		recover()
	}()
	var msg add_listen_msg
	msg.Lis = lis
	msg.Notify = notify
	msg.ch = make(chan listen_ret_msg)
	c.add_listen_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		return 0, errors.New("can't get result")
	}
	return r.ID, r.err
}

// AddConnection registers an established connection together with its buffer
// sizes and timeouts, returning the id assigned to it.
func (c *NetEngine) AddConnection(con net.Conn, notify NetNotify, recvBufLen, maxSendBufLen int, readTimeout, writeTimeout time.Duration) (id int, err error) {
	defer func() {
		recover()
	}()
	var msg add_conntion_msg
	msg.Con = con
	msg.Notify = notify
	msg.MaxBufLen = maxSendBufLen
	msg.RecvBufLen = recvBufLen
	msg.ReadTimeout = readTimeout
	msg.WriteTimeout = writeTimeout
	msg.ch = make(chan add_conntion_ret_msg)
	c.add_conntion_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		return 0, errors.New("can't get result")
	}
	return r.ID, r.err
}
/*
Typical call order: Listen or ConnectTo, then SetBuffer/SetCloseTime, then
Start.
*/

// Start begins servicing the connection or listener identified by id. The
// recover guards against the engine having been stopped.
func (c *NetEngine) Start(id int) {
	defer func() {
		recover()
	}()
	var msg start_msg
	msg.ID = id
	c.start_chan <- msg
}

// Send is asynchronous; it does not retain data (the payload is copied
// before being queued).
func (c *NetEngine) Send(id int, data []byte) {
	defer func() {
		recover()
	}()
	var msg send_msg
	msg.ID = id
	msg.Data = make([]byte, len(data))
	copy(msg.Data, data)
	c.send_chan <- msg
}
// GetSendFunc synchronously retrieves the send function bound to connection
// id, or nil if the engine is stopped or no function is available.
func (c *NetEngine) GetSendFunc(id int) SendFunc {
	defer func() {
		recover() // guards against sending on a channel closed by Stop
	}()
	var msg get_sendfunc_msg
	msg.ID = id
	msg.ch = make(chan SendFunc)
	c.get_sendfunc_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		fmt.Println("can't get send func")
		return nil
	}
	if r == nil {
		// was "can't get nil send func", which described the opposite of
		// what happened; report the actual condition
		fmt.Println("got nil send func")
	}
	return r
}
// Close asynchronously requests that connection id be closed. The recover
// guards against the engine having been stopped.
func (c *NetEngine) Close(id int) {
	defer func() {
		recover()
	}()
	var msg close_msg
	msg.ID = id
	c.close_chan <- msg
}
修改发送队列大小为10W
package netengine
import (
"errors"
"fmt"
"net"
"sync"
"time"
)
// Init allocates the connection/listener tables and all request channels,
// then starts the manage goroutine that serializes access to them.
func (c *NetEngine) Init() error {
	c.conntion_list = make(map[int]*conntion)
	c.listener_list = make(map[int]*listener)
	c.id = 1
	c.lock = new(sync.Mutex)
	c.add_conntion_chan = make(chan add_conntion_msg)
	c.del_conntion_chan = make(chan int)
	c.stop_chan = make(chan stop_msg)
	c.get_remote_addr_chan = make(chan get_addr_msg)
	c.get_local_addr_chan = make(chan get_addr_msg)
	c.add_listen_chan = make(chan add_listen_msg)
	c.start_chan = make(chan start_msg)
	// buffered to 100,000 entries so asynchronous Send rarely blocks
	c.send_chan = make(chan send_msg, 10*10000)
	c.get_sendfunc_chan = make(chan get_sendfunc_msg)
	c.close_chan = make(chan close_msg)
	go c.manage_run()
	return nil
}
// Stop asks the manage goroutine to shut down, waits for its acknowledgement,
// then closes every request channel. Callers racing with the close will panic
// on send; the public methods recover from that panic and report failure.
func (c *NetEngine) Stop() {
	var msg stop_msg
	msg.ch = make(chan int)
	c.stop_chan <- msg
	// block until the manage goroutine acknowledges the stop request
	// (bare receive replaces the non-idiomatic `_ = <-msg.ch`)
	<-msg.ch
	// close all chan
	close(c.get_remote_addr_chan)
	close(c.get_local_addr_chan)
	close(c.add_listen_chan)
	close(c.add_conntion_chan)
	close(c.del_conntion_chan)
	close(c.start_chan)
	close(c.send_chan)
	close(c.close_chan)
}
// GetRemoteAddr returns the peer address of connection id. The deferred
// recover absorbs the panic from sending on a channel closed by Stop, in
// which case the zero values are returned.
func (c *NetEngine) GetRemoteAddr(id int) (net.Addr, bool) {
	defer func() {
		recover()
	}()
	var msg get_addr_msg
	msg.ID = id
	msg.ch = make(chan net.Addr)
	c.get_remote_addr_chan <- msg
	addr, ok := <-msg.ch
	if !ok {
		return nil, false
	}
	return addr, true
}

// GetLocalAddr returns the local address of connection id; see GetRemoteAddr
// for the recover rationale.
func (c *NetEngine) GetLocalAddr(id int) (net.Addr, bool) {
	defer func() {
		recover()
	}()
	var msg get_addr_msg
	msg.ID = id
	msg.ch = make(chan net.Addr)
	c.get_local_addr_chan <- msg
	addr, ok := <-msg.ch
	if !ok {
		return nil, false
	}
	return addr, true
}
// AddListen registers lis with the engine and returns the id assigned to it.
// The recover guards against the engine having been stopped.
func (c *NetEngine) AddListen(lis net.Listener, notify NetNotify) (id int, err error) {
	defer func() {
		recover()
	}()
	var msg add_listen_msg
	msg.Lis = lis
	msg.Notify = notify
	msg.ch = make(chan listen_ret_msg)
	c.add_listen_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		return 0, errors.New("can't get result")
	}
	return r.ID, r.err
}

// AddConnection registers an established connection together with its buffer
// sizes and timeouts, returning the id assigned to it.
func (c *NetEngine) AddConnection(con net.Conn, notify NetNotify, recvBufLen, maxSendBufLen int, readTimeout, writeTimeout time.Duration) (id int, err error) {
	defer func() {
		recover()
	}()
	var msg add_conntion_msg
	msg.Con = con
	msg.Notify = notify
	msg.MaxBufLen = maxSendBufLen
	msg.RecvBufLen = recvBufLen
	msg.ReadTimeout = readTimeout
	msg.WriteTimeout = writeTimeout
	msg.ch = make(chan add_conntion_ret_msg)
	c.add_conntion_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		return 0, errors.New("can't get result")
	}
	return r.ID, r.err
}
/*
Typical call order: Listen or ConnectTo, then SetBuffer/SetCloseTime, then
Start.
*/

// Start begins servicing the connection or listener identified by id. The
// recover guards against the engine having been stopped.
func (c *NetEngine) Start(id int) {
	defer func() {
		recover()
	}()
	var msg start_msg
	msg.ID = id
	c.start_chan <- msg
}

// Send is asynchronous; it does not retain data (the payload is copied
// before being queued).
func (c *NetEngine) Send(id int, data []byte) {
	defer func() {
		recover()
	}()
	var msg send_msg
	msg.ID = id
	msg.Data = make([]byte, len(data))
	copy(msg.Data, data)
	c.send_chan <- msg
}
// GetSendFunc synchronously retrieves the send function bound to connection
// id, or nil if the engine is stopped or no function is available.
func (c *NetEngine) GetSendFunc(id int) SendFunc {
	defer func() {
		recover() // guards against sending on a channel closed by Stop
	}()
	var msg get_sendfunc_msg
	msg.ID = id
	msg.ch = make(chan SendFunc)
	c.get_sendfunc_chan <- msg
	r, ok := <-msg.ch
	if !ok {
		fmt.Println("can't get send func")
		return nil
	}
	if r == nil {
		// was "can't get nil send func", which described the opposite of
		// what happened; report the actual condition
		fmt.Println("got nil send func")
	}
	return r
}
// Close asynchronously requests that connection id be closed. The recover
// guards against the engine having been stopped.
func (c *NetEngine) Close(id int) {
	defer func() {
		recover()
	}()
	var msg close_msg
	msg.ID = id
	c.close_chan <- msg
}
|
package transfer
import (
"testing"
stripe "github.com/stripe/stripe-go"
"github.com/stripe/stripe-go/account"
"github.com/stripe/stripe-go/charge"
"github.com/stripe/stripe-go/currency"
. "github.com/stripe/stripe-go/utils"
)
// init points the stripe client at the shared test key before any test runs.
func init() {
	stripe.Key = GetTestKey()
}
// TestTransferAllMethods exercises transfer New/Get/Update/List end to end
// against the stripe test environment.
func TestTransferAllMethods(t *testing.T) {
	chargeParams := &stripe.ChargeParams{
		Amount:   1000,
		Currency: currency.USD,
		Source: &stripe.SourceParams{
			Card: &stripe.CardParams{
				Number: "4000000000000077",
				Month:  "06",
				Year:   "20",
			},
		},
	}
	charge, err := charge.New(chargeParams)
	if err != nil {
		// Fatal, not Error: continuing would dereference a nil charge below.
		t.Fatal(err)
	}
	params := &stripe.AccountParams{
		Type:    stripe.AccountTypeCustom,
		Country: "US",
		LegalEntity: &stripe.LegalEntity{
			Type: stripe.Individual,
			DOB: stripe.DOB{
				Day:   1,
				Month: 2,
				Year:  1990,
			},
		},
	}
	acc, err := account.New(params)
	if err != nil {
		t.Fatal(err)
	}
	transferParams := &stripe.TransferParams{
		Amount:   100,
		Currency: currency.USD,
		Dest:     acc.ID,
		SourceTx: charge.ID,
	}
	target, err := New(transferParams)
	if err != nil {
		t.Fatal(err)
	}
	if target.Amount != transferParams.Amount {
		t.Errorf("Amount %v does not match expected amount %v\n", target.Amount, transferParams.Amount)
	}
	if target.Currency != transferParams.Currency {
		t.Errorf("Currency %q does not match expected currency %q\n", target.Currency, transferParams.Currency)
	}
	if target.Created == 0 {
		t.Errorf("Created date is not set\n")
	}
	transferRetrieved, err := Get(target.ID, nil)
	if err != nil {
		t.Fatal(err)
	}
	if transferRetrieved.ID != target.ID {
		t.Errorf("ID %q does not match expected ID %q\n", transferRetrieved.ID, target.ID)
	}
	updateParams := &stripe.TransferParams{}
	updateParams.AddMeta("foo", "bar")
	transferUpdated, err := Update(target.ID, updateParams)
	if err != nil {
		t.Fatal(err)
	}
	if transferUpdated.Meta["foo"] != "bar" {
		t.Error("Transfer metadata not updated")
	}
	multipleTransferParams := &stripe.TransferParams{
		Amount:   100,
		Currency: currency.USD,
		Dest:     acc.ID,
	}
	for i := 0; i < 3; i++ {
		// creation errors were previously discarded silently
		if _, err := New(multipleTransferParams); err != nil {
			t.Error(err)
		}
	}
	nbTransfers := 0
	i := List(&stripe.TransferListParams{Dest: acc.ID})
	for i.Next() {
		if i.Transfer() == nil {
			t.Error("No nil values expected")
		}
		if i.Meta() == nil {
			t.Error("No metadata returned")
		}
		nbTransfers++
	}
	if err := i.Err(); err != nil {
		t.Error(err)
	}
	if nbTransfers != 4 {
		// %d replaces %q: nbTransfers is an int and %q would print it as a
		// quoted rune, not a count
		t.Errorf("Expected 4 transfers on %q but got %d\n", acc.ID, nbTransfers)
	}
	account.Del(acc.ID)
}
Fix build with go fmt
package transfer
import (
"testing"
stripe "github.com/stripe/stripe-go"
"github.com/stripe/stripe-go/account"
"github.com/stripe/stripe-go/charge"
"github.com/stripe/stripe-go/currency"
. "github.com/stripe/stripe-go/utils"
)
// init points the stripe client at the shared test key before any test runs.
func init() {
	stripe.Key = GetTestKey()
}
// TestTransferAllMethods exercises transfer New/Get/Update/List end to end
// against the stripe test environment.
func TestTransferAllMethods(t *testing.T) {
	chargeParams := &stripe.ChargeParams{
		Amount:   1000,
		Currency: currency.USD,
		Source: &stripe.SourceParams{
			Card: &stripe.CardParams{
				Number: "4000000000000077",
				Month:  "06",
				Year:   "20",
			},
		},
	}
	charge, err := charge.New(chargeParams)
	if err != nil {
		// Fatal, not Error: continuing would dereference a nil charge below.
		t.Fatal(err)
	}
	params := &stripe.AccountParams{
		Type:    stripe.AccountTypeCustom,
		Country: "US",
		LegalEntity: &stripe.LegalEntity{
			Type: stripe.Individual,
			DOB: stripe.DOB{
				Day:   1,
				Month: 2,
				Year:  1990,
			},
		},
	}
	acc, err := account.New(params)
	if err != nil {
		t.Fatal(err)
	}
	transferParams := &stripe.TransferParams{
		Amount:   100,
		Currency: currency.USD,
		Dest:     acc.ID,
		SourceTx: charge.ID,
	}
	target, err := New(transferParams)
	if err != nil {
		t.Fatal(err)
	}
	if target.Amount != transferParams.Amount {
		t.Errorf("Amount %v does not match expected amount %v\n", target.Amount, transferParams.Amount)
	}
	if target.Currency != transferParams.Currency {
		t.Errorf("Currency %q does not match expected currency %q\n", target.Currency, transferParams.Currency)
	}
	if target.Created == 0 {
		t.Errorf("Created date is not set\n")
	}
	transferRetrieved, err := Get(target.ID, nil)
	if err != nil {
		t.Fatal(err)
	}
	if transferRetrieved.ID != target.ID {
		t.Errorf("ID %q does not match expected ID %q\n", transferRetrieved.ID, target.ID)
	}
	updateParams := &stripe.TransferParams{}
	updateParams.AddMeta("foo", "bar")
	transferUpdated, err := Update(target.ID, updateParams)
	if err != nil {
		t.Fatal(err)
	}
	if transferUpdated.Meta["foo"] != "bar" {
		t.Error("Transfer metadata not updated")
	}
	multipleTransferParams := &stripe.TransferParams{
		Amount:   100,
		Currency: currency.USD,
		Dest:     acc.ID,
	}
	for i := 0; i < 3; i++ {
		// creation errors were previously discarded silently
		if _, err := New(multipleTransferParams); err != nil {
			t.Error(err)
		}
	}
	nbTransfers := 0
	i := List(&stripe.TransferListParams{Dest: acc.ID})
	for i.Next() {
		if i.Transfer() == nil {
			t.Error("No nil values expected")
		}
		if i.Meta() == nil {
			t.Error("No metadata returned")
		}
		nbTransfers++
	}
	if err := i.Err(); err != nil {
		t.Error(err)
	}
	if nbTransfers != 4 {
		// %d replaces %q: nbTransfers is an int and %q would print it as a
		// quoted rune, not a count
		t.Errorf("Expected 4 transfers on %q but got %d\n", acc.ID, nbTransfers)
	}
	account.Del(acc.ID)
}
|
package manager
import (
"fmt"
"net/http"
"veyron2/ipc/stream"
)
// HTTPHandler returns an http.Handler that dumps out debug information from
// the stream.Manager.
//
// If the stream.Manager was not created by InternalNew in this package, an error
// will be returned instead.
//
// TODO(ashankar): This should be made a secure handler that only exposes information
// on VCs that an identity authenticated over HTTPS has access to.
func HTTPHandler(mgr stream.Manager) (http.Handler, error) {
	m, ok := mgr.(*manager)
	if !ok {
		return nil, fmt.Errorf("unrecognized stream.Manager implementation: %T", mgr)
	}
	return httpHandler{m}, nil
}

// httpHandler exposes the wrapped manager's debug state over HTTP.
type httpHandler struct{ m *manager }

// ServeHTTP writes the manager's debug string as UTF-8 plain text. The
// charset is declared explicitly because HTTP does not specify a default
// character encoding, so non-UTF-8 browsers would otherwise misrender it.
func (h httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
	w.Write([]byte(h.m.DebugString()))
}
go/src/veyron/runtimes/google/ipc/stream/manager/http: utf-8 encodings
http does not specify the default character encoding. Add additional
header to ask for utf-8 on browsers that are not utf-8 by default.
Change-Id: I326c99cdd8a8ebbd563dd4379a667e98718a93ed
package manager
import (
"fmt"
"net/http"
"veyron2/ipc/stream"
)
// HTTPHandler returns an http.Handler that dumps out debug information from
// the stream.Manager.
//
// If the stream.Manager was not created by InternalNew in this package, an error
// will be returned instead.
//
// TODO(ashankar): This should be made a secure handler that only exposes information
// on VCs that an identity authenticated over HTTPS has access to.
func HTTPHandler(mgr stream.Manager) (http.Handler, error) {
	m, ok := mgr.(*manager)
	if !ok {
		return nil, fmt.Errorf("unrecognized stream.Manager implementation: %T", mgr)
	}
	return httpHandler{m}, nil
}

// httpHandler exposes the wrapped manager's debug state over HTTP.
type httpHandler struct{ m *manager }

// ServeHTTP writes the manager's debug string as UTF-8 plain text; the
// charset is explicit because HTTP has no default character encoding.
func (h httpHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/plain; charset=UTF-8")
	w.Write([]byte(h.m.DebugString()))
}
|
package databuf
import (
"encoding/binary"
"errors"
"io"
"reflect"
"strconv"
)
func _getStructVersion(V *reflect.Value) uint8 {
var ver uint64
for i := 0; i < V.NumField(); i++ {
fieldVersion := V.Type().Field(i).Tag.Get("version")
if 0 == len(fieldVersion) {
continue
}
if t, err := strconv.ParseUint(fieldVersion, 10, 64); (err == nil) && (t > ver) {
ver = t
}
}
return uint8(ver)
}
// DataWriter serializes Go values into the databuf wire format on buf.
type DataWriter struct {
	buf io.Writer
}

// NewWriter returns a DataWriter that emits to buf.
func NewWriter(buf io.Writer) *DataWriter {
	return &DataWriter{buf}
}

// WriteTagData emits the packed representation of tag.
func (w *DataWriter) WriteTagData(tag DataTag) (err error) {
	return binary.Write(w.buf, binary.BigEndian, tag.Pack())
}

// WriteUintData emits value big-endian, truncated to the width selected by
// size_tag (TAG_0 writes nothing; the default case writes 8 bytes).
func (w *DataWriter) WriteUintData(size_tag uint, value uint64) (err error) {
	switch size_tag {
	case TAG_0:
		return nil
	case TAG_1:
		return binary.Write(w.buf, binary.BigEndian, uint8(value))
	case TAG_2:
		return binary.Write(w.buf, binary.BigEndian, uint16(value))
	case TAG_4:
		return binary.Write(w.buf, binary.BigEndian, uint32(value))
	default:
		return binary.Write(w.buf, binary.BigEndian, uint64(value))
	}
}
// Write serializes value (dereferencing pointers first) as a tagged databuf
// element: maps and slices/arrays as arrays, structs as versioned objects,
// plus the scalar cases below. Unsupported kinds yield an error.
func (w *DataWriter) Write(value interface{}) (err error) {
	V := reflect.ValueOf(value)
	if V.Kind() == reflect.Ptr {
		return w.Write(V.Elem().Interface())
	}
	var tag DataTag
	switch V.Kind() {
	case reflect.Map:
		// a map is encoded as an array of alternating key/value elements
		keys := V.MapKeys()
		length := len(keys)
		tag := DataTag{TYPE_ARRAY, SizeTag(length), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		for i := 0; i < length; i++ {
			v := V.MapIndex(keys[i])
			if err = w.Write(keys[i].Interface()); err != nil {
				return
			}
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Slice, reflect.Array:
		length := V.Len()
		tag := DataTag{TYPE_ARRAY, SizeTag(length), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		for i := 0; i < length; i++ {
			v := V.Index(i)
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Struct:
		length := V.NumField()
		stVersion := _getStructVersion(&V)
		tag := DataTag{TYPE_OBJECT, SizeTag(length), stVersion != 0}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		if tag.VersionTag {
			if err = w.WriteUintData(TAG_1, uint64(stVersion)); err != nil {
				return
			}
		}
		for i := 0; i < length; i++ {
			v := V.Field(i)
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		tag := DataTag{TYPE_INT, SizeTag(value), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		switch tag.SizeTag {
		case TAG_0:
			return nil
		case TAG_1:
			return binary.Write(w.buf, binary.BigEndian, int8(V.Int()))
		case TAG_2:
			return binary.Write(w.buf, binary.BigEndian, int16(V.Int()))
		case TAG_4:
			return binary.Write(w.buf, binary.BigEndian, int32(V.Int()))
		default:
			return binary.Write(w.buf, binary.BigEndian, int64(V.Int()))
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		tag := DataTag{TYPE_UINT, SizeTag(value), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		switch tag.SizeTag {
		case TAG_0:
			return nil
		case TAG_1:
			return binary.Write(w.buf, binary.BigEndian, uint8(V.Uint()))
		case TAG_2:
			return binary.Write(w.buf, binary.BigEndian, uint16(V.Uint()))
		case TAG_4:
			return binary.Write(w.buf, binary.BigEndian, uint32(V.Uint()))
		default:
			return binary.Write(w.buf, binary.BigEndian, uint64(V.Uint()))
		}
	case reflect.Float32, reflect.Float64:
		// binary.Write emits 4 bytes for float32 and 8 for float64, so the
		// size tags must be TAG_4/TAG_8; the previous TAG_1/TAG_2 under-
		// reported the payload width and corrupted the stream for readers.
		if V.Kind() == reflect.Float32 {
			tag = DataTag{TYPE_REAL, TAG_4, false}
		} else {
			tag = DataTag{TYPE_REAL, TAG_8, false}
		}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		return binary.Write(w.buf, binary.BigEndian, value)
	case reflect.Bool:
		// the boolean value is carried entirely in the size tag
		if V.Bool() {
			tag = DataTag{TYPE_BOOL, TAG_1, false}
		} else {
			tag = DataTag{TYPE_BOOL, TAG_0, false}
		}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
	case reflect.String:
		cBuff := []byte(V.String())
		tag = DataTag{TYPE_STRING, SizeTag(len(cBuff)), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(len(cBuff))); err != nil {
			return
		}
		if _, err = w.buf.Write([]byte(cBuff)); err != nil {
			return
		}
	default:
		return errors.New("unsupported type " + reflect.TypeOf(value).String())
	}
	return
}
DataWriter code optimize.
package databuf
import (
"encoding/binary"
"errors"
"io"
"reflect"
"strconv"
)
// _getStructVersion returns the highest numeric `version` struct tag among
// V's fields; fields without a parsable tag are ignored and 0 means the
// struct is unversioned.
func _getStructVersion(V *reflect.Value) uint8 {
	var ver uint64
	for i := 0; i < V.NumField(); i++ {
		fieldVersion := V.Type().Field(i).Tag.Get("version")
		if 0 == len(fieldVersion) {
			continue
		}
		if t, err := strconv.ParseUint(fieldVersion, 10, 64); (err == nil) && (t > ver) {
			ver = t
		}
	}
	return uint8(ver)
}
// DataWriter serializes Go values into the databuf wire format on buf.
type DataWriter struct {
	buf io.Writer
}

// NewWriter returns a DataWriter that emits to buf.
func NewWriter(buf io.Writer) *DataWriter {
	return &DataWriter{buf}
}

// WriteTagData emits the packed representation of tag.
func (w *DataWriter) WriteTagData(tag DataTag) (err error) {
	return binary.Write(w.buf, binary.BigEndian, tag.Pack())
}

// WriteUintData emits value big-endian, truncated to the width selected by
// size_tag (TAG_0 writes nothing; the default case writes 8 bytes).
func (w *DataWriter) WriteUintData(size_tag uint, value uint64) (err error) {
	switch size_tag {
	case TAG_0:
		return nil
	case TAG_1:
		return binary.Write(w.buf, binary.BigEndian, uint8(value))
	case TAG_2:
		return binary.Write(w.buf, binary.BigEndian, uint16(value))
	case TAG_4:
		return binary.Write(w.buf, binary.BigEndian, uint32(value))
	default:
		return binary.Write(w.buf, binary.BigEndian, uint64(value))
	}
}

// WriteIntData is the signed counterpart of WriteUintData.
func (w *DataWriter) WriteIntData(size_tag uint, value int64) (err error) {
	switch size_tag {
	case TAG_0:
		return nil
	case TAG_1:
		return binary.Write(w.buf, binary.BigEndian, int8(value))
	case TAG_2:
		return binary.Write(w.buf, binary.BigEndian, int16(value))
	case TAG_4:
		return binary.Write(w.buf, binary.BigEndian, int32(value))
	default:
		return binary.Write(w.buf, binary.BigEndian, int64(value))
	}
}
// Write serializes value (dereferencing pointers first) as a tagged databuf
// element: maps and slices/arrays as arrays, structs as versioned objects,
// plus the scalar cases below. Unsupported kinds yield an error.
func (w *DataWriter) Write(value interface{}) (err error) {
	V := reflect.ValueOf(value)
	if V.Kind() == reflect.Ptr {
		return w.Write(V.Elem().Interface())
	}
	var tag DataTag
	switch V.Kind() {
	case reflect.Map:
		// a map is encoded as an array of alternating key/value elements
		keys := V.MapKeys()
		length := len(keys)
		tag := DataTag{TYPE_ARRAY, SizeTag(length), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		for i := 0; i < length; i++ {
			v := V.MapIndex(keys[i])
			if err = w.Write(keys[i].Interface()); err != nil {
				return
			}
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Slice, reflect.Array:
		length := V.Len()
		tag := DataTag{TYPE_ARRAY, SizeTag(length), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		for i := 0; i < length; i++ {
			v := V.Index(i)
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Struct:
		length := V.NumField()
		stVersion := _getStructVersion(&V)
		tag := DataTag{TYPE_OBJECT, SizeTag(length), stVersion != 0}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(length)); err != nil {
			return
		}
		if tag.VersionTag {
			// version byte is emitted only for versioned structs
			if err = w.WriteUintData(TAG_1, uint64(stVersion)); err != nil {
				return
			}
		}
		for i := 0; i < length; i++ {
			v := V.Field(i)
			if err = w.Write(v.Interface()); err != nil {
				return
			}
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		tag := DataTag{TYPE_INT, SizeTag(value), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		return w.WriteIntData(tag.SizeTag, V.Int())
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		tag := DataTag{TYPE_UINT, SizeTag(value), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		return w.WriteUintData(tag.SizeTag, V.Uint())
	case reflect.Float32, reflect.Float64:
		// size tags match the 4/8-byte payloads binary.Write emits below
		if V.Kind() == reflect.Float32 {
			tag = DataTag{TYPE_REAL, TAG_4, false}
		} else {
			tag = DataTag{TYPE_REAL, TAG_8, false}
		}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		return binary.Write(w.buf, binary.BigEndian, value)
	case reflect.Bool:
		// the boolean value is carried entirely in the size tag
		if V.Bool() {
			tag = DataTag{TYPE_BOOL, TAG_1, false}
		} else {
			tag = DataTag{TYPE_BOOL, TAG_0, false}
		}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
	case reflect.String:
		cBuff := []byte(V.String())
		tag = DataTag{TYPE_STRING, SizeTag(len(cBuff)), false}
		if err = w.WriteTagData(tag); err != nil {
			return
		}
		if err = w.WriteUintData(tag.SizeTag, uint64(len(cBuff))); err != nil {
			return
		}
		if _, err = w.buf.Write([]byte(cBuff)); err != nil {
			return
		}
	default:
		return errors.New("unsupported type " + reflect.TypeOf(value).String())
	}
	return
}
|
/*
Sample standalone app.
Create pswd.go with something like the following:
func init() {
pswd = "yourpassword3292390"
thuder.SetPinID(17) //to use pin 17 as light indicator
	filters = []thuder.Filter{...} //filters for what operations are allowed by the host
	postScripts = []string{"..."} //commands to run after files are synchronized.
}
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
logLab "log"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/xiegeo/thuder"
)
var monitor = flag.Bool("monitor", false, "Enables monitoring for new mounts and runs pull and push automatically.")

// hostConfigName optionally points at a JSON host config that overrides the
// build-time settings below. ("purpurses" typo in the help text fixed.)
var hostConfigName = flag.String("host_config", "", "Set the path to the read and write host config. "+
	"For security purposes, this file should not be on the same storage device that thuder is backing up to, "+
	"as such is equivalent to allowing all operations listed in that device. "+
	"Default value is empty, which disables using config file from overwriting build time settings.")

var sleep = time.Second * 5 //How often to pull mediaLocation to detect new devices.

var logE = logLab.New(os.Stderr, "[thuder err]", logLab.LstdFlags)

// optional build time customizations
var filters []thuder.Filter //set this to set default host filters
var postScripts []string    //set this to run after pull/push
// main runs a single pull/push when -monitor is off; otherwise it polls the
// media location and syncs each time a removable device appears.
func main() {
	flag.Parse()
	if !*monitor {
		hc, err := hostConfig()
		if err != nil {
			panic(err)
		}
		runOnce(hc)
		return
	}
	thuder.FlashLED() //flash once for monitoring on
	for ; ; time.Sleep(sleep) {
		hc, err := hostConfig()
		if err != nil {
			continue
		}
		// probe for the device with os.Stat: the previous os.Open leaked a
		// file descriptor on every successful probe
		_, err = os.Stat(hc.DefaultDirectory())
		if err != nil {
			//fmt.Println(err)
			if !os.IsNotExist(err) {
				logE.Println(err)
			}
			continue
		}
		runOnce(hc)
		fmt.Println("waiting for media to be removed")
		for err == nil {
			time.Sleep(time.Second)
			_, err = os.Stat(hc.DefaultDirectory())
		}
		fmt.Println("removed: ", err)
	}
}
//loadDefault builds the build-time default HostConfig: media location, a
//host-unique name, the compiled-in filters, and the OS/arch group name.
func loadDefault() (*thuder.HostConfig, error) {
	hc := &thuder.HostConfig{}
	hc.MediaLocation = mediaLocation()
	uhn, err := thuder.GenerateUniqueHostname()
	if err != nil {
		return nil, err
	}
	hc.UniqueHostName = uhn
	hc.Filters = filters
	hc.Group = groupName()
	return hc, nil
}
//hostConfig returns the active HostConfig: the build-time default when no
//-host_config path is set, otherwise the JSON file at that path (creating it
//from the default when it does not exist yet).
func hostConfig() (*thuder.HostConfig, error) {
	fn := *hostConfigName
	if fn == "" {
		return loadDefault()
	}
	file, err := os.Open(fn)
	if err != nil {
		if os.IsNotExist(err) {
			//load and save default HostConfig
			hc, err := loadDefault()
			if err != nil {
				return nil, err
			}
			err = saveFile(fn, hc)
			if err != nil {
				logE.Println(err)
			}
			return hc, nil
		}
		return nil, err
	}
	// the config file was previously never closed, leaking a descriptor on
	// every call in monitor mode
	defer file.Close()
	dec := json.NewDecoder(file)
	hc := &thuder.HostConfig{}
	err = dec.Decode(hc)
	if err != nil {
		return nil, err
	}
	//UniqueHostName does not match expected, the file could have been copied from
	//a different system. Fix this to avoid name collision.
	uhn, err := thuder.GenerateUniqueHostname()
	if err != nil {
		return nil, err
	}
	if hc.UniqueHostName != uhn {
		hc.UniqueHostName = uhn
		err = saveFile(fn, hc)
		if err != nil {
			logE.Println(err)
		}
	}
	return hc, nil
}
//runOnce performs one full pull/push cycle for the media described by hc,
//redirecting all logging to a per-run logger for the duration.
func runOnce(hc *thuder.HostConfig) error {
	//restore the global log destinations when this run finishes
	defer func(a, b io.Writer, c *logLab.Logger) {
		thuder.LogErrorOut = a
		thuder.LogVerboseOut = b
		logE = c
	}(thuder.LogErrorOut, thuder.LogVerboseOut, logE)
	lw := logger(hc)
	thuder.LogErrorOut = lw
	thuder.LogVerboseOut = lw
	logE = logLab.New(lw, "[thuder err]", logLab.LstdFlags)
	fmt.Fprintln(lw, "start thuder ", time.Now())
	defer fmt.Fprintln(lw, "end thuder")
	hc.Authorization = authorize
	mc, err := hc.MediaConfig()
	if err != nil {
		logE.Println("Can not load Media Config", err)
		return err
	}
	//deferred so the post scripts run (in reverse order) even when
	//PullAndPush fails; each closure captures its own script path
	for i := range postScripts {
		defer func(postScript string) {
			cmd := exec.Command(postScript)
			cmd.Stdout = lw
			cmd.Stderr = lw
			err := cmd.Run()
			if err != nil {
				logE.Println(err)
			}
		}(postScripts[i])
	}
	fmt.Fprintln(lw, mc)
	err = thuder.PullAndPush(hc, mc)
	if err != nil {
		logE.Println("Failed ", err)
		return err
	}
	return nil
}
func saveFile(fn string, v interface{}) error {
data, err := json.MarshalIndent(v, "", "\t")
if err != nil {
return err
}
return ioutil.WriteFile(fn, data, 0644)
}
//groupName is set here based on os and arch, so that different pathes and
//binaries can be used for cross platform support. groupName can be changed to
//use environment values for group based specializations.
func groupName() string {
return runtime.GOOS + "-" + runtime.GOARCH
}
//mediaLocation is where removable device is mounted, it could be replaced by
//a command-line flag if using a launcher with more intelligence.
func mediaLocation() string {
if os.PathSeparator == '/' {
return "/media/usb" //by usbmount
}
return "E:\\" //windows
}
var pswd = ""
// authorize reports whether the removable device is allowed to sync with
// this host. It compares the contents of the "pswd" file in the device's
// default directory against the build-time pswd value. You must customize
// this function (or set pswd from a pswd.go file) before use.
func authorize(hc *thuder.HostConfig) bool {
	if pswd == "" {
		// fixed typo in the user-facing message ("rewite" -> "rewrite")
		panic("please init pswd in a new pswd.go file," +
			" or rewrite authorize to use a different method")
	}
	p, err := ioutil.ReadFile(filepath.Join(hc.DefaultDirectory(), "pswd"))
	if err != nil {
		logE.Println(err)
		return false
	}
	return string(p) == pswd
}
[sample app] rerun sync after an hour even if not replaced
/*
Sample standalone app.
Create pswd.go with something like the following:
func init() {
pswd = "yourpassword3292390"
thuder.SetPinID(17) //to use pin 17 as light indicator
filters = []thuder.Filter{...} //filters for what operations are allowed by the host
postScripts = []string{"..."} //commands to run after files are synchronized.
}
*/
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
logLab "log"
"os"
"os/exec"
"path/filepath"
"runtime"
"time"
"github.com/xiegeo/thuder"
)
// Command-line flags.
var monitor = flag.Bool("monitor", false, "Enables monitoring for new mounts and runs pull and push automatically.")
var hostConfigName = flag.String("host_config", "", "Set the path to the read and write host config. "+
	"For security purposes, this file should not be on the same storage device that thuder is backing up to, "+
	"as such is equivalent to allowing all operations listed in that device. "+
	"Default value is empty, which disables using config file from overwriting build time settings.")
const sleep = time.Second * 5 //How often to pull mediaLocation to detect new devices.
const rerun = time.Hour //How often to resync even if removable media has not changed.
var logE = logLab.New(os.Stderr, "[thuder err]", logLab.LstdFlags)
// optional build time customizations
var filters []thuder.Filter //set this to set default host filters
var postScripts []string //set this to run after pull/push
// main either runs a single synchronization (default) or, with -monitor,
// polls for removable media and synchronizes whenever a device appears.
func main() {
	flag.Parse()
	if !*monitor {
		hc, err := hostConfig()
		if err != nil {
			panic(err)
		}
		runOnce(hc)
		return
	}
	thuder.FlashLED() //flash once for monitoring on
	for ; ; time.Sleep(sleep) {
		hc, err := hostConfig()
		if err != nil {
			continue
		}
		// Probe for the media directory. Close the handle immediately:
		// the original leaked one descriptor per poll iteration.
		f, err := os.Open(hc.DefaultDirectory())
		if err != nil {
			//fmt.Println(err)
			if !os.IsNotExist(err) {
				logE.Println(err)
			}
			continue
		}
		f.Close()
		runOnce(hc)
		fmt.Println("waiting for media to be removed")
		// Wait until the device disappears, or resync anyway after the
		// rerun interval even if it was never removed.
		w := rerun
		for err == nil && w > 0 {
			time.Sleep(time.Second)
			f, err = os.Open(hc.DefaultDirectory())
			if err == nil {
				f.Close() // avoid leaking a descriptor every second
			}
			w -= time.Second
		}
		fmt.Println("removed: ", err)
	}
}
//loadDefault loads the default HostConfig built from compile-time settings:
//the default media mount point, a freshly generated unique host name, the
//build-time filters, and the os/arch group name.
func loadDefault() (*thuder.HostConfig, error) {
	hc := &thuder.HostConfig{}
	hc.MediaLocation = mediaLocation()
	uhn, err := thuder.GenerateUniqueHostname()
	if err != nil {
		return nil, err
	}
	hc.UniqueHostName = uhn
	hc.Filters = filters
	hc.Group = groupName()
	return hc, nil
}
// hostConfig returns the HostConfig to use: the compile-time default when no
// -host_config flag is given, otherwise the configuration loaded from (and,
// when the file is missing, initialized at) the flag's file path.
func hostConfig() (*thuder.HostConfig, error) {
	fn := *hostConfigName
	if fn == "" {
		return loadDefault()
	}
	file, err := os.Open(fn)
	if err != nil {
		if os.IsNotExist(err) {
			//load and save default HostConfig
			hc, err := loadDefault()
			if err != nil {
				return nil, err
			}
			err = saveFile(fn, hc)
			if err != nil {
				logE.Println(err)
			}
			return hc, nil
		}
		return nil, err
	}
	// The original never closed the config file; close it on all paths.
	defer file.Close()
	dec := json.NewDecoder(file)
	hc := &thuder.HostConfig{}
	err = dec.Decode(hc)
	if err != nil {
		return nil, err
	}
	uhn, err := thuder.GenerateUniqueHostname()
	if err != nil {
		return nil, err
	}
	// If UniqueHostName does not match the expected value, the file could
	// have been copied from a different system. Fix it here to avoid name
	// collisions.
	if hc.UniqueHostName != uhn {
		hc.UniqueHostName = uhn
		err = saveFile(fn, hc)
		if err != nil {
			logE.Println(err)
		}
	}
	return hc, nil
}
// runOnce performs a single pull/push synchronization cycle for the given
// host configuration. Log output is redirected to the writer returned by
// logger(hc) for the duration of the call and restored on return.
func runOnce(hc *thuder.HostConfig) error {
	// Capture the current log destinations and restore them when done.
	defer func(a, b io.Writer, c *logLab.Logger) {
		thuder.LogErrorOut = a
		thuder.LogVerboseOut = b
		logE = c
	}(thuder.LogErrorOut, thuder.LogVerboseOut, logE)
	lw := logger(hc)
	thuder.LogErrorOut = lw
	thuder.LogVerboseOut = lw
	logE = logLab.New(lw, "[thuder err]", logLab.LstdFlags)
	fmt.Fprintln(lw, "start thuder ", time.Now())
	defer fmt.Fprintln(lw, "end thuder")
	hc.Authorization = authorize
	mc, err := hc.MediaConfig()
	if err != nil {
		logE.Println("Can not load Media Config", err)
		return err
	}
	// Schedule the post scripts now; being deferred, they run after
	// PullAndPush finishes (in reverse registration order), even on error.
	for i := range postScripts {
		defer func(postScript string) {
			cmd := exec.Command(postScript)
			cmd.Stdout = lw
			cmd.Stderr = lw
			err := cmd.Run()
			if err != nil {
				logE.Println(err)
			}
		}(postScripts[i])
	}
	fmt.Fprintln(lw, mc)
	err = thuder.PullAndPush(hc, mc)
	if err != nil {
		logE.Println("Failed ", err)
		return err
	}
	return nil
}
// saveFile serializes v as tab-indented JSON and writes it to the file fn
// with permissions 0644.
func saveFile(fn string, v interface{}) error {
	var encoded []byte
	var err error
	if encoded, err = json.MarshalIndent(v, "", "\t"); err != nil {
		return err
	}
	return ioutil.WriteFile(fn, encoded, 0644)
}
//groupName is set here based on os and arch, so that different paths and
//binaries can be used for cross-platform support. groupName can be changed to
//use environment values for group-based specializations.
// groupName reports the platform group for this build, formed from the
// operating system and architecture (e.g. "linux-arm").
func groupName() string {
	return fmt.Sprintf("%s-%s", runtime.GOOS, runtime.GOARCH)
}
//mediaLocation is where removable device is mounted, it could be replaced by
//a command-line flag if using a launcher with more intelligence.
// mediaLocation returns the mount point of the removable media: the
// usbmount default on Unix-like systems, drive E: on Windows.
func mediaLocation() string {
	const unixMount = "/media/usb" // mounted by usbmount
	const windowsMount = "E:\\"
	if os.PathSeparator != '/' {
		return windowsMount
	}
	return unixMount
}
var pswd = ""
// authorize reports whether the removable device is allowed to sync with
// this host. It compares the contents of the "pswd" file in the device's
// default directory against the build-time pswd value. You must customize
// this function (or set pswd from a pswd.go file) before use.
func authorize(hc *thuder.HostConfig) bool {
	if pswd == "" {
		// fixed typo in the user-facing message ("rewite" -> "rewrite")
		panic("please init pswd in a new pswd.go file," +
			" or rewrite authorize to use a different method")
	}
	p, err := ioutil.ReadFile(filepath.Join(hc.DefaultDirectory(), "pswd"))
	if err != nil {
		logE.Println(err)
		return false
	}
	return string(p) == pswd
}
|
package net_test
import (
"fmt"
"testing"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
inet "github.com/jbenet/go-ipfs/net"
)
// TestConnectednessCorrect starts a few networks, connects a few
// and tests Connectedness value is correct.
func TestConnectednessCorrect(t *testing.T) {
	ctx := context.Background()
	nets := make([]inet.Network, 4)
	for i := 0; i < 4; i++ {
		nets[i] = GenNetwork(t, ctx)
	}
	// connect 0-1, 0-3, 1-2, and 3-2 (0-2 and 1-3 stay unconnected)
	dial := func(a, b inet.Network) {
		DivulgeAddresses(b, a)
		if err := a.DialPeer(ctx, b.LocalPeer()); err != nil {
			t.Fatalf("Failed to dial: %s", err)
		}
	}
	dial(nets[0], nets[1])
	dial(nets[0], nets[3])
	dial(nets[1], nets[2])
	dial(nets[3], nets[2])
	// test those connected show up correctly
	// test connected
	testConnectedness(t, nets[0], nets[1], inet.Connected)
	testConnectedness(t, nets[0], nets[3], inet.Connected)
	testConnectedness(t, nets[1], nets[2], inet.Connected)
	testConnectedness(t, nets[3], nets[2], inet.Connected)
	// test not connected
	testConnectedness(t, nets[0], nets[2], inet.NotConnected)
	testConnectedness(t, nets[1], nets[3], inet.NotConnected)
	for _, n := range nets {
		n.Close()
	}
}
// testConnectedness asserts that a and b report Connectedness c for each
// other, checking both directions of the relationship.
func testConnectedness(t *testing.T, a, b inet.Network, c inet.Connectedness) {
	es := "%s is connected to %s, but Connectedness incorrect. %s %s"
	if a.Connectedness(b.LocalPeer()) != c {
		t.Errorf(es, a, b, printConns(a), printConns(b))
	}
	// test symmetric case
	if b.Connectedness(a.LocalPeer()) != c {
		t.Errorf(es, b, a, printConns(b), printConns(a))
	}
}
// printConns renders a human-readable listing of n's current connections,
// one per line, for use in test failure messages.
func printConns(n inet.Network) string {
	out := fmt.Sprintf("Connections in %s:\n", n)
	for _, conn := range n.Conns() {
		out += fmt.Sprintf("- %s\n", conn)
	}
	return out
}
misc naming
License: MIT
Signed-off-by: Brian Tiger Chow <760e7dab2836853c63805033e514668301fa9c47@perfmode.com>
package net_test
import (
"fmt"
"testing"
context "github.com/jbenet/go-ipfs/Godeps/_workspace/src/code.google.com/p/go.net/context"
inet "github.com/jbenet/go-ipfs/net"
)
// TestConnectednessCorrect starts a few networks, connects a few
// and tests Connectedness value is correct.
func TestConnectednessCorrect(t *testing.T) {
	ctx := context.Background()
	nets := make([]inet.Network, 4)
	for i := 0; i < 4; i++ {
		nets[i] = GenNetwork(t, ctx)
	}
	// connect 0-1, 0-3, 1-2, and 3-2 (0-2 and 1-3 stay unconnected)
	dial := func(a, b inet.Network) {
		DivulgeAddresses(b, a)
		if err := a.DialPeer(ctx, b.LocalPeer()); err != nil {
			t.Fatalf("Failed to dial: %s", err)
		}
	}
	dial(nets[0], nets[1])
	dial(nets[0], nets[3])
	dial(nets[1], nets[2])
	dial(nets[3], nets[2])
	// test those connected show up correctly
	// test connected
	expectConnectedness(t, nets[0], nets[1], inet.Connected)
	expectConnectedness(t, nets[0], nets[3], inet.Connected)
	expectConnectedness(t, nets[1], nets[2], inet.Connected)
	expectConnectedness(t, nets[3], nets[2], inet.Connected)
	// test not connected
	expectConnectedness(t, nets[0], nets[2], inet.NotConnected)
	expectConnectedness(t, nets[1], nets[3], inet.NotConnected)
	for _, n := range nets {
		n.Close()
	}
}
// expectConnectedness asserts that a and b report the expected Connectedness
// for each other, checking both directions of the relationship.
func expectConnectedness(t *testing.T, a, b inet.Network, expected inet.Connectedness) {
	es := "%s is connected to %s, but Connectedness incorrect. %s %s"
	if a.Connectedness(b.LocalPeer()) != expected {
		t.Errorf(es, a, b, printConns(a), printConns(b))
	}
	// test symmetric case
	if b.Connectedness(a.LocalPeer()) != expected {
		t.Errorf(es, b, a, printConns(b), printConns(a))
	}
}
// printConns renders a human-readable listing of n's current connections,
// one per line, for use in test failure messages.
func printConns(n inet.Network) string {
	out := fmt.Sprintf("Connections in %s:\n", n)
	for _, conn := range n.Conns() {
		out += fmt.Sprintf("- %s\n", conn)
	}
	return out
}
|
package network
import (
"errors"
"fmt"
"io"
"sync"
gproto "github.com/golang/protobuf/proto"
"github.com/micro/go-micro/codec"
pb "github.com/micro/go-micro/network/proto"
"github.com/micro/go-micro/transport"
)
type link struct {
// the embedded node
*node
sync.RWMutex
// the link id
id string
// the send queue to the socket
queue chan *Message
// codec we use to marshal things
codec codec.Marshaler
// the socket for this link
socket transport.Socket
// the lease for this link
lease *pb.Lease
// determines the cost of the link
// based on queue length and roundtrip
length int
weight int
}
// link methods
// process drains the link's send queue, writing each queued message to the
// socket. It returns when a send fails.
func (l *link) process() {
	for {
		msg := <-l.queue
		if err := l.send(msg, nil); err != nil {
			return
		}
	}
}
// accept waits for the connect message from the remote end
// if it receives anything else it throws an error
func (l *link) accept() error {
for {
m := new(transport.Message)
err := l.socket.Recv(m)
if err == io.EOF {
return nil
}
if err != nil {
return err
}
// TODO: pick a reliable header
event := m.Header["Micro-Method"]
switch event {
// connect event
case "Connect":
// process connect events from network.Connect()
// these are new connections to join the network
// decode the connection event
conn := new(pb.Connect)
if err := l.codec.Unmarshal(m.Body, conn); err != nil {
// skip error
continue
}
// get the existing lease if it exists
lease := conn.Lease
// if there's no lease create a new one
if lease == nil {
// create a new lease/node
lease = l.node.network.lease()
}
// send back a lease offer for the node
if err := l.send(&Message{
Header: map[string]string{
"Micro-Method": "Lease",
},
}, lease); err != nil {
return err
}
// the lease is saved
l.Lock()
l.lease = lease
l.Unlock()
// we've connected
// start processing the messages
go l.process()
return nil
case "Close":
l.Close()
return errors.New("connection closed")
default:
return errors.New("unknown method: " + event)
}
}
}
// connect sends a connect request and waits on a lease.
// this is for a new connection. in the event we send
// an existing lease, the same lease should be returned.
// if it differs then we assume our address for this link
// is different...
func (l *link) connect() error {
// get the current lease
l.RLock()
lease := l.lease
l.RUnlock()
// send a lease request
if err := l.send(&Message{
Header: map[string]string{
"Micro-Method": "Connect",
},
}, &pb.Connect{Lease: lease}); err != nil {
return err
}
// create the new things
tm := new(Message)
newLease := new(pb.Lease)
// wait for a response, hopefully a lease
if err := l.recv(tm, newLease); err != nil {
return err
}
event := tm.Header["Micro-Method"]
// check the method
switch event {
case "Lease":
// save the lease
l.Lock()
l.lease = newLease
l.Unlock()
// start processing the messages
go l.process()
case "Close":
l.socket.Close()
return errors.New("connection closed")
default:
return errors.New("unable to attain lease")
}
return nil
}
// send writes a message over the link's transport socket. If v is non-nil it
// is marshalled with the link codec and sent as the body, with the
// Content-Type header set; otherwise m's body is sent unchanged.
func (l *link) send(m *Message, v interface{}) error {
	tm := new(transport.Message)
	tm.Header = m.Header
	tm.Body = m.Body
	// set the body if not nil
	// we're assuming this is network message
	if v != nil {
		// encode the data
		b, err := l.codec.Marshal(v)
		if err != nil {
			return err
		}
		// set the content type
		tm.Header["Content-Type"] = "application/protobuf"
		// set the marshalled body
		tm.Body = b
	}
	fmt.Printf("link %s sending %+v %+v\n", l.id, m, v)
	// send tm (not a message rebuilt from m) so the marshalled body set
	// above is not silently discarded
	return l.socket.Send(tm)
}
// recv a message on the link
func (l *link) recv(m *Message, v interface{}) error {
if m.Header == nil {
m.Header = make(map[string]string)
}
tm := new(transport.Message)
// receive the transport message
if err := l.socket.Recv(tm); err != nil {
return err
}
fmt.Printf("link %s receiving %+v %+v\n", l.id, tm, v)
// set the message
m.Header = tm.Header
m.Body = tm.Body
// bail early
if v == nil {
return nil
}
// try unmarshal the body
// skip if there's no content-type
if tm.Header["Content-Type"] != "application/protobuf" {
return nil
}
// return unmarshalled
return l.codec.Unmarshal(m.Body, v.(gproto.Message))
}
// Close the link
func (l *link) Close() error {
// send a final close message
l.socket.Send(&transport.Message{
Header: map[string]string{
"Micro-Method": "Close",
},
})
// close the socket
return l.socket.Close()
}
// returns the node id
func (l *link) Id() string {
l.RLock()
defer l.RUnlock()
if l.lease == nil {
return ""
}
return l.lease.Node.Id
}
// Address of the node we're connected to
func (l *link) Address() string {
l.RLock()
defer l.RUnlock()
if l.lease == nil {
return l.socket.Remote()
}
// the node in the lease
return l.lease.Node.Address
}
func (l *link) Length() int {
l.RLock()
defer l.RUnlock()
return l.length
}
func (l *link) Weight() int {
l.RLock()
defer l.RUnlock()
return l.weight
}
func (l *link) Accept() (*Message, error) {
m := new(Message)
err := l.recv(m, nil)
if err != nil {
return nil, err
}
return m, nil
}
func (l *link) Send(m *Message) error {
return l.send(m, nil)
}
Make the link use debug
package network
import (
"errors"
"io"
"sync"
"github.com/micro/go-micro/util/log"
gproto "github.com/golang/protobuf/proto"
"github.com/micro/go-micro/codec"
pb "github.com/micro/go-micro/network/proto"
"github.com/micro/go-micro/transport"
)
type link struct {
// the embedded node
*node
sync.RWMutex
// the link id
id string
// the send queue to the socket
queue chan *Message
// codec we use to marshal things
codec codec.Marshaler
// the socket for this link
socket transport.Socket
// the lease for this link
lease *pb.Lease
// determines the cost of the link
// based on queue length and roundtrip
length int
weight int
}
// link methods
// process drains the link's send queue, writing each queued message to the
// socket. It returns when a send fails.
func (l *link) process() {
	for {
		msg := <-l.queue
		if err := l.send(msg, nil); err != nil {
			return
		}
	}
}
// accept waits for the connect message from the remote end and replies with
// a lease offer, then starts the send-queue processor. Any other method
// yields an error; a clean EOF returns nil without a lease being saved.
func (l *link) accept() error {
	for {
		m := new(transport.Message)
		err := l.socket.Recv(m)
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// TODO: pick a reliable header
		event := m.Header["Micro-Method"]
		switch event {
		// connect event
		case "Connect":
			// process connect events from network.Connect()
			// these are new connections to join the network
			// decode the connection event
			conn := new(pb.Connect)
			if err := l.codec.Unmarshal(m.Body, conn); err != nil {
				// skip the malformed message and keep waiting
				continue
			}
			// get the existing lease if it exists
			lease := conn.Lease
			// if there's no lease create a new one
			if lease == nil {
				// create a new lease/node
				lease = l.node.network.lease()
			}
			// send back a lease offer for the node
			if err := l.send(&Message{
				Header: map[string]string{
					"Micro-Method": "Lease",
				},
			}, lease); err != nil {
				return err
			}
			// the lease is saved
			l.Lock()
			l.lease = lease
			l.Unlock()
			// we've connected
			// start processing the messages
			go l.process()
			return nil
		case "Close":
			l.Close()
			return errors.New("connection closed")
		default:
			return errors.New("unknown method: " + event)
		}
	}
}
// connect sends a connect request and waits on a lease.
// this is for a new connection. in the event we send
// an existing lease, the same lease should be returned.
// if it differs then we assume our address for this link
// is different...
// Note: this blocks on recv until the remote end responds.
func (l *link) connect() error {
	// get the current lease
	l.RLock()
	lease := l.lease
	l.RUnlock()
	// send a lease request
	if err := l.send(&Message{
		Header: map[string]string{
			"Micro-Method": "Connect",
		},
	}, &pb.Connect{Lease: lease}); err != nil {
		return err
	}
	// create the new things
	tm := new(Message)
	newLease := new(pb.Lease)
	// wait for a response, hopefully a lease
	if err := l.recv(tm, newLease); err != nil {
		return err
	}
	event := tm.Header["Micro-Method"]
	// check the method
	switch event {
	case "Lease":
		// save the lease and start the send-queue processor
		l.Lock()
		l.lease = newLease
		l.Unlock()
		// start processing the messages
		go l.process()
	case "Close":
		l.socket.Close()
		return errors.New("connection closed")
	default:
		return errors.New("unable to attain lease")
	}
	return nil
}
// send writes a message over the link's transport socket. If v is non-nil it
// is marshalled with the link codec and sent as the body, with the
// Content-Type header set; otherwise m's body is sent unchanged.
func (l *link) send(m *Message, v interface{}) error {
	tm := new(transport.Message)
	tm.Header = m.Header
	tm.Body = m.Body
	// set the body if not nil
	// we're assuming this is network message
	if v != nil {
		// encode the data
		b, err := l.codec.Marshal(v)
		if err != nil {
			return err
		}
		// set the content type
		tm.Header["Content-Type"] = "application/protobuf"
		// set the marshalled body
		tm.Body = b
	}
	log.Debugf("link %s sending %+v %+v\n", l.id, m, v)
	// send tm (not a message rebuilt from m) so the marshalled body set
	// above is not silently discarded
	return l.socket.Send(tm)
}
// recv reads the next message from the link into m. If v is non-nil and the
// received Content-Type is protobuf, the body is also unmarshalled into v.
// v must be a gproto.Message; the type assertion below panics otherwise.
func (l *link) recv(m *Message, v interface{}) error {
	if m.Header == nil {
		m.Header = make(map[string]string)
	}
	tm := new(transport.Message)
	// receive the transport message
	if err := l.socket.Recv(tm); err != nil {
		return err
	}
	log.Debugf("link %s receiving %+v %+v\n", l.id, tm, v)
	// set the message
	m.Header = tm.Header
	m.Body = tm.Body
	// bail early
	if v == nil {
		return nil
	}
	// try unmarshal the body
	// skip if there's no content-type
	if tm.Header["Content-Type"] != "application/protobuf" {
		return nil
	}
	// return unmarshalled
	return l.codec.Unmarshal(m.Body, v.(gproto.Message))
}
// Close the link: best-effort notify the remote end with a "Close" message
// (the send error is deliberately ignored), then close the underlying socket.
func (l *link) Close() error {
	// send a final close message
	l.socket.Send(&transport.Message{
		Header: map[string]string{
			"Micro-Method": "Close",
		},
	})
	// close the socket
	return l.socket.Close()
}
// Id returns the node id recorded in the link's lease, or the empty string
// when no lease has been established yet.
func (l *link) Id() string {
	l.RLock()
	defer l.RUnlock()
	lease := l.lease
	if lease == nil {
		return ""
	}
	return lease.Node.Id
}
// Address returns the address of the remote node: the leased node address
// when a lease exists, otherwise the socket's remote address.
func (l *link) Address() string {
	l.RLock()
	defer l.RUnlock()
	lease := l.lease
	if lease == nil {
		return l.socket.Remote()
	}
	return lease.Node.Address
}
// Length reports the link's current length metric (part of its cost).
func (l *link) Length() int {
	l.RLock()
	n := l.length
	l.RUnlock()
	return n
}
// Weight reports the link's current weight metric (part of its cost).
func (l *link) Weight() int {
	l.RLock()
	w := l.weight
	l.RUnlock()
	return w
}
// Accept blocks until the next message arrives on the link and returns it.
func (l *link) Accept() (*Message, error) {
	msg := new(Message)
	if err := l.recv(msg, nil); err != nil {
		return nil, err
	}
	return msg, nil
}
// Send writes m to the link as-is, without any body marshalling.
func (l *link) Send(m *Message) error {
	return l.send(m, nil)
}
|
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"bufio"
"bytes"
"crypto/x509"
"encoding/binary"
"fmt"
"io"
"math"
"net"
"sync"
"sync/atomic"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/network/message"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/utils/formatting"
"github.com/ava-labs/avalanchego/utils/hashing"
"github.com/ava-labs/avalanchego/utils/timer"
"github.com/ava-labs/avalanchego/utils/wrappers"
)
// The signature of a peer's certificate on the byte representation
// of the peer's IP and time, and the time, in Unix seconds.
type signedPeerIP struct {
ip utils.IPDesc
time uint64
signature []byte
}
// alias is a secondary IP address where a peer
// was reached
type alias struct {
// ip where peer was reached
ip utils.IPDesc
// expiry is network time when the ip should be released
expiry time.Time
}
type peer struct {
net *network // network this peer is part of
// True if this peer has sent us a valid Version message and
// is running a compatible version.
// Only modified on the connection's reader routine.
gotVersion utils.AtomicBool
// True if this peer has sent us a valid PeerList message.
// Only modified on the connection's reader routine.
gotPeerList utils.AtomicBool
// only send the version to this peer on handling a getVersion message if
// a version hasn't already been sent.
versionSent utils.AtomicBool
versionWithSubnetsSent utils.AtomicBool
// only send the peerlist to this peer on handling a getPeerlist message if
// a peerlist hasn't already been sent.
peerListSent utils.AtomicBool
// True if the peer:
// * Has sent us a Version message
// * Has sent us a PeerList message
// * Is a compatible version
// Only modified on the connection's reader routine.
finishedHandshake utils.AtomicBool
// only close the peer once
once sync.Once
// if the close function has been called.
closed utils.AtomicBool
// queue of messages to be sent to this peer
sendQueue [][]byte
// Signalled when a message is added to [sendQueue],
// and when [p.closed] is set to true.
// [sendQueueCond.L] must be held when using [sendQueue].
sendQueueCond *sync.Cond
// ip may or may not be set when the peer is first started. is only modified
// on the connection's reader routine.
ip utils.IPDesc
// ipLock must be held when accessing [ip].
ipLock sync.RWMutex
// aliases is a list of IPs other than [ip] that we have connected to
// this peer at.
aliases []alias
// aliasTimer triggers the release of expired records from [aliases].
aliasTimer *timer.Timer
// aliasLock must be held when accessing [aliases] or [aliasTimer].
aliasLock sync.Mutex
// node ID of this peer.
nodeID ids.ShortID
// the connection object that is used to read/write messages from
conn net.Conn
// Version that this peer reported during the handshake.
// Set when we process the Version message from this peer.
versionStruct, versionStr utils.AtomicInterface
// Unix time of the last message sent and received respectively
// Must only be accessed atomically
lastSent, lastReceived int64
tickerCloser chan struct{}
// ticker processes
tickerOnce sync.Once
// [cert] is this peer's certificate (specifically the leaf of the certificate chain they provided)
cert *x509.Certificate
// sigAndTime contains a struct of type sigAndTime.
// The signature is [cert]'s signature on the peer's IP, concatenated with
// the peer's local time when it sent a Version.
// The time in [sigAndTime] is the one mentioned above.
sigAndTime utils.AtomicInterface
// Used in [handleAcceptedFrontier], [handleAccepted],
// [handleGetAccepted], [handleChits].
// We use this one ids.Set rather than allocating one per method call.
// Should be cleared before use.
// Should only be used in peer's reader goroutine.
idSet ids.Set
// True if we can compress messages sent to this peer
canHandleCompressed utils.AtomicBool
// trackedSubnets hold subnetIDs that this peer is interested in.
trackedSubnets ids.Set
}
// newPeer returns a properly initialized *peer.
// The peer tracks the primary network by default. Start must be called
// separately to begin the handshake and the read/write loops.
func newPeer(net *network, conn net.Conn, ip utils.IPDesc) *peer {
	p := &peer{
		sendQueueCond: sync.NewCond(&sync.Mutex{}),
		net:           net,
		conn:          conn,
		ip:            ip,
		tickerCloser:  make(chan struct{}),
	}
	p.aliasTimer = timer.NewTimer(p.releaseExpiredAliases)
	p.trackedSubnets.Add(constants.PrimaryNetworkID)
	return p
}
// assume the [stateLock] is held
// Start launches the peer's read and write routines after first sending the
// version handshake messages. Both sendVersionWithSubnets and sendVersion
// are invoked — presumably so both newer and older peers can understand the
// handshake; confirm against the message handlers.
func (p *peer) Start() {
	go func() {
		// Make sure that the version is the first message sent
		p.sendVersionWithSubnets()
		p.sendVersion()
		go p.ReadMessages()
		go p.WriteMessages()
	}()
}
// StartTicker launches the peer's background routines: handshake retries,
// periodic pings, and alias expiry monitoring.
func (p *peer) StartTicker() {
	go p.requestFinishHandshake()
	go p.sendPings()
	go p.monitorAliases()
}
// sendPings pings the peer once per pingFrequency until the connection is
// closed or the ticker closer fires.
func (p *peer) sendPings() {
	ticker := time.NewTicker(p.net.pingFrequency)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if p.closed.GetValue() {
				return
			}
			p.sendPing()
		case <-p.tickerCloser:
			return
		}
	}
}
// request missing handshake messages from the peer, retrying every
// getVersionTimeout until the handshake completes or the peer is closed.
func (p *peer) requestFinishHandshake() {
	finishHandshakeTicker := time.NewTicker(p.net.getVersionTimeout)
	defer finishHandshakeTicker.Stop()
	for {
		select {
		case <-finishHandshakeTicker.C:
			if p.finishedHandshake.GetValue() {
				return
			}
			if p.closed.GetValue() {
				return
			}
			// re-request only the pieces still missing
			if !p.gotVersion.GetValue() {
				p.sendGetVersion()
			}
			if !p.gotPeerList.GetValue() {
				p.sendGetPeerList()
			}
		case <-p.tickerCloser:
			return
		}
	}
}
// monitorAliases periodically attempts
// to release timed out alias IPs of the
// peer.
//
// monitorAliases will acquire [stateLock]
// when an alias is released.
func (p *peer) monitorAliases() {
	// Stop the alias timer once the peer's tickers are shut down.
	go func() {
		<-p.tickerCloser
		p.aliasTimer.Stop()
	}()
	// Dispatch blocks, repeatedly firing releaseExpiredAliases until the
	// timer is stopped above.
	p.aliasTimer.Dispatch()
}
// Read and handle messages from this peer.
// When this method returns, the connection is closed.
// Wire format: a big-endian uint32 length prefix followed by the message
// bytes; each message must pass the inbound throttler before being read.
func (p *peer) ReadMessages() {
	defer p.Close()
	// Continuously read and handle messages from this peer.
	reader := bufio.NewReader(p.conn)
	msgLenBytes := make([]byte, wrappers.IntLen)
	for {
		// Time out and close connection if we can't read message length
		if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil {
			p.net.log.Verbo("error setting the connection read timeout on %s%s at %s %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}
		// Read the message length
		if _, err := io.ReadFull(reader, msgLenBytes); err != nil {
			p.net.log.Verbo("error reading from %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}
		// Parse the message length
		msgLen := binary.BigEndian.Uint32(msgLenBytes)
		// Make sure the message length is valid.
		if int64(msgLen) > p.net.maxMessageSize {
			p.net.log.Verbo("too large message length %d from %s%s at %s", msgLen, constants.NodeIDPrefix, p.nodeID, p.getIP())
			return
		}
		// Wait until the throttler says we can proceed to read the message.
		// Note that when we are done handling this message, or give up
		// trying to read it, we must call [p.net.msgThrottler.Release]
		// to give back the bytes used by this message.
		p.net.inboundMsgThrottler.Acquire(uint64(msgLen), p.nodeID)
		// Invariant: When done processing this message, onFinishedHandling() is called.
		// If this is not honored, the message throttler will leak until no new messages can be read.
		// You can look at message throttler metrics to verify that there is no leak.
		onFinishedHandling := func() { p.net.inboundMsgThrottler.Release(uint64(msgLen), p.nodeID) }
		// Time out and close connection if we can't read message
		if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil {
			p.net.log.Verbo("error setting the connection read timeout on %s%s at %s %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			onFinishedHandling()
			return
		}
		// Read the message
		msgBytes := make([]byte, msgLen)
		if _, err := io.ReadFull(reader, msgBytes); err != nil {
			p.net.log.Verbo("error reading from %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			onFinishedHandling()
			return
		}
		p.net.log.Verbo("parsing message from %s%s at %s:\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msgBytes})
		// Parse the message
		msg, err := p.net.c.Parse(msgBytes, p.canHandleCompressed.GetValue())
		if err != nil {
			p.net.log.Verbo("failed to parse message from %s%s at %s:\n%s\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msgBytes}, err)
			// Couldn't parse the message. Read the next one.
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			continue
		}
		// Handle the message. Note that when we are done handling
		// this message, we must call [p.net.msgThrottler.Release]
		// to release the bytes used by this message. See MsgThrottler.
		p.handle(msg, onFinishedHandling)
	}
}
// attempt to write messages to the peer
// WriteMessages drains [sendQueue], framing each message with a big-endian
// uint32 length prefix. When this method returns, the connection is closed.
func (p *peer) WriteMessages() {
	defer p.Close()
	// reader is reused across iterations so each write does not allocate.
	var reader bytes.Reader
	writer := bufio.NewWriter(p.conn)
	for { // When this loop exits, p.sendQueueCond.L is unlocked
		p.sendQueueCond.L.Lock()
		for {
			if p.closed.GetValue() {
				p.sendQueueCond.L.Unlock()
				return
			}
			if len(p.sendQueue) > 0 {
				// There is a message to send
				break
			}
			// Wait until there is a message to send
			p.sendQueueCond.Wait()
		}
		msg := p.sendQueue[0]
		p.sendQueue = p.sendQueue[1:]
		p.sendQueueCond.L.Unlock()
		msgLen := uint32(len(msg))
		// Give back the outbound throttler bytes acquired in Send.
		p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)
		p.net.log.Verbo("sending message to %s%s at %s:\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msg})
		msgb := [wrappers.IntLen]byte{}
		binary.BigEndian.PutUint32(msgb[:], msgLen)
		for _, byteSlice := range [2][]byte{msgb[:], msg} {
			reader.Reset(byteSlice)
			if err := p.conn.SetWriteDeadline(p.nextTimeout()); err != nil {
				p.net.log.Verbo("error setting write deadline to %s%s at %s due to: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
				return
			}
			if _, err := io.CopyN(writer, &reader, int64(len((byteSlice)))); err != nil {
				p.net.log.Verbo("error writing to %s%s at %s due to: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
				return
			}
			p.tickerOnce.Do(p.StartTicker)
		}
		// Make sure the peer got the entire message
		if err := writer.Flush(); err != nil {
			p.net.log.Verbo("couldn't flush writer to %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}
		now := p.net.clock.Time().Unix()
		atomic.StoreInt64(&p.lastSent, now)
		atomic.StoreInt64(&p.net.lastMsgSentTime, now)
		// Return the buffer to the pool for reuse.
		p.net.byteSlicePool.Put(msg)
	}
}
// Send assumes that the [stateLock] is not held.
// It enqueues [msg] for the write loop and reports whether the message was
// accepted (false means it was dropped by throttling or a closed connection).
// If [canModifyMsg], [msg] may be modified by this method.
// If ![canModifyMsg], [msg] will not be modified by this method.
// [canModifyMsg] should be false if [msg] is sent in a loop, for example.
func (p *peer) Send(msg message.Message, canModifyMsg bool) bool {
	msgBytes := msg.Bytes()
	msgLen := int64(len(msgBytes))
	// Acquire space on the outbound message queue, or drop [msg] if we can't
	dropMsg := !p.net.outboundMsgThrottler.Acquire(uint64(msgLen), p.nodeID)
	if dropMsg {
		p.net.log.Debug("dropping %s message to %s%s at %s due to rate-limiting", msg.Op(), constants.NodeIDPrefix, p.nodeID, p.getIP())
		return false
	}
	// Invariant: must call p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)
	// when done sending [msg] or when we give up sending [msg]
	p.sendQueueCond.L.Lock()
	defer p.sendQueueCond.L.Unlock()
	if p.closed.GetValue() {
		p.net.log.Debug("dropping message to %s%s at %s due to a closed connection", constants.NodeIDPrefix, p.nodeID, p.getIP())
		p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)
		return false
	}
	// If the flag says to not modify [msgBytes], copy it so that the copy,
	// not [msgBytes], will be put back into the []byte pool after it's written.
	toSend := msgBytes
	if !canModifyMsg {
		toSend = make([]byte, msgLen)
		copy(toSend, msgBytes)
	}
	p.sendQueue = append(p.sendQueue, toSend)
	// Wake the write loop waiting in WriteMessages.
	p.sendQueueCond.Signal()
	return true
}
// handle processes one inbound [msg] from this peer. Network-level messages
// (handshake, ping/pong, peer gossip) are handled inline and call
// [onFinishedHandling] here; consensus messages are forwarded to the router,
// which takes over responsibility for calling [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handle(msg message.Message, onFinishedHandling func()) {
	// Record receipt time for liveness/ping-pong bookkeeping.
	now := p.net.clock.Time()
	atomic.StoreInt64(&p.lastReceived, now.Unix())
	atomic.StoreInt64(&p.net.lastMsgReceivedTime, now.Unix())
	msgLen := uint64(len(msg.Bytes()))

	op := msg.Op()
	msgMetrics := p.net.message(op)
	if msgMetrics == nil {
		p.net.log.Error("dropping an unknown message from %s%s at %s with op %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), op)
		onFinishedHandling()
		return
	}
	msgMetrics.numReceived.Inc()
	msgMetrics.receivedBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		msgMetrics.savedReceivedBytes.Observe(float64(saved))
	}

	switch op { // Network-related message types
	case message.Version:
		p.handleVersion(msg)
		onFinishedHandling()
		return
	case message.VersionWithSubnets:
		p.handleVersionWithSubnets(msg)
		onFinishedHandling()
		return
	case message.GetVersion:
		p.handleGetVersion(msg)
		onFinishedHandling()
		return
	case message.Ping:
		p.handlePing(msg)
		onFinishedHandling()
		return
	case message.Pong:
		p.handlePong(msg)
		onFinishedHandling()
		return
	case message.GetPeerList:
		p.handleGetPeerList(msg)
		onFinishedHandling()
		return
	case message.PeerList:
		p.handlePeerList(msg)
		onFinishedHandling()
		return
	}

	// Consensus messages are dropped until the handshake completes; nudge the
	// peer for whatever handshake message is still missing.
	if !p.finishedHandshake.GetValue() {
		p.net.log.Debug("dropping %s from %s%s at %s because handshake isn't finished", op, constants.NodeIDPrefix, p.nodeID, p.getIP())

		// attempt to finish the handshake
		if !p.gotVersion.GetValue() {
			p.sendGetVersion()
		}
		if !p.gotPeerList.GetValue() {
			p.sendGetPeerList()
		}
		onFinishedHandling()
		return
	}

	switch op { // Consensus-related messages
	case message.GetAcceptedFrontier:
		p.handleGetAcceptedFrontier(msg, onFinishedHandling)
	case message.AcceptedFrontier:
		p.handleAcceptedFrontier(msg, onFinishedHandling)
	case message.GetAccepted:
		p.handleGetAccepted(msg, onFinishedHandling)
	case message.Accepted:
		p.handleAccepted(msg, onFinishedHandling)
	case message.Get:
		p.handleGet(msg, onFinishedHandling)
	case message.GetAncestors:
		p.handleGetAncestors(msg, onFinishedHandling)
	case message.Put:
		p.handlePut(msg, onFinishedHandling)
	case message.MultiPut:
		p.handleMultiPut(msg, onFinishedHandling)
	case message.PushQuery:
		p.handlePushQuery(msg, onFinishedHandling)
	case message.PullQuery:
		p.handlePullQuery(msg, onFinishedHandling)
	case message.Chits:
		p.handleChits(msg, onFinishedHandling)
	default:
		p.net.log.Debug("dropping an unknown message from %s%s at %s with op %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), op)
		onFinishedHandling()
	}
}
// Close tears down the connection to this peer. Safe to call multiple times;
// the underlying [close] runs at most once via [p.once].
//
// assumes the [stateLock] is not held
func (p *peer) Close() { p.once.Do(p.close) }
// close tears down this peer: stops ticker goroutines, closes the connection,
// releases throttler space held by unsent queued messages, and unregisters
// the peer from the network.
//
// assumes only [peer.Close] calls this.
// By the time this message returns, [p] has been removed from [p.net.peers]
func (p *peer) close() {
	// If the connection is closing, we can immediately cancel the ticker
	// goroutines.
	close(p.tickerCloser)

	p.closed.SetValue(true)

	if err := p.conn.Close(); err != nil {
		p.net.log.Debug("closing connection to %s%s at %s resulted in an error: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
	}

	p.sendQueueCond.L.Lock()
	// Release the bytes of the unsent messages to the outbound message throttler
	for i := 0; i < len(p.sendQueue); i++ {
		p.net.outboundMsgThrottler.Release(uint64(len(p.sendQueue[i])), p.nodeID)
	}
	p.sendQueue = nil
	p.sendQueueCond.L.Unlock()
	// Per [p.sendQueueCond]'s spec, it is signalled when [p.closed] is set to true
	// so that we exit the WriteMessages goroutine.
	// Since [p.closed] is now true, nothing else will be put on [p.sendQueue]
	p.sendQueueCond.Signal()

	p.net.disconnected(p)
}
// sendGetVersion asks this peer for its Version message and records metrics
// for the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendGetVersion() {
	msg, err := p.net.b.GetVersion()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.metrics.getVersion.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.metrics.getVersion.numSent.Inc()
	p.net.metrics.getVersion.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.getVersion.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// sendVersion sends our signed Version message (legacy format, without
// tracked subnets) to this peer and records metrics for the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendVersion() {
	// Read our IP and build the signed version payload under the read lock.
	p.net.stateLock.RLock()
	myIP := p.net.ip.IP()
	myVersionTime, myVersionSig, err := p.net.getVersion(myIP)
	if err != nil {
		p.net.stateLock.RUnlock()
		return
	}
	msg, err := p.net.b.Version(
		p.net.networkID,
		p.net.nodeID,
		p.net.clock.Unix(),
		myIP,
		p.net.versionCompatibility.Version().String(),
		myVersionTime,
		myVersionSig,
	)
	p.net.stateLock.RUnlock()
	p.net.log.AssertNoError(err)
	lenMsg := len(msg.Bytes())

	sent := p.Send(msg, true)
	if sent {
		p.net.metrics.version.numSent.Inc()
		p.net.metrics.version.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.version.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
		// Remember we already answered a GetVersion (see [handleGetVersion]).
		p.versionSent.SetValue(true)
	} else {
		p.net.metrics.version.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendVersionWithSubnets sends our signed Version message including the list
// of whitelisted subnets we track, and records metrics for the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendVersionWithSubnets() {
	// Read our IP, signature, and subnet list under the read lock.
	p.net.stateLock.RLock()
	myIP := p.net.ip.IP()
	myVersionTime, myVersionSig, err := p.net.getVersion(myIP)
	if err != nil {
		p.net.stateLock.RUnlock()
		return
	}
	whitelistedSubnets := p.net.whitelistedSubnets
	msg, err := p.net.b.VersionWithSubnets(
		p.net.networkID,
		p.net.nodeID,
		p.net.clock.Unix(),
		myIP,
		p.net.versionCompatibility.Version().String(),
		myVersionTime,
		myVersionSig,
		whitelistedSubnets.List(),
	)
	p.net.stateLock.RUnlock()
	p.net.log.AssertNoError(err)
	lenMsg := len(msg.Bytes())

	sent := p.Send(msg, true)
	if sent {
		p.net.metrics.versionWithSubnets.numSent.Inc()
		p.net.metrics.versionWithSubnets.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.versionWithSubnets.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
		// Remember we already answered a GetVersion (see [handleGetVersion]).
		p.versionWithSubnetsSent.SetValue(true)
	} else {
		p.net.metrics.versionWithSubnets.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendGetPeerList asks this peer for its known peers and records metrics for
// the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendGetPeerList() {
	msg, err := p.net.b.GetPeerList()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.getPeerlist.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.getPeerlist.numSent.Inc()
	p.net.getPeerlist.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.getPeerlist.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// sendPeerList pushes our validator IP list to this peer and records metrics
// for the attempt.
//
// assumes the stateLock is not held
func (p *peer) sendPeerList() {
	peers, err := p.net.validatorIPs()
	if err != nil {
		return
	}

	// Compress this message only if the peer can handle compressed
	// messages and we have compression enabled
	compressable := p.canHandleCompressed.GetValue()
	msg, err := p.net.b.PeerList(peers, compressable, compressable && p.net.compressionEnabled)
	if err != nil {
		p.net.log.Warn("failed to send PeerList to %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		return
	}
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.peerList.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.peerList.numSent.Inc()
	p.net.peerList.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.peerList.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
	// Remember we already answered a GetPeerList (see [handleGetPeerList]).
	p.peerListSent.SetValue(true)
}
// sendPing sends a Ping to this peer and records metrics for the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendPing() {
	msg, err := p.net.b.Ping()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.ping.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.ping.numSent.Inc()
	p.net.ping.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.ping.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// sendPong sends a Pong to this peer and records metrics for the attempt.
//
// assumes the [stateLock] is not held
func (p *peer) sendPong() {
	msg, err := p.net.b.Pong()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.pong.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.pong.numSent.Inc()
	p.net.pong.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.pong.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// handleGetVersion answers a GetVersion request. Both the subnet-aware and
// legacy Version formats are (re)sent if not already sent; the subnet-aware
// one is sent first, mirroring the order in [Start].
//
// assumes the [stateLock] is not held
func (p *peer) handleGetVersion(_ message.Message) {
	if !p.versionWithSubnetsSent.GetValue() {
		p.sendVersionWithSubnets()
	}
	if !p.versionSent.GetValue() {
		p.sendVersion()
	}
}
// handleVersion processes a legacy Version message (no tracked-subnet list).
//
// assumes the [stateLock] is not held
func (p *peer) handleVersion(msg message.Message) {
	p.versionCheck(msg, false)
}
// handleVersionWithSubnets processes a Version message that also carries the
// peer's tracked-subnet list.
//
// assumes the [stateLock] is not held
func (p *peer) handleVersionWithSubnets(msg message.Message) {
	p.versionCheck(msg, true)
}
// versionCheck validates a Version (or VersionWithSubnets) message from this
// peer: node/network IDs, clock skew, version compatibility, and the peer's
// signature over its claimed IP+timestamp. On success it records the signed
// IP, notes which subnets the peer tracks, replies with our peer list, and
// marks the handshake's Version step complete.
//
// assumes the [stateLock] is not held
func (p *peer) versionCheck(msg message.Message, isVersionWithSubnets bool) {
	switch {
	case p.gotVersion.GetValue():
		p.net.log.Verbo("dropping duplicated version message from %s%s at %s", constants.NodeIDPrefix, p.nodeID, p.getIP())
		return
	case msg.Get(message.NodeID).(uint32) == p.net.nodeID:
		p.net.log.Debug("peer at %s has same node ID as me", p.getIP())
		p.discardMyIP()
		return
	case msg.Get(message.NetworkID).(uint32) != p.net.networkID:
		p.net.log.Debug(
			"network ID of %s%s at %s (%d) doesn't match our's (%d)",
			constants.NodeIDPrefix, p.nodeID, p.getIP(), msg.Get(message.NetworkID).(uint32), p.net.networkID,
		)
		p.discardIP()
		return
	case p.closed.GetValue():
		return
	}

	// Reject peers whose clock is too far from ours.
	myTime := float64(p.net.clock.Unix())
	peerTime := float64(msg.Get(message.MyTime).(uint64))
	if math.Abs(peerTime-myTime) > p.net.maxClockDifference.Seconds() {
		if p.net.beacons.Contains(p.nodeID) {
			p.net.log.Warn(
				"beacon %s%s at %s reports time (%d) that is too far out of sync with our's (%d)",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), uint64(peerTime), uint64(myTime),
			)
		} else {
			p.net.log.Debug(
				"peer %s%s at %s reports time (%d) that is too far out of sync with our's (%d)",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), uint64(peerTime), uint64(myTime),
			)
		}
		p.discardIP()
		return
	}

	peerVersionStr := msg.Get(message.VersionStr).(string)
	peerVersion, err := p.net.parser.Parse(peerVersionStr)
	if err != nil {
		p.net.log.Debug("version of %s%s at %s could not be parsed: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		p.discardIP()
		p.net.metrics.failedToParse.Inc()
		return
	}

	if p.net.versionCompatibility.Version().Before(peerVersion) {
		if p.net.beacons.Contains(p.nodeID) {
			p.net.log.Info(
				"beacon %s%s at %s attempting to connect with newer version %s. You may want to update your client",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion,
			)
		} else {
			p.net.log.Debug(
				"peer %s%s at %s attempting to connect with newer version %s. You may want to update your client",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion,
			)
		}
	}

	if err := p.net.versionCompatibility.Compatible(peerVersion); err != nil {
		p.net.log.Verbo("peer %s%s at %s version (%s) not compatible: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion, err)
		p.discardIP()
		return
	}

	peerIP := msg.Get(message.IP).(utils.IPDesc)

	versionTime := msg.Get(message.VersionTime).(uint64)
	p.net.stateLock.RLock()
	latestPeerIP := p.net.latestPeerIP[p.nodeID]
	p.net.stateLock.RUnlock()
	// Ignore stale IP claims: we already have a newer signed IP for this peer.
	if latestPeerIP.time > versionTime {
		p.discardIP()
		return
	}
	if float64(versionTime)-myTime > p.net.maxClockDifference.Seconds() {
		// BUGFIX: log the timestamp being rejected ([versionTime]), not the
		// previously recorded [latestPeerIP.time].
		p.net.log.Debug(
			"peer %s%s at %s attempting to connect with version timestamp (%d) too far in the future",
			constants.NodeIDPrefix, p.nodeID, p.getIP(), versionTime,
		)
		p.discardIP()
		return
	}

	// Verify the peer's certificate signed the claimed (IP, time) pair.
	sig := msg.Get(message.SigBytes).([]byte)
	signed := ipAndTimeBytes(peerIP, versionTime)
	if err := p.cert.CheckSignature(p.cert.SignatureAlgorithm, signed, sig); err != nil {
		p.net.log.Debug("signature verification failed for %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		p.discardIP()
		return
	}

	p.canHandleCompressed.SetValue(peerVersion.Compare(minVersionCanHandleCompressed) >= 0)

	signedPeerIP := signedPeerIP{
		ip:        peerIP,
		time:      versionTime,
		signature: sig,
	}

	p.net.stateLock.Lock()
	p.net.latestPeerIP[p.nodeID] = signedPeerIP
	p.net.stateLock.Unlock()

	p.sigAndTime.SetValue(signedPeerIP)

	if isVersionWithSubnets {
		subnetIDsBytes := msg.Get(message.TrackedSubnets).([][]byte)
		for _, subnetIDBytes := range subnetIDsBytes {
			subnetID, err := ids.ToID(subnetIDBytes)
			if err != nil {
				// BUGFIX: %w is only meaningful to fmt.Errorf; use %s in logs.
				p.net.log.Debug("peer %s%s sent wrong subnetID %s", constants.NodeIDPrefix, p.nodeID, err)
				continue
			}
			// add only if we also track this subnet
			if p.net.whitelistedSubnets.Contains(subnetID) {
				p.trackedSubnets.Add(subnetID)
			}
		}
	} else {
		// this peer has old Version, we don't know what its interested in.
		// so assume that it tracks all available subnets
		p.trackedSubnets.Add(p.net.whitelistedSubnets.List()...)
	}
	// BUGFIX: removed leftover debug fmt.Println(p.trackedSubnets).
	// NOTE(review): if this was the file's only use of "fmt", drop the import.

	if ip := p.getIP(); ip.IsZero() {
		addr := p.conn.RemoteAddr()
		localPeerIP, err := utils.ToIPDesc(addr.String())
		if err == nil {
			// If we have no clue what the peer's IP is, we can't perform any
			// verification
			if peerIP.IP.Equal(localPeerIP.IP) {
				// if the IPs match, add this ip:port pair to be tracked
				p.setIP(peerIP)
			}
		}
	}

	p.sendPeerList()

	p.versionStruct.SetValue(peerVersion)
	p.versionStr.SetValue(peerVersion.String())
	p.gotVersion.SetValue(true)

	p.tryMarkFinishedHandshake()
}
// handleGetPeerList answers a GetPeerList request, but only after the peer
// has identified itself with a Version and only if we haven't already sent
// our peer list.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetPeerList(_ message.Message) {
	if p.gotVersion.GetValue() && !p.peerListSent.GetValue() {
		p.sendPeerList()
	}
}
// trackSignedPeer validates a gossiped (IP, timestamp, cert, signature) tuple
// and, if it is a validator/beacon we aren't already connected to with a
// fresher record, starts tracking that IP for connection.
//
// Acquires [stateLock] for the duration of the call.
func (p *peer) trackSignedPeer(peer utils.IPCertDesc) {
	p.net.stateLock.Lock()
	defer p.net.stateLock.Unlock()

	// Skip our own IP, empty IPs, and private IPs when disallowed.
	switch {
	case peer.IPDesc.Equal(p.net.ip.IP()):
		return
	case peer.IPDesc.IsZero():
		return
	case !p.net.allowPrivateIPs && peer.IPDesc.IsPrivate():
		return
	}

	if float64(peer.Time)-float64(p.net.clock.Unix()) > p.net.maxClockDifference.Seconds() {
		p.net.log.Debug("ignoring gossiped peer with version timestamp (%d) too far in the future", peer.Time)
		return
	}

	nodeID := certToID(peer.Cert)
	if !p.net.vdrs.Contains(nodeID) && !p.net.beacons.Contains(nodeID) {
		p.net.log.Verbo(
			"not peering to %s at %s because they are not a validator or beacon",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc,
		)
		return
	}

	// Am I already peered to them? (safe because [p.net.stateLock] is held)
	if foundPeer, ok := p.net.peers.getByID(nodeID); ok && !foundPeer.closed.GetValue() {
		p.net.log.Verbo(
			"not peering to %s because we are already connected to %s",
			peer.IPDesc, nodeID.PrefixedString(constants.NodeIDPrefix),
		)
		return
	}

	// Ignore records older than the freshest one we already hold.
	if p.net.latestPeerIP[nodeID].time > peer.Time {
		p.net.log.Verbo(
			"not peering to %s at %s: the given timestamp (%d) < latest (%d)",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc, peer.Time, p.net.latestPeerIP[nodeID].time,
		)
		return
	}

	// The gossiped record must be signed by the peer's own certificate.
	signed := ipAndTimeBytes(peer.IPDesc, peer.Time)
	err := peer.Cert.CheckSignature(peer.Cert.SignatureAlgorithm, signed, peer.Signature)
	if err != nil {
		p.net.log.Debug(
			"signature verification failed for %s at %s: %s",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc, err,
		)
		return
	}

	p.net.latestPeerIP[nodeID] = signedPeerIP{
		ip:   peer.IPDesc,
		time: peer.Time,
	}

	p.net.track(peer.IPDesc, nodeID)
}
// handlePeerList records that this peer completed the PeerList half of the
// handshake, then evaluates each gossiped signed IP for tracking.
//
// assumes the [stateLock] is not held
func (p *peer) handlePeerList(msg message.Message) {
	p.gotPeerList.SetValue(true)
	p.tryMarkFinishedHandshake()

	for _, signedIP := range msg.Get(message.SignedPeers).([]utils.IPCertDesc) {
		p.trackSignedPeer(signedIP)
	}
}
// handlePing answers a Ping with a Pong.
//
// assumes the [stateLock] is not held
func (p *peer) handlePing(_ message.Message) {
	p.sendPong()
}
// handlePong is a no-op; receiving any message already refreshed
// [p.lastReceived] in [handle].
//
// assumes the [stateLock] is not held
func (p *peer) handlePong(_ message.Message) {}
// handleGetAcceptedFrontier parses a GetAcceptedFrontier request and forwards
// it to the router, which takes over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAcceptedFrontier(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	reqID := msg.Get(message.RequestID).(uint32)
	timeout := time.Duration(msg.Get(message.Deadline).(uint64))
	deadline := p.net.clock.Time().Add(timeout)

	p.net.router.GetAcceptedFrontier(p.nodeID, chainID, reqID, deadline, onFinishedHandling)
}
// handleAcceptedFrontier parses an AcceptedFrontier response, rejecting
// malformed or duplicate container IDs, and forwards it to the router.
// [p.idSet] is a reusable scratch set (reader goroutine only).
//
// assumes the [stateLock] is not held
func (p *peer) handleAcceptedFrontier(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		// Duplicate IDs indicate a malformed (or malicious) message.
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.AcceptedFrontier(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// handleGetAccepted parses a GetAccepted request, rejecting malformed or
// duplicate container IDs, and forwards it to the router.
// [p.idSet] is a reusable scratch set (reader goroutine only).
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAccepted(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		// Duplicate IDs indicate a malformed (or malicious) message.
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.GetAccepted(
		p.nodeID,
		chainID,
		requestID,
		deadline,
		containerIDs,
		onFinishedHandling,
	)
}
// handleAccepted parses an Accepted response, rejecting malformed or
// duplicate container IDs, and forwards it to the router.
// [p.idSet] is a reusable scratch set (reader goroutine only).
//
// assumes the [stateLock] is not held
func (p *peer) handleAccepted(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		// Duplicate IDs indicate a malformed (or malicious) message.
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.Accepted(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// handleGet parses a Get request and forwards it to the router, which takes
// over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handleGet(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	reqID := msg.Get(message.RequestID).(uint32)
	timeout := time.Duration(msg.Get(message.Deadline).(uint64))
	deadline := p.net.clock.Time().Add(timeout)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	p.net.router.Get(p.nodeID, chainID, reqID, deadline, containerID, onFinishedHandling)
}
// handleGetAncestors parses a GetAncestors request and forwards it to the
// router, which takes over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAncestors(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	p.net.router.GetAncestors(
		p.nodeID,
		chainID,
		requestID,
		deadline,
		containerID,
		onFinishedHandling,
	)
}
// handlePut parses a Put message and forwards it to the router, which takes
// over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handlePut(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	reqID := msg.Get(message.RequestID).(uint32)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)
	containerBytes := msg.Get(message.ContainerBytes).([]byte)

	p.net.router.Put(p.nodeID, chainID, reqID, containerID, containerBytes, onFinishedHandling)
}
// handleMultiPut parses a MultiPut message and forwards it to the router,
// which takes over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handleMultiPut(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	reqID := msg.Get(message.RequestID).(uint32)
	containerBatch := msg.Get(message.MultiContainerBytes).([][]byte)

	p.net.router.MultiPut(p.nodeID, chainID, reqID, containerBatch, onFinishedHandling)
}
// handlePushQuery parses a PushQuery message and forwards it to the router,
// which takes over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handlePushQuery(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)
	container := msg.Get(message.ContainerBytes).([]byte)

	p.net.router.PushQuery(
		p.nodeID,
		chainID,
		requestID,
		deadline,
		containerID,
		container,
		onFinishedHandling,
	)
}
// handlePullQuery parses a PullQuery message and forwards it to the router,
// which takes over [onFinishedHandling].
//
// assumes the [stateLock] is not held
func (p *peer) handlePullQuery(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	reqID := msg.Get(message.RequestID).(uint32)
	timeout := time.Duration(msg.Get(message.Deadline).(uint64))
	deadline := p.net.clock.Time().Add(timeout)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	p.net.router.PullQuery(p.nodeID, chainID, reqID, deadline, containerID, onFinishedHandling)
}
// handleChits parses a Chits (votes) message, rejecting malformed or
// duplicate container IDs, and forwards it to the router.
// [p.idSet] is a reusable scratch set (reader goroutine only).
//
// assumes the [stateLock] is not held
func (p *peer) handleChits(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s 0x%x: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		// Duplicate IDs indicate a malformed (or malicious) message.
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.Chits(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// tryMarkFinishedHandshake notifies the network that the handshake with this
// peer is complete once both Version and PeerList have been received and the
// connection is still open.
//
// NOTE(review): the original comment said "assumes the [stateLock] is held",
// but the visible callers ([versionCheck], [handlePeerList]) invoke this
// after releasing [stateLock] — confirm which contract [p.net.connected]
// actually expects.
func (p *peer) tryMarkFinishedHandshake() {
	if !p.finishedHandshake.GetValue() && // not already marked as finished with handshake
		p.gotVersion.GetValue() && // not waiting for Version
		p.gotPeerList.GetValue() && // not waiting for PeerList
		!p.closed.GetValue() { // not already disconnected
		p.net.connected(p)
	}
}
// discardIP clears this peer's IP so we never attempt to reconnect to it,
// removes it from the disconnected-IP tracking set, and closes the peer.
//
// Acquires [stateLock]; must be called without it held.
func (p *peer) discardIP() {
	// By clearing the IP, we will not attempt to reconnect to this peer
	if ip := p.getIP(); !ip.IsZero() {
		p.setIP(utils.IPDesc{})

		p.net.stateLock.Lock()
		delete(p.net.disconnectedIPs, ip.String())
		p.net.stateLock.Unlock()
	}
	p.Close()
}
// discardMyIP is like [discardIP] but additionally records the address in
// [p.net.myIPs]: the "peer" turned out to be ourselves, so the address must
// never be dialed again.
//
// Acquires [stateLock]; must be called without it held.
func (p *peer) discardMyIP() {
	// By clearing the IP, we will not attempt to reconnect to this peer
	if ip := p.getIP(); !ip.IsZero() {
		p.setIP(utils.IPDesc{})

		str := ip.String()

		p.net.stateLock.Lock()
		p.net.myIPs[str] = struct{}{}
		delete(p.net.disconnectedIPs, str)
		p.net.stateLock.Unlock()
	}
	p.Close()
}
// setIP records [ip] as this peer's address under [ipLock].
func (p *peer) setIP(ip utils.IPDesc) {
	p.ipLock.Lock()
	defer p.ipLock.Unlock()
	p.ip = ip
}
// getIP returns this peer's current address under [ipLock]; may be the zero
// IPDesc if the address is unknown or was discarded.
func (p *peer) getIP() utils.IPDesc {
	p.ipLock.RLock()
	defer p.ipLock.RUnlock()
	return p.ip
}
// addAlias marks that we have found another
// IP that we can connect to this peer at.
//
// assumes [stateLock] is held
func (p *peer) addAlias(ip utils.IPDesc) {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()

	p.net.peerAliasIPs[ip.String()] = struct{}{}
	p.aliases = append(p.aliases, alias{
		ip:     ip,
		expiry: p.net.clock.Time().Add(p.net.peerAliasTimeout),
	})

	// Set the [aliasTimer] if this ip is the first alias we put
	// in [aliases].
	if len(p.aliases) == 1 {
		p.aliasTimer.SetTimeoutIn(p.net.peerAliasTimeout)
	}
}
// releaseNextAlias returns the next released alias or nil if none was released.
// If none was released, then this will schedule the next time to remove an
// alias.
//
// assumes [stateLock] is held
func (p *peer) releaseNextAlias(now time.Time) *alias {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()

	if len(p.aliases) == 0 {
		return nil
	}

	// [aliases] is ordered by insertion, so the head expires first; if it
	// hasn't expired yet, re-arm the timer for the remaining duration.
	next := p.aliases[0]
	if timeUntilExpiry := next.expiry.Sub(now); timeUntilExpiry > 0 {
		p.aliasTimer.SetTimeoutIn(timeUntilExpiry)
		return nil
	}
	p.aliases = p.aliases[1:]

	p.net.log.Verbo("released alias %s for peer %s%s", next.ip, constants.NodeIDPrefix, p.nodeID)
	return &next
}
// releaseExpiredAliases frees expired IP aliases. If there is an IP pending
// expiration, then the expiration is scheduled.
//
// assumes [stateLock] is not held
func (p *peer) releaseExpiredAliases() {
	currentTime := p.net.clock.Time()
	for {
		next := p.releaseNextAlias(currentTime)
		if next == nil {
			return
		}

		// We should always release [aliasLock] before attempting
		// to acquire the [stateLock] to avoid deadlocking on addAlias.
		p.net.stateLock.Lock()
		delete(p.net.peerAliasIPs, next.ip.String())
		p.net.stateLock.Unlock()
	}
}
// releaseAllAliases frees all alias IPs.
//
// assumes [stateLock] is held and that [aliasTimer]
// has been stopped
func (p *peer) releaseAllAliases() {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()

	for _, alias := range p.aliases {
		delete(p.net.peerAliasIPs, alias.ip.String())

		p.net.log.Verbo("released alias %s for peer %s%s", alias.ip, constants.NodeIDPrefix, p.nodeID)
	}
	p.aliases = nil
}
// nextTimeout returns the deadline by which the next message must be received
// from (or written to) this peer before the connection is considered stalled.
func (p *peer) nextTimeout() time.Time {
	return p.net.clock.Time().Add(p.net.pingPongTimeout)
}
// ipAndTimeBytes packs [ip] followed by [timestamp] into a fixed-size byte
// slice; this is the payload that peers sign to prove IP ownership.
func ipAndTimeBytes(ip utils.IPDesc, timestamp uint64) []byte {
	packer := wrappers.Packer{Bytes: make([]byte, wrappers.IPLen+wrappers.LongLen)}
	packer.PackIP(ip)
	packer.PackLong(timestamp)
	return packer.Bytes
}
// ipAndTimeHash returns the SHA-256 hash of the packed (ip, timestamp) pair.
func ipAndTimeHash(ip utils.IPDesc, timestamp uint64) []byte {
	return hashing.ComputeHash256(ipAndTimeBytes(ip, timestamp))
}
// NOTE(review): stray non-code line ("remove debug println") — appears to be a
// fused commit note; kept as a comment so the file parses.
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package network
import (
"bufio"
"bytes"
"crypto/x509"
"encoding/binary"
"io"
"math"
"net"
"sync"
"sync/atomic"
"time"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/network/message"
"github.com/ava-labs/avalanchego/utils"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/utils/formatting"
"github.com/ava-labs/avalanchego/utils/hashing"
"github.com/ava-labs/avalanchego/utils/timer"
"github.com/ava-labs/avalanchego/utils/wrappers"
)
// signedPeerIP is a peer's claimed IP together with the Unix-seconds
// timestamp of the claim and the peer certificate's signature over both.
type signedPeerIP struct {
	// ip the peer claims to be reachable at
	ip utils.IPDesc
	// time of the claim, in Unix seconds
	time uint64
	// signature by the peer's certificate over (ip, time)
	signature []byte
}
// alias is a secondary IP address where a peer
// was reached
type alias struct {
	// ip where peer was reached
	ip utils.IPDesc

	// expiry is network time when the ip should be released
	expiry time.Time
}
// peer is one remote node we hold a connection to, together with its
// handshake state, outbound queue, and per-peer bookkeeping.
type peer struct {
	net *network // network this peer is part of

	// True if this peer has sent us a valid Version message and
	// is running a compatible version.
	// Only modified on the connection's reader routine.
	gotVersion utils.AtomicBool

	// True if this peer has sent us a valid PeerList message.
	// Only modified on the connection's reader routine.
	gotPeerList utils.AtomicBool

	// only send the version to this peer on handling a getVersion message if
	// a version hasn't already been sent.
	versionSent            utils.AtomicBool
	versionWithSubnetsSent utils.AtomicBool

	// only send the peerlist to this peer on handling a getPeerlist message if
	// a peerlist hasn't already been sent.
	peerListSent utils.AtomicBool

	// True if the peer:
	// * Has sent us a Version message
	// * Has sent us a PeerList message
	// * Is a compatible version
	// Only modified on the connection's reader routine.
	finishedHandshake utils.AtomicBool

	// only close the peer once
	once sync.Once

	// if the close function has been called.
	closed utils.AtomicBool

	// queue of messages to be sent to this peer
	sendQueue [][]byte

	// Signalled when a message is added to [sendQueue],
	// and when [p.closed] is set to true.
	// [sendQueueCond.L] must be held when using [sendQueue].
	sendQueueCond *sync.Cond

	// ip may or may not be set when the peer is first started. is only modified
	// on the connection's reader routine.
	ip utils.IPDesc

	// ipLock must be held when accessing [ip].
	ipLock sync.RWMutex

	// aliases is a list of IPs other than [ip] that we have connected to
	// this peer at.
	aliases []alias

	// aliasTimer triggers the release of expired records from [aliases].
	aliasTimer *timer.Timer

	// aliasLock must be held when accessing [aliases] or [aliasTimer].
	aliasLock sync.Mutex

	// node ID of this peer.
	nodeID ids.ShortID

	// the connection object that is used to read/write messages from
	conn net.Conn

	// Version that this peer reported during the handshake.
	// Set when we process the Version message from this peer.
	versionStruct, versionStr utils.AtomicInterface

	// Unix time of the last message sent and received respectively
	// Must only be accessed atomically
	lastSent, lastReceived int64

	// closed to stop the ticker goroutines started by [StartTicker]
	tickerCloser chan struct{}

	// ticker processes
	tickerOnce sync.Once

	// [cert] is this peer's certificate (specifically the leaf of the certificate chain they provided)
	cert *x509.Certificate

	// sigAndTime contains a struct of type sigAndTime.
	// The signature is [cert]'s signature on the peer's IP, concatenated with
	// the peer's local time when it sent a Version.
	// The time in [sigAndTime] is the one mentioned above.
	sigAndTime utils.AtomicInterface

	// Used in [handleAcceptedFrontier], [handleAccepted],
	// [handleGetAccepted], [handleChits].
	// We use this one ids.Set rather than allocating one per method call.
	// Should be cleared before use.
	// Should only be used in peer's reader goroutine.
	idSet ids.Set

	// True if we can compress messages sent to this peer
	canHandleCompressed utils.AtomicBool

	// trackedSubnets hold subnetIDs that this peer is interested in.
	trackedSubnets ids.Set
}
// newPeer returns a properly initialized *peer.
func newPeer(net *network, conn net.Conn, ip utils.IPDesc) *peer {
	p := &peer{
		sendQueueCond: sync.NewCond(&sync.Mutex{}),
		net:           net,
		conn:          conn,
		ip:            ip,
		tickerCloser:  make(chan struct{}),
	}
	p.aliasTimer = timer.NewTimer(p.releaseExpiredAliases)

	// Every peer implicitly tracks the primary network.
	p.trackedSubnets.Add(constants.PrimaryNetworkID)
	return p
}
// Start begins communicating with this peer by sending our version
// message(s) and then spawning the reader/writer goroutines.
//
// assume the [stateLock] is held
func (p *peer) Start() {
	go func() {
		// Make sure that the version is the first message sent
		p.sendVersionWithSubnets()
		// Also queue the legacy Version message — presumably for peers that
		// do not understand VersionWithSubnets; confirm against the peer
		// handshake protocol.
		p.sendVersion()

		// Only start reading/writing after the version messages are queued,
		// so version is guaranteed to be first on the wire.
		go p.ReadMessages()
		go p.WriteMessages()
	}()
}
// StartTicker launches this peer's periodic background goroutines:
// handshake retries, pings, and alias expiry. It is invoked at most once,
// via [p.tickerOnce] in WriteMessages.
func (p *peer) StartTicker() {
	go p.requestFinishHandshake()
	go p.sendPings()
	go p.monitorAliases()
}
// sendPings sends a Ping to this peer every [pingFrequency] until either the
// peer is closed or [tickerCloser] is closed.
func (p *peer) sendPings() {
	ticker := time.NewTicker(p.net.pingFrequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			if p.closed.GetValue() {
				return
			}
			p.sendPing()
		case <-p.tickerCloser:
			return
		}
	}
}
// requestFinishHandshake periodically re-requests whichever handshake
// messages (Version, PeerList) we are still missing from this peer. It stops
// once the handshake finishes, the peer closes, or [tickerCloser] is closed.
func (p *peer) requestFinishHandshake() {
	retryTicker := time.NewTicker(p.net.getVersionTimeout)
	defer retryTicker.Stop()

	for {
		select {
		case <-retryTicker.C:
			// Nothing left to do if the handshake completed or the
			// connection is going away.
			if p.finishedHandshake.GetValue() || p.closed.GetValue() {
				return
			}
			if !p.gotVersion.GetValue() {
				p.sendGetVersion()
			}
			if !p.gotPeerList.GetValue() {
				p.sendGetPeerList()
			}
		case <-p.tickerCloser:
			return
		}
	}
}
// monitorAliases periodically attempts
// to release timed out alias IPs of the
// peer.
//
// monitorAliases will acquire [stateLock]
// when an alias is released.
func (p *peer) monitorAliases() {
	go func() {
		// Once the peer starts shutting down ([tickerCloser] is closed in
		// [close]), stop the timer so Dispatch below returns.
		<-p.tickerCloser
		p.aliasTimer.Stop()
	}()
	// Blocks until the timer is stopped; fires [releaseExpiredAliases]
	// whenever the timeout armed in [addAlias] elapses.
	p.aliasTimer.Dispatch()
}
// Read and handle messages from this peer.
// When this method returns, the connection is closed.
//
// Wire format: a big-endian uint32 length prefix followed by the message
// bytes. Each message must be released back to the inbound throttler via
// [onFinishedHandling] exactly once.
func (p *peer) ReadMessages() {
	defer p.Close()

	// Continuously read and handle messages from this peer.
	reader := bufio.NewReader(p.conn)
	// Reused buffer for the 4-byte length prefix.
	msgLenBytes := make([]byte, wrappers.IntLen)
	for {
		// Time out and close connection if we can't read message length
		if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil {
			p.net.log.Verbo("error setting the connection read timeout on %s%s at %s %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}

		// Read the message length
		if _, err := io.ReadFull(reader, msgLenBytes); err != nil {
			p.net.log.Verbo("error reading from %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}

		// Parse the message length
		msgLen := binary.BigEndian.Uint32(msgLenBytes)

		// Make sure the message length is valid.
		if int64(msgLen) > p.net.maxMessageSize {
			p.net.log.Verbo("too large message length %d from %s%s at %s", msgLen, constants.NodeIDPrefix, p.nodeID, p.getIP())
			return
		}

		// Wait until the throttler says we can proceed to read the message.
		// Note that when we are done handling this message, or give up
		// trying to read it, we must call [p.net.msgThrottler.Release]
		// to give back the bytes used by this message.
		p.net.inboundMsgThrottler.Acquire(uint64(msgLen), p.nodeID)

		// Invariant: When done processing this message, onFinishedHandling() is called.
		// If this is not honored, the message throttler will leak until no new messages can be read.
		// You can look at message throttler metrics to verify that there is no leak.
		onFinishedHandling := func() { p.net.inboundMsgThrottler.Release(uint64(msgLen), p.nodeID) }

		// Time out and close connection if we can't read message
		if err := p.conn.SetReadDeadline(p.nextTimeout()); err != nil {
			p.net.log.Verbo("error setting the connection read timeout on %s%s at %s %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			onFinishedHandling()
			return
		}

		// Read the message
		msgBytes := make([]byte, msgLen)
		if _, err := io.ReadFull(reader, msgBytes); err != nil {
			p.net.log.Verbo("error reading from %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			onFinishedHandling()
			return
		}

		p.net.log.Verbo("parsing message from %s%s at %s:\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msgBytes})

		// Parse the message
		msg, err := p.net.c.Parse(msgBytes, p.canHandleCompressed.GetValue())
		if err != nil {
			p.net.log.Verbo("failed to parse message from %s%s at %s:\n%s\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msgBytes}, err)
			// Couldn't parse the message. Read the next one.
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			continue
		}

		// Handle the message. Note that when we are done handling
		// this message, we must call [p.net.msgThrottler.Release]
		// to release the bytes used by this message. See MsgThrottler.
		p.handle(msg, onFinishedHandling)
	}
}
// attempt to write messages to the peer
//
// Dequeues from [sendQueue] (guarded by [sendQueueCond]) and writes each
// message as a 4-byte big-endian length prefix followed by the payload.
// Outbound-throttler bytes for a message are released as soon as it is
// dequeued.
func (p *peer) WriteMessages() {
	defer p.Close()

	// [reader] is reused to stream each byte slice into [writer] without
	// extra allocation.
	var reader bytes.Reader
	writer := bufio.NewWriter(p.conn)
	for { // When this loop exits, p.sendQueueCond.L is unlocked
		p.sendQueueCond.L.Lock()
		for {
			if p.closed.GetValue() {
				p.sendQueueCond.L.Unlock()
				return
			}
			if len(p.sendQueue) > 0 {
				// There is a message to send
				break
			}
			// Wait until there is a message to send
			p.sendQueueCond.Wait()
		}
		// Pop the head of the queue, then drop the lock before doing I/O.
		msg := p.sendQueue[0]
		p.sendQueue = p.sendQueue[1:]
		p.sendQueueCond.L.Unlock()

		msgLen := uint32(len(msg))
		p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)

		p.net.log.Verbo("sending message to %s%s at %s:\n%s", constants.NodeIDPrefix, p.nodeID, p.getIP(), formatting.DumpBytes{Bytes: msg})
		msgb := [wrappers.IntLen]byte{}
		binary.BigEndian.PutUint32(msgb[:], msgLen)
		for _, byteSlice := range [2][]byte{msgb[:], msg} {
			reader.Reset(byteSlice)
			if err := p.conn.SetWriteDeadline(p.nextTimeout()); err != nil {
				// NOTE(review): on this and the following error return, [msg]
				// is not returned to [byteSlicePool] — confirm whether that
				// is intentional (the peer is closing anyway).
				p.net.log.Verbo("error setting write deadline to %s%s at %s due to: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
				return
			}
			if _, err := io.CopyN(writer, &reader, int64(len((byteSlice)))); err != nil {
				p.net.log.Verbo("error writing to %s%s at %s due to: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
				return
			}
			// Start the periodic goroutines after the first successful
			// write; [tickerOnce] makes this a one-time event.
			p.tickerOnce.Do(p.StartTicker)
		}
		// Make sure the peer got the entire message
		if err := writer.Flush(); err != nil {
			p.net.log.Verbo("couldn't flush writer to %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
			return
		}

		now := p.net.clock.Time().Unix()
		atomic.StoreInt64(&p.lastSent, now)
		atomic.StoreInt64(&p.net.lastMsgSentTime, now)

		// Return the sent buffer to the pool for reuse.
		p.net.byteSlicePool.Put(msg)
	}
}
// Send queues [msg] for delivery to this peer, returning false if it was
// dropped (rate-limited or connection closed) rather than queued.
//
// Send assumes that the [stateLock] is not held.
// If [canModifyMsg], [msg] may be modified by this method.
// If ![canModifyMsg], [msg] will not be modified by this method.
// [canModifyMsg] should be false if [msg] is sent in a loop, for example.
func (p *peer) Send(msg message.Message, canModifyMsg bool) bool {
	msgBytes := msg.Bytes()
	msgLen := int64(len(msgBytes))

	// Acquire space on the outbound message queue, or drop [msg] if we can't
	dropMsg := !p.net.outboundMsgThrottler.Acquire(uint64(msgLen), p.nodeID)
	if dropMsg {
		p.net.log.Debug("dropping %s message to %s%s at %s due to rate-limiting", msg.Op(), constants.NodeIDPrefix, p.nodeID, p.getIP())
		return false
	}
	// Invariant: must call p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)
	// when done sending [msg] or when we give up sending [msg].
	// (WriteMessages releases after dequeueing; [close] releases anything
	// still queued.)

	p.sendQueueCond.L.Lock()
	defer p.sendQueueCond.L.Unlock()

	if p.closed.GetValue() {
		p.net.log.Debug("dropping message to %s%s at %s due to a closed connection", constants.NodeIDPrefix, p.nodeID, p.getIP())
		p.net.outboundMsgThrottler.Release(uint64(msgLen), p.nodeID)
		return false
	}

	// If the flag says to not modify [msgBytes], copy it so that the copy,
	// not [msgBytes], will be put back into the []byte pool after it's written.
	toSend := msgBytes
	if !canModifyMsg {
		toSend = make([]byte, msgLen)
		copy(toSend, msgBytes)
	}

	p.sendQueue = append(p.sendQueue, toSend)
	// Wake WriteMessages, which may be blocked waiting for work.
	p.sendQueueCond.Signal()
	return true
}
// handle dispatches a parsed message to the appropriate handler and records
// receive metrics. [onFinishedHandling] must be called exactly once when the
// message is fully processed, to release its inbound-throttler bytes;
// network-level handlers are synchronous so it is called here, while
// consensus-level handlers receive it and call it themselves.
//
// assumes the [stateLock] is not held
func (p *peer) handle(msg message.Message, onFinishedHandling func()) {
	// Record receive time for liveness tracking.
	now := p.net.clock.Time()
	atomic.StoreInt64(&p.lastReceived, now.Unix())
	atomic.StoreInt64(&p.net.lastMsgReceivedTime, now.Unix())
	msgLen := uint64(len(msg.Bytes()))

	op := msg.Op()
	msgMetrics := p.net.message(op)
	if msgMetrics == nil {
		p.net.log.Error("dropping an unknown message from %s%s at %s with op %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), op)
		onFinishedHandling()
		return
	}
	msgMetrics.numReceived.Inc()
	msgMetrics.receivedBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		msgMetrics.savedReceivedBytes.Observe(float64(saved))
	}

	switch op { // Network-related message types
	case message.Version:
		p.handleVersion(msg)
		onFinishedHandling()
		return
	case message.VersionWithSubnets:
		p.handleVersionWithSubnets(msg)
		onFinishedHandling()
		return
	case message.GetVersion:
		p.handleGetVersion(msg)
		onFinishedHandling()
		return
	case message.Ping:
		p.handlePing(msg)
		onFinishedHandling()
		return
	case message.Pong:
		p.handlePong(msg)
		onFinishedHandling()
		return
	case message.GetPeerList:
		p.handleGetPeerList(msg)
		onFinishedHandling()
		return
	case message.PeerList:
		p.handlePeerList(msg)
		onFinishedHandling()
		return
	}

	// Consensus messages are dropped until the handshake is complete; nudge
	// the peer for whatever handshake message we are still missing.
	if !p.finishedHandshake.GetValue() {
		p.net.log.Debug("dropping %s from %s%s at %s because handshake isn't finished", op, constants.NodeIDPrefix, p.nodeID, p.getIP())

		// attempt to finish the handshake
		if !p.gotVersion.GetValue() {
			p.sendGetVersion()
		}
		if !p.gotPeerList.GetValue() {
			p.sendGetPeerList()
		}
		onFinishedHandling()
		return
	}

	switch op { // Consensus-related messages
	case message.GetAcceptedFrontier:
		p.handleGetAcceptedFrontier(msg, onFinishedHandling)
	case message.AcceptedFrontier:
		p.handleAcceptedFrontier(msg, onFinishedHandling)
	case message.GetAccepted:
		p.handleGetAccepted(msg, onFinishedHandling)
	case message.Accepted:
		p.handleAccepted(msg, onFinishedHandling)
	case message.Get:
		p.handleGet(msg, onFinishedHandling)
	case message.GetAncestors:
		p.handleGetAncestors(msg, onFinishedHandling)
	case message.Put:
		p.handlePut(msg, onFinishedHandling)
	case message.MultiPut:
		p.handleMultiPut(msg, onFinishedHandling)
	case message.PushQuery:
		p.handlePushQuery(msg, onFinishedHandling)
	case message.PullQuery:
		p.handlePullQuery(msg, onFinishedHandling)
	case message.Chits:
		p.handleChits(msg, onFinishedHandling)
	default:
		p.net.log.Debug("dropping an unknown message from %s%s at %s with op %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), op)
		onFinishedHandling()
	}
}
// Close tears this peer down exactly once; safe to call from multiple
// goroutines.
//
// assumes the [stateLock] is not held
func (p *peer) Close() { p.once.Do(p.close) }
// assumes only [peer.Close] calls this.
// By the time this message returns, [p] has been removed from [p.net.peers]
func (p *peer) close() {
	// If the connection is closing, we can immediately cancel the ticker
	// goroutines.
	close(p.tickerCloser)

	// Mark closed before closing the connection so Send/WriteMessages stop
	// queueing/draining.
	p.closed.SetValue(true)

	if err := p.conn.Close(); err != nil {
		p.net.log.Debug("closing connection to %s%s at %s resulted in an error: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
	}

	p.sendQueueCond.L.Lock()
	// Release the bytes of the unsent messages to the outbound message throttler
	for i := 0; i < len(p.sendQueue); i++ {
		p.net.outboundMsgThrottler.Release(uint64(len(p.sendQueue[i])), p.nodeID)
	}
	p.sendQueue = nil
	p.sendQueueCond.L.Unlock()

	// Per [p.sendQueueCond]'s spec, it is signalled when [p.closed] is set to true
	// so that we exit the WriteMessages goroutine.
	// Since [p.closed] is now true, nothing else will be put on [p.sendQueue]
	p.sendQueueCond.Signal()

	// Unregister this peer from the network.
	p.net.disconnected(p)
}
// sendGetVersion queues a GetVersion request to this peer and records send
// metrics and the send-failure rate.
//
// assumes the [stateLock] is not held
func (p *peer) sendGetVersion() {
	msg, err := p.net.b.GetVersion()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.metrics.getVersion.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.metrics.getVersion.numSent.Inc()
	p.net.metrics.getVersion.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.getVersion.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// sendVersion queues our (legacy) Version message, carrying our network ID,
// node nonce, current time, public IP, version string, and our signed
// IP/time pair.
//
// assumes the [stateLock] is not held
func (p *peer) sendVersion() {
	// Read-lock while snapshotting network state used to build the message.
	p.net.stateLock.RLock()
	myIP := p.net.ip.IP()
	myVersionTime, myVersionSig, err := p.net.getVersion(myIP)
	if err != nil {
		p.net.stateLock.RUnlock()
		return
	}
	msg, err := p.net.b.Version(
		p.net.networkID,
		p.net.nodeID,
		p.net.clock.Unix(),
		myIP,
		p.net.versionCompatibility.Version().String(),
		myVersionTime,
		myVersionSig,
	)
	p.net.stateLock.RUnlock()
	p.net.log.AssertNoError(err)
	lenMsg := len(msg.Bytes())
	sent := p.Send(msg, true)
	if sent {
		p.net.metrics.version.numSent.Inc()
		p.net.metrics.version.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.version.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
		// Remember that Version was sent so handleGetVersion doesn't resend.
		p.versionSent.SetValue(true)
	} else {
		p.net.metrics.version.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendVersionWithSubnets queues our VersionWithSubnets message — the same
// payload as Version plus the list of whitelisted subnet IDs we track.
//
// assumes the [stateLock] is not held
func (p *peer) sendVersionWithSubnets() {
	// Read-lock while snapshotting network state used to build the message.
	p.net.stateLock.RLock()
	myIP := p.net.ip.IP()
	myVersionTime, myVersionSig, err := p.net.getVersion(myIP)
	if err != nil {
		p.net.stateLock.RUnlock()
		return
	}
	whitelistedSubnets := p.net.whitelistedSubnets
	msg, err := p.net.b.VersionWithSubnets(
		p.net.networkID,
		p.net.nodeID,
		p.net.clock.Unix(),
		myIP,
		p.net.versionCompatibility.Version().String(),
		myVersionTime,
		myVersionSig,
		whitelistedSubnets.List(),
	)
	p.net.stateLock.RUnlock()
	p.net.log.AssertNoError(err)
	lenMsg := len(msg.Bytes())
	sent := p.Send(msg, true)
	if sent {
		p.net.metrics.versionWithSubnets.numSent.Inc()
		p.net.metrics.versionWithSubnets.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.versionWithSubnets.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
		// Remember that it was sent so handleGetVersion doesn't resend.
		p.versionWithSubnetsSent.SetValue(true)
	} else {
		p.net.metrics.versionWithSubnets.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendGetPeerList queues a GetPeerList request to this peer and records
// send metrics.
//
// assumes the [stateLock] is not held
func (p *peer) sendGetPeerList() {
	msg, err := p.net.b.GetPeerList()
	p.net.log.AssertNoError(err)
	lenMsg := len(msg.Bytes())
	sent := p.Send(msg, true)
	if sent {
		// NOTE(review): this function mixes `p.net.getPeerlist` and
		// `p.net.metrics.getPeerlist` — presumably the metrics struct is
		// embedded in net so both resolve to the same counters; confirm,
		// and prefer one spelling for consistency with sibling senders.
		p.net.getPeerlist.numSent.Inc()
		p.net.getPeerlist.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.getPeerlist.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
	} else {
		p.net.getPeerlist.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendPeerList queues a PeerList message containing the validator IPs we
// know, compressed only if both sides support it.
//
// assumes the stateLock is not held
func (p *peer) sendPeerList() {
	peers, err := p.net.validatorIPs()
	if err != nil {
		return
	}

	// Compress this message only if the peer can handle compressed
	// messages and we have compression enabled
	canHandleCompressed := p.canHandleCompressed.GetValue()
	msg, err := p.net.b.PeerList(peers, canHandleCompressed, canHandleCompressed && p.net.compressionEnabled)
	if err != nil {
		p.net.log.Warn("failed to send PeerList to %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		return
	}

	lenMsg := len(msg.Bytes())
	sent := p.Send(msg, true)
	if sent {
		p.net.peerList.numSent.Inc()
		p.net.peerList.sentBytes.Add(float64(lenMsg))
		// assume that if [saved] == 0, [msg] wasn't compressed
		if saved := msg.BytesSavedCompression(); saved != 0 {
			p.net.metrics.peerList.savedSentBytes.Observe(float64(saved))
		}
		p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
		// Remember that it was sent so handleGetPeerList doesn't resend.
		p.peerListSent.SetValue(true)
	} else {
		p.net.peerList.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
	}
}
// sendPing queues a Ping message to this peer and records send metrics and
// the send-failure rate.
//
// assumes the [stateLock] is not held
func (p *peer) sendPing() {
	msg, err := p.net.b.Ping()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.ping.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.ping.numSent.Inc()
	p.net.ping.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.ping.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// sendPong queues a Pong message to this peer and records send metrics and
// the send-failure rate.
//
// assumes the [stateLock] is not held
func (p *peer) sendPong() {
	msg, err := p.net.b.Pong()
	p.net.log.AssertNoError(err)
	msgLen := len(msg.Bytes())

	if !p.Send(msg, true) {
		p.net.pong.numFailed.Inc()
		p.net.sendFailRateCalculator.Observe(1, p.net.clock.Time())
		return
	}

	p.net.pong.numSent.Inc()
	p.net.pong.sentBytes.Add(float64(msgLen))
	// assume that if [saved] == 0, [msg] wasn't compressed
	if saved := msg.BytesSavedCompression(); saved != 0 {
		p.net.metrics.pong.savedSentBytes.Observe(float64(saved))
	}
	p.net.sendFailRateCalculator.Observe(0, p.net.clock.Time())
}
// handleGetVersion re-sends whichever of our version messages this peer has
// not yet been sent.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetVersion(_ message.Message) {
	if !p.versionWithSubnetsSent.GetValue() {
		p.sendVersionWithSubnets()
	}
	if !p.versionSent.GetValue() {
		p.sendVersion()
	}
}
// handleVersion processes a legacy Version message (no TrackedSubnets field).
//
// assumes the [stateLock] is not held
func (p *peer) handleVersion(msg message.Message) {
	p.versionCheck(msg, false)
}
// handleVersionWithSubnets processes a VersionWithSubnets message (carries a
// TrackedSubnets field).
//
// assumes the [stateLock] is not held
func (p *peer) handleVersionWithSubnets(msg message.Message) {
	p.versionCheck(msg, true)
}
// versionCheck validates a Version or VersionWithSubnets message from this
// peer and, on success, records the peer's signed IP, version, and tracked
// subnets, sends our peer list, and marks the version half of the handshake
// complete. [isVersionWithSubnets] reports whether [msg] carries a
// TrackedSubnets field.
//
// assumes the [stateLock] is not held
func (p *peer) versionCheck(msg message.Message, isVersionWithSubnets bool) {
	switch {
	case p.gotVersion.GetValue():
		// Only the first version message from a peer is processed.
		p.net.log.Verbo("dropping duplicated version message from %s%s at %s", constants.NodeIDPrefix, p.nodeID, p.getIP())
		return
	case msg.Get(message.NodeID).(uint32) == p.net.nodeID:
		// The peer reported our own node nonce: we connected to ourselves.
		p.net.log.Debug("peer at %s has same node ID as me", p.getIP())
		p.discardMyIP()
		return
	case msg.Get(message.NetworkID).(uint32) != p.net.networkID:
		p.net.log.Debug(
			"network ID of %s%s at %s (%d) doesn't match our's (%d)",
			constants.NodeIDPrefix, p.nodeID, p.getIP(), msg.Get(message.NetworkID).(uint32), p.net.networkID,
		)
		p.discardIP()
		return
	case p.closed.GetValue():
		return
	}

	// Reject peers whose reported clock is too far out of sync with ours.
	myTime := float64(p.net.clock.Unix())
	peerTime := float64(msg.Get(message.MyTime).(uint64))
	if math.Abs(peerTime-myTime) > p.net.maxClockDifference.Seconds() {
		if p.net.beacons.Contains(p.nodeID) {
			p.net.log.Warn(
				"beacon %s%s at %s reports time (%d) that is too far out of sync with our's (%d)",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), uint64(peerTime), uint64(myTime),
			)
		} else {
			p.net.log.Debug(
				"peer %s%s at %s reports time (%d) that is too far out of sync with our's (%d)",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), uint64(peerTime), uint64(myTime),
			)
		}
		p.discardIP()
		return
	}

	// Parse and check compatibility of the peer's reported version.
	peerVersionStr := msg.Get(message.VersionStr).(string)
	peerVersion, err := p.net.parser.Parse(peerVersionStr)
	if err != nil {
		p.net.log.Debug("version of %s%s at %s could not be parsed: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		p.discardIP()
		p.net.metrics.failedToParse.Inc()
		return
	}

	if p.net.versionCompatibility.Version().Before(peerVersion) {
		if p.net.beacons.Contains(p.nodeID) {
			p.net.log.Info(
				"beacon %s%s at %s attempting to connect with newer version %s. You may want to update your client",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion,
			)
		} else {
			p.net.log.Debug(
				"peer %s%s at %s attempting to connect with newer version %s. You may want to update your client",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion,
			)
		}
	}

	if err := p.net.versionCompatibility.Compatible(peerVersion); err != nil {
		p.net.log.Verbo("peer %s%s at %s version (%s) not compatible: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), peerVersion, err)
		p.discardIP()
		return
	}

	// Validate the peer's claimed (IP, time) pair: it must be newer than the
	// latest one we know, not too far in the future, and signed by the
	// peer's certificate.
	peerIP := msg.Get(message.IP).(utils.IPDesc)

	versionTime := msg.Get(message.VersionTime).(uint64)
	p.net.stateLock.RLock()
	latestPeerIP := p.net.latestPeerIP[p.nodeID]
	p.net.stateLock.RUnlock()
	if latestPeerIP.time > versionTime {
		p.discardIP()
		return
	}
	if float64(versionTime)-myTime > p.net.maxClockDifference.Seconds() {
		p.net.log.Debug(
			"peer %s%s at %s attempting to connect with version timestamp (%d) too far in the future",
			// BUG FIX: log the offending [versionTime]; this previously
			// logged [latestPeerIP.time], which is not the value checked
			// above.
			constants.NodeIDPrefix, p.nodeID, p.getIP(), versionTime,
		)
		p.discardIP()
		return
	}

	sig := msg.Get(message.SigBytes).([]byte)
	signed := ipAndTimeBytes(peerIP, versionTime)
	if err := p.cert.CheckSignature(p.cert.SignatureAlgorithm, signed, sig); err != nil {
		p.net.log.Debug("signature verification failed for %s%s at %s: %s", constants.NodeIDPrefix, p.nodeID, p.getIP(), err)
		p.discardIP()
		return
	}

	p.canHandleCompressed.SetValue(peerVersion.Compare(minVersionCanHandleCompressed) >= 0)

	signedPeerIP := signedPeerIP{
		ip:        peerIP,
		time:      versionTime,
		signature: sig,
	}

	p.net.stateLock.Lock()
	p.net.latestPeerIP[p.nodeID] = signedPeerIP
	p.net.stateLock.Unlock()

	p.sigAndTime.SetValue(signedPeerIP)

	if isVersionWithSubnets {
		subnetIDsBytes := msg.Get(message.TrackedSubnets).([][]byte)
		for _, subnetIDBytes := range subnetIDsBytes {
			subnetID, err := ids.ToID(subnetIDBytes)
			if err != nil {
				// BUG FIX: %w is only meaningful in fmt.Errorf; use %s here.
				p.net.log.Debug("peer %s%s sent wrong subnetID %s", constants.NodeIDPrefix, p.nodeID, err)
				continue
			}
			// add only if we also track this subnet
			if p.net.whitelistedSubnets.Contains(subnetID) {
				p.trackedSubnets.Add(subnetID)
			}
		}
	} else {
		// this peer has old Version, we don't know what its interested in.
		// so assume that it tracks all available subnets
		p.trackedSubnets.Add(p.net.whitelistedSubnets.List()...)
	}

	if ip := p.getIP(); ip.IsZero() {
		addr := p.conn.RemoteAddr()
		localPeerIP, err := utils.ToIPDesc(addr.String())
		if err == nil {
			// If we have no clue what the peer's IP is, we can't perform any
			// verification
			if peerIP.IP.Equal(localPeerIP.IP) {
				// if the IPs match, add this ip:port pair to be tracked
				p.setIP(peerIP)
			}
		}
	}

	p.sendPeerList()

	p.versionStruct.SetValue(peerVersion)
	p.versionStr.SetValue(peerVersion.String())
	p.gotVersion.SetValue(true)

	p.tryMarkFinishedHandshake()
}
// handleGetPeerList responds to a GetPeerList request by sending our peer
// list — but only after the peer's version is known and at most once per
// connection.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetPeerList(_ message.Message) {
	if !p.gotVersion.GetValue() || p.peerListSent.GetValue() {
		return
	}
	p.sendPeerList()
}
// trackSignedPeer validates one gossiped (IP, time, cert, signature) record
// and, if it passes every filter, records it as the peer's latest signed IP
// and asks the network to track (connect to) it.
func (p *peer) trackSignedPeer(peer utils.IPCertDesc) {
	p.net.stateLock.Lock()
	defer p.net.stateLock.Unlock()

	switch {
	case peer.IPDesc.Equal(p.net.ip.IP()):
		// This is our own IP.
		return
	case peer.IPDesc.IsZero():
		return
	case !p.net.allowPrivateIPs && peer.IPDesc.IsPrivate():
		return
	}

	// Reject records timestamped too far in the future.
	if float64(peer.Time)-float64(p.net.clock.Unix()) > p.net.maxClockDifference.Seconds() {
		p.net.log.Debug("ignoring gossiped peer with version timestamp (%d) too far in the future", peer.Time)
		return
	}

	nodeID := certToID(peer.Cert)
	if !p.net.vdrs.Contains(nodeID) && !p.net.beacons.Contains(nodeID) {
		p.net.log.Verbo(
			"not peering to %s at %s because they are not a validator or beacon",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc,
		)
		return
	}

	// Am I already peered to them? (safe because [p.net.stateLock] is held)
	if foundPeer, ok := p.net.peers.getByID(nodeID); ok && !foundPeer.closed.GetValue() {
		p.net.log.Verbo(
			"not peering to %s because we are already connected to %s",
			peer.IPDesc, nodeID.PrefixedString(constants.NodeIDPrefix),
		)
		return
	}

	// Drop records older than the latest signed IP we already have.
	if p.net.latestPeerIP[nodeID].time > peer.Time {
		p.net.log.Verbo(
			"not peering to %s at %s: the given timestamp (%d) < latest (%d)",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc, peer.Time, p.net.latestPeerIP[nodeID].time,
		)
		return
	}

	// Verify the record is signed by the certificate it names.
	signed := ipAndTimeBytes(peer.IPDesc, peer.Time)
	err := peer.Cert.CheckSignature(peer.Cert.SignatureAlgorithm, signed, peer.Signature)
	if err != nil {
		p.net.log.Debug(
			"signature verification failed for %s at %s: %s",
			nodeID.PrefixedString(constants.NodeIDPrefix), peer.IPDesc, err,
		)
		return
	}

	p.net.latestPeerIP[nodeID] = signedPeerIP{
		ip:   peer.IPDesc,
		time: peer.Time,
	}

	p.net.track(peer.IPDesc, nodeID)
}
// handlePeerList marks the peer-list half of the handshake complete and
// attempts to track every gossiped peer record in the message.
//
// assumes the [stateLock] is not held
func (p *peer) handlePeerList(msg message.Message) {
	p.gotPeerList.SetValue(true)
	p.tryMarkFinishedHandshake()

	ips := msg.Get(message.SignedPeers).([]utils.IPCertDesc)
	for _, ip := range ips {
		p.trackSignedPeer(ip)
	}
}
// handlePing answers a Ping with a Pong.
//
// assumes the [stateLock] is not held
func (p *peer) handlePing(_ message.Message) {
	p.sendPong()
}
// handlePong is intentionally a no-op: receiving any message already updates
// [lastReceived] in [handle], so Pong needs no extra work.
//
// assumes the [stateLock] is not held
func (p *peer) handlePong(_ message.Message) {}
// handleGetAcceptedFrontier forwards a GetAcceptedFrontier request from this
// peer to the router. [onFinishedHandling] is passed through for the router
// to call when done.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAcceptedFrontier(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	p.net.router.GetAcceptedFrontier(p.nodeID, chainID, requestID, deadline, onFinishedHandling)
}
// handleAcceptedFrontier parses the container IDs in an AcceptedFrontier
// message — rejecting the whole message on any unparsable or duplicate ID —
// and forwards them to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handleAcceptedFrontier(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	// [idSet] is the reusable per-reader-goroutine scratch set; cleared
	// before use (see its field comment).
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.AcceptedFrontier(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// handleGetAccepted parses the container IDs in a GetAccepted request —
// rejecting the whole message on any unparsable or duplicate ID — and
// forwards the request to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAccepted(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	// Reusable scratch set for duplicate detection; cleared before use.
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.GetAccepted(
		p.nodeID,
		chainID,
		requestID,
		deadline,
		containerIDs,
		onFinishedHandling,
	)
}
// handleAccepted parses the container IDs in an Accepted message — rejecting
// the whole message on any unparsable or duplicate ID — and forwards them to
// the router.
//
// assumes the [stateLock] is not held
func (p *peer) handleAccepted(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	// Reusable scratch set for duplicate detection; cleared before use.
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s. ID: 0x%x. Error: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.Accepted(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// handleGet forwards a Get request from this peer to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handleGet(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	p.net.router.Get(p.nodeID, chainID, requestID, deadline, containerID, onFinishedHandling)
}
// handleGetAncestors forwards a GetAncestors request from this peer to the
// router.
//
// assumes the [stateLock] is not held
func (p *peer) handleGetAncestors(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	p.net.router.GetAncestors(p.nodeID, chainID, requestID, deadline, containerID, onFinishedHandling)
}
// handlePut forwards a Put message from this peer to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handlePut(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	containerBytes := msg.Get(message.ContainerBytes).([]byte)

	p.net.router.Put(p.nodeID, chainID, requestID, containerID, containerBytes, onFinishedHandling)
}
// handleMultiPut forwards a MultiPut message from this peer to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handleMultiPut(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	containers := msg.Get(message.MultiContainerBytes).([][]byte)

	p.net.router.MultiPut(p.nodeID, chainID, requestID, containers, onFinishedHandling)
}
// handlePushQuery forwards a PushQuery message from this peer to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handlePushQuery(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))
	containerBytes := msg.Get(message.ContainerBytes).([]byte)

	p.net.router.PushQuery(p.nodeID, chainID, requestID, deadline, containerID, containerBytes, onFinishedHandling)
}
// handlePullQuery forwards a PullQuery message from this peer to the router.
//
// assumes the [stateLock] is not held
func (p *peer) handlePullQuery(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	containerID, err := ids.ToID(msg.Get(message.ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	requestID := msg.Get(message.RequestID).(uint32)
	deadline := p.net.clock.Time().Add(time.Duration(msg.Get(message.Deadline).(uint64)))

	p.net.router.PullQuery(p.nodeID, chainID, requestID, deadline, containerID, onFinishedHandling)
}
// handleChits parses the container IDs in a Chits message — rejecting the
// whole message on any unparsable or duplicate ID — and forwards them to the
// router.
//
// assumes the [stateLock] is not held
func (p *peer) handleChits(msg message.Message, onFinishedHandling func()) {
	chainID, err := ids.ToID(msg.Get(message.ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(message.RequestID).(uint32)

	containerIDsBytes := msg.Get(message.ContainerIDs).([][]byte)
	containerIDs := make([]ids.ID, len(containerIDsBytes))
	// Reusable scratch set for duplicate detection; cleared before use.
	p.idSet.Clear()
	for i, containerIDBytes := range containerIDsBytes {
		containerID, err := ids.ToID(containerIDBytes)
		if err != nil {
			p.net.log.Debug(
				"error parsing ContainerID from %s%s at %s 0x%x: %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerIDBytes, err,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		if p.idSet.Contains(containerID) {
			p.net.log.Debug(
				"message from %s%s at %s contains duplicate of container ID %s",
				constants.NodeIDPrefix, p.nodeID, p.getIP(), containerID,
			)
			onFinishedHandling()
			p.net.metrics.failedToParse.Inc()
			return
		}
		containerIDs[i] = containerID
		p.idSet.Add(containerID)
	}

	p.net.router.Chits(
		p.nodeID,
		chainID,
		requestID,
		containerIDs,
		onFinishedHandling,
	)
}
// tryMarkFinishedHandshake notifies the network that this peer is connected
// once both halves of the handshake (Version and PeerList) have completed.
//
// assumes the [stateLock] is held
// NOTE(review): documented callers such as [versionCheck] and
// [handlePeerList] say the [stateLock] is NOT held when they run — confirm
// which lock assumption is correct.
func (p *peer) tryMarkFinishedHandshake() {
	if !p.finishedHandshake.GetValue() && // not already marked as finished with handshake
		p.gotVersion.GetValue() && // not waiting for Version
		p.gotPeerList.GetValue() && // not waiting for PeerList
		!p.closed.GetValue() { // not already disconnected
		p.net.connected(p)
	}
}
func (p *peer) discardIP() {
// By clearing the IP, we will not attempt to reconnect to this peer
if ip := p.getIP(); !ip.IsZero() {
p.setIP(utils.IPDesc{})
p.net.stateLock.Lock()
delete(p.net.disconnectedIPs, ip.String())
p.net.stateLock.Unlock()
}
p.Close()
}
func (p *peer) discardMyIP() {
// By clearing the IP, we will not attempt to reconnect to this peer
if ip := p.getIP(); !ip.IsZero() {
p.setIP(utils.IPDesc{})
str := ip.String()
p.net.stateLock.Lock()
p.net.myIPs[str] = struct{}{}
delete(p.net.disconnectedIPs, str)
p.net.stateLock.Unlock()
}
p.Close()
}
func (p *peer) setIP(ip utils.IPDesc) {
p.ipLock.Lock()
defer p.ipLock.Unlock()
p.ip = ip
}
func (p *peer) getIP() utils.IPDesc {
p.ipLock.RLock()
defer p.ipLock.RUnlock()
return p.ip
}
// addAlias marks that we have found another
// IP that we can connect to this peer at.
//
// assumes [stateLock] is held
func (p *peer) addAlias(ip utils.IPDesc) {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()
	p.net.peerAliasIPs[ip.String()] = struct{}{}
	p.aliases = append(p.aliases, alias{
		ip: ip,
		expiry: p.net.clock.Time().Add(p.net.peerAliasTimeout),
	})
	// Set the [aliasTimer] if this ip is the first alias we put
	// in [aliases].
	if len(p.aliases) == 1 {
		p.aliasTimer.SetTimeoutIn(p.net.peerAliasTimeout)
	}
}

// releaseNextAlias returns the next released alias or nil if none was released.
// If none was released, then this will schedule the next time to remove an
// alias.
//
// assumes [stateLock] is held
func (p *peer) releaseNextAlias(now time.Time) *alias {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()
	if len(p.aliases) == 0 {
		return nil
	}
	// Aliases are appended in insertion order, so the head is the oldest and
	// therefore the first to expire.
	next := p.aliases[0]
	if timeUntilExpiry := next.expiry.Sub(now); timeUntilExpiry > 0 {
		p.aliasTimer.SetTimeoutIn(timeUntilExpiry)
		return nil
	}
	p.aliases = p.aliases[1:]
	p.net.log.Verbo("released alias %s for peer %s%s", next.ip, constants.NodeIDPrefix, p.nodeID)
	return &next
}

// releaseExpiredAliases frees expired IP aliases. If there is an IP pending
// expiration, then the expiration is scheduled.
//
// assumes [stateLock] is not held
func (p *peer) releaseExpiredAliases() {
	currentTime := p.net.clock.Time()
	for {
		next := p.releaseNextAlias(currentTime)
		if next == nil {
			return
		}
		// We should always release [aliasLock] before attempting
		// to acquire the [stateLock] to avoid deadlocking on addAlias.
		p.net.stateLock.Lock()
		delete(p.net.peerAliasIPs, next.ip.String())
		p.net.stateLock.Unlock()
	}
}

// releaseAllAliases frees all alias IPs.
//
// assumes [stateLock] is held and that [aliasTimer]
// has been stopped
func (p *peer) releaseAllAliases() {
	p.aliasLock.Lock()
	defer p.aliasLock.Unlock()
	for _, alias := range p.aliases {
		delete(p.net.peerAliasIPs, alias.ip.String())
		p.net.log.Verbo("released alias %s for peer %s%s", alias.ip, constants.NodeIDPrefix, p.nodeID)
	}
	p.aliases = nil
}
// nextTimeout returns the next ping/pong deadline, measured from now.
func (p *peer) nextTimeout() time.Time {
	return p.net.clock.Time().Add(p.net.pingPongTimeout)
}

// ipAndTimeBytes packs an IP followed by a timestamp into a fixed-size
// buffer (IPLen + LongLen bytes) using the project's Packer.
func ipAndTimeBytes(ip utils.IPDesc, timestamp uint64) []byte {
	p := wrappers.Packer{
		Bytes: make([]byte, wrappers.IPLen+wrappers.LongLen),
	}
	p.PackIP(ip)
	p.PackLong(timestamp)
	return p.Bytes
}

// ipAndTimeHash returns the SHA-256-style hash (ComputeHash256) of the packed
// (ip, timestamp) pair.
func ipAndTimeHash(ip utils.IPDesc, timestamp uint64) []byte {
	return hashing.ComputeHash256(ipAndTimeBytes(ip, timestamp))
}
|
package main
import (
"context"
"log"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
)
// Consumer fans work out from inputChan to a pool of workers via jobsChan.
type Consumer struct {
	inputChan chan int // buffered intake queue, fed by queue()
	jobsChan chan int // jobs handed to workers; closed by startConsumer on shutdown
}
// getRandomTime returns a pseudo-random number of seconds in [0, 10).
//
// The original re-seeded the global source with time.Now().UnixNano() on
// every call: two calls in the same nanosecond would then return the same
// "random" value, and rand.Seed has been deprecated since Go 1.20 (the
// global source is seeded automatically). A plain Intn call is sufficient.
func getRandomTime() int {
	return rand.Intn(10)
}
func withContextFunc(ctx context.Context, f func()) context.Context {
ctx, cancel := context.WithCancel(ctx)
go func() {
c := make(chan os.Signal)
signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(c)
select {
case <-ctx.Done():
case <-c:
cancel()
f()
}
}()
return ctx
}
// queue attempts a non-blocking send of input into inputChan. It reports
// true after a successful enqueue and false when the buffer is full, in
// which case the value is dropped.
func (c *Consumer) queue(input int) bool {
	select {
	case c.inputChan <- input:
		// fall through to the success path below
	default:
		return false
	}
	log.Println("already send input value:", input)
	return true
}
// startConsumer pumps jobs from inputChan into jobsChan until the context is
// canceled, then closes jobsChan to signal the workers to stop.
//
// The value receiver is safe here: channels are reference types, so this
// copy of Consumer shares the same underlying channels.
func (c Consumer) startConsumer(ctx context.Context) {
	for {
		select {
		case job := <-c.inputChan:
			// Re-check cancellation so a job that raced with shutdown is
			// dropped instead of being forwarded to a terminating pool.
			if ctx.Err() != nil {
				close(c.jobsChan)
				return
			}
			c.jobsChan <- job
		case <-ctx.Done():
			close(c.jobsChan)
			return
		}
	}
}
// process simulates handling one job by sleeping a random number of seconds
// (0-9), then logs which worker finished which job value.
func (c *Consumer) process(num, job int) {
	n := getRandomTime()
	log.Printf("Sleeping %d seconds...\n", n)
	time.Sleep(time.Duration(n) * time.Second)
	log.Println("worker:", num, " job value:", job)
}
// worker consumes jobs from jobsChan until the context is canceled.
// Behaviorally identical to the original; only the select-arm order and the
// branch layout differ (select chooses uniformly among ready cases, so arm
// order is irrelevant).
func (c *Consumer) worker(ctx context.Context, num int) {
	log.Println("start the worker", num)
	for {
		select {
		case <-ctx.Done():
			log.Println("close the worker", num)
			return
		case job := <-c.jobsChan:
			if ctx.Err() == nil {
				c.process(num, job)
				continue
			}
			log.Println("get next job", job, "and close the worker", num)
			return
		}
	}
}
// poolSize is the number of concurrent workers and the jobsChan buffer size.
const poolSize = 2

func main() {
	// stop is signalled from the SIGINT/SIGTERM callback to let main exit.
	stop := make(chan bool)
	// create the consumer
	consumer := Consumer{
		inputChan: make(chan int, 10),
		jobsChan: make(chan int, poolSize),
	}
	ctx := withContextFunc(context.Background(), func() {
		log.Println("cancel from ctrl+c event")
		stop <- true
	})
	for i := 0; i < poolSize; i++ {
		go consumer.worker(ctx, i)
	}
	go consumer.startConsumer(ctx)
	// queue() is non-blocking and inputChan buffers 10, so these five
	// enqueues cannot block main; overflow values would simply be dropped.
	consumer.queue(1)
	consumer.queue(2)
	consumer.queue(3)
	consumer.queue(4)
	consumer.queue(5)
	// Block until the signal callback fires.
	<-stop
	// time.Sleep(10 * time.Second)
}
chore: add stop channel
package main
import (
"context"
"log"
"math/rand"
"os"
"os/signal"
"syscall"
"time"
)
// Consumer fans work out from inputChan to a pool of workers via jobsChan.
type Consumer struct {
	inputChan chan int // buffered intake queue, fed by queue()
	jobsChan chan int // jobs handed to workers; closed by startConsumer on shutdown
}

// getRandomTime returns a pseudo-random number of seconds in [0, 10).
//
// NOTE(review): re-seeding the global source on every call is unnecessary
// (rand.Seed is deprecated since Go 1.20) and two calls in the same
// nanosecond repeat the same value — drop the Seed call.
func getRandomTime() int {
	rand.Seed(time.Now().UnixNano())
	return rand.Intn(10)
}

// withContextFunc returns a child of ctx that is canceled when ctx is
// canceled or when SIGINT/SIGTERM arrives; on a signal, f runs after cancel.
func withContextFunc(ctx context.Context, f func()) context.Context {
	ctx, cancel := context.WithCancel(ctx)
	go func() {
		// NOTE(review): signal.Notify requires a buffered channel
		// (make(chan os.Signal, 1)); an unbuffered one can miss a signal
		// delivered before this goroutine reaches the select (go vet flags
		// this).
		c := make(chan os.Signal)
		signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(c)
		select {
		case <-ctx.Done():
		case <-c:
			cancel()
			f()
		}
	}()
	return ctx
}

// queue attempts a non-blocking send of input to inputChan; it reports false
// (dropping the value) when the buffer is full.
func (c *Consumer) queue(input int) bool {
	select {
	case c.inputChan <- input:
		log.Println("already send input value:", input)
		return true
	default:
		return false
	}
}
// startConsumer pumps jobs from inputChan into jobsChan until the context is
// canceled, then closes jobsChan to signal the workers. The value receiver is
// safe: channels are reference types, so the copy shares the same channels.
func (c Consumer) startConsumer(ctx context.Context) {
	for {
		select {
		case job := <-c.inputChan:
			// Drop a job that raced with shutdown instead of forwarding it.
			if ctx.Err() != nil {
				close(c.jobsChan)
				return
			}
			c.jobsChan <- job
		case <-ctx.Done():
			close(c.jobsChan)
			return
		}
	}
}

// process simulates handling one job by sleeping a random number of seconds.
func (c *Consumer) process(num, job int) {
	n := getRandomTime()
	log.Printf("Sleeping %d seconds...\n", n)
	time.Sleep(time.Duration(n) * time.Second)
	log.Println("worker:", num, " job value:", job)
}

// worker consumes jobs from jobsChan until the context is canceled; a job
// received after cancellation is logged but not processed.
func (c *Consumer) worker(ctx context.Context, num int) {
	log.Println("start the worker", num)
	for {
		select {
		case job := <-c.jobsChan:
			if ctx.Err() != nil {
				log.Println("get next job", job, "and close the worker", num)
				return
			}
			c.process(num, job)
		case <-ctx.Done():
			log.Println("close the worker", num)
			return
		}
	}
}
// poolSize is the number of concurrent workers and the jobsChan buffer size.
const poolSize = 2

func main() {
	// stop is signalled from the SIGINT/SIGTERM callback to let main exit.
	stop := make(chan bool)
	// create the consumer
	consumer := Consumer{
		inputChan: make(chan int, 10),
		jobsChan: make(chan int, poolSize),
	}
	ctx := withContextFunc(context.Background(), func() {
		log.Println("cancel from ctrl+c event")
		stop <- true
	})
	for i := 0; i < poolSize; i++ {
		go consumer.worker(ctx, i)
	}
	go consumer.startConsumer(ctx)
	// Enqueue off the main goroutine; queue() itself is non-blocking, so
	// this mainly keeps main free to block on stop immediately.
	go func() {
		consumer.queue(1)
		consumer.queue(2)
		consumer.queue(3)
		consumer.queue(4)
		consumer.queue(5)
	}()
	// Block until the signal callback fires.
	<-stop
	// time.Sleep(10 * time.Second)
}
|
package main
import (
//"io"
//"io/ioutil"
"flag"
"os"
//"bufio"
"fmt"
//"net"
//"net/http"
//"net/http/fcgi"
//"strconv"
//"unicode"
"encoding/json"
//"text/scanner"
"GoMySQL"
//"runtime"
//"bytes"
//"time"
//"strings"
"math"
//"math/rand"
//"crypto/sha1"
//"crypto/sha256"
//"compress/gzip"
//"crypto/aes"
//"sort"
//"net/smtp"
"log"
//"xiwi"
//"image"
//"image/color"
//"image/draw"
//"image/png"
//"image/jpeg"
"github.com/ungerik/go-cairo"
)
// flagDB selects the MySQL host to connect to.
var flagDB = flag.String("db", "localhost", "MySQL database to connect to")

//var flagLogFile = flag.String("log-file", "", "file to output log information to")
//var flagPciteTable = flag.String("table", "pcite", "MySQL database table to get pcite data from")
//var flagFastCGIAddr = flag.String("fcgi", "", "listening on given address using FastCGI protocol (eg -fcgi :9100)")
//var flagHTTPAddr = flag.String("http", "", "listening on given address using HTTP protocol (eg -http :8089)")
//var flagTestQueryId = flag.Uint("test-id", 0, "run a test query with id")
//var flagTestQueryArxiv = flag.String("test-arxiv", "", "run a test query with arxiv")
//var flagMetaBaseDir = flag.String("meta", "", "Base directory for meta file data (abstracts etc.)")

// main parses flags, connects to MySQL, and renders the map described by the
// JSON file in arg 0 into "<arg 1>.png" via DoWork.
func main() {
	// parse command line options
	flag.Parse()
	if flag.NArg() != 2 {
		log.Fatal("need to specify map.json file, and output file (without extension)")
	}
	//if len(*flagMetaBaseDir) == 0 {
	//    *flagMetaBaseDir = "/home/xiwi/data/meta"
	//}
	// connect to MySQL database
	// NOTE(review): credentials are hard-coded placeholders ("hidden");
	// consider taking them from flags or the environment.
	db, err := mysql.DialTCP(*flagDB, "hidden", "hidden", "xiwi")
	if err != nil {
		fmt.Println("cannot connect to database;", err)
		return
	}
	defer db.Close()
	DoWork(db, flag.Arg(0), flag.Arg(1))
}

// CairoColor is an RGB colour with components in [0, 1].
type CairoColor struct {
	r, g, b float64
}

// Paper is one point on the map: its database id, main category, position,
// radius, age (0 = oldest paper in the file), and derived colours.
type Paper struct {
	id uint
	maincat string
	x int
	y int
	radius int
	age float32
	colBG CairoColor
	colFG CairoColor
}

// Graph is the full set of papers plus their integer bounding box.
type Graph struct {
	papers []*Paper
	MinX, MinY, MaxX, MaxY int
	BoundsX, BoundsY int
}

// QueryCategories returns the main category for a single paper id, or "" on
// any error, missing row, or ambiguous (multi-row) result.
func QueryCategories(db *mysql.Client, id uint) string {
	// execute the query
	// id is a uint formatted with %d, so the Sprintf-built SQL cannot inject.
	query := fmt.Sprintf("SELECT maincat,allcats FROM meta_data WHERE id=%d", id)
	err := db.Query(query)
	if err != nil {
		fmt.Println("MySQL query error;", err)
		return ""
	}
	// get result set
	result, err := db.StoreResult()
	if err != nil {
		fmt.Println("MySQL store result error;", err)
		return ""
	}
	// check if there are any results
	if result.RowCount() == 0 {
		return ""
	}
	// should be only 1 result
	if result.RowCount() != 1 {
		fmt.Println("MySQL multiple results; result count =", result.RowCount())
		return ""
	}
	// get the row
	row := result.FetchRow()
	if row == nil {
		return ""
	}
	// get the categories
	var ok bool
	var maincat string
	if row[0] != nil {
		if maincat, ok = row[0].(string); !ok { return "" }
	}
	/*
	var allcats string
	if row[1] != nil {
		if allcats, ok := row[1].(string); !ok { return "" }
	}
	*/
	// NOTE(review): the early returns above skip FreeResult — possible
	// result-set leak on the error paths; confirm against GoMySQL's docs.
	db.FreeResult()
	return maincat
}
// getPaperById returns the paper with the given id, or nil if it is absent.
// papers must be sorted by ascending id: this is a binary search.
func getPaperById(papers []*Paper, id uint) *Paper {
	lo, hi := 0, len(papers)-1
	for lo <= hi {
		mid := lo + (hi-lo)/2
		switch p := papers[mid]; {
		case p.id == id:
			return p
		case id < p.id:
			hi = mid - 1
		default:
			lo = mid + 1
		}
	}
	return nil
}
// QueryCategories2 fills in the maincat field of every paper by streaming the
// whole meta_data table and matching rows to papers by id (binary search).
// Papers with no matching row, or rows with no matching paper, are skipped.
func QueryCategories2(db *mysql.Client, papers []*Paper) {
	// execute the query
	err := db.Query("SELECT id,maincat,allcats FROM meta_data")
	if err != nil {
		fmt.Println("MySQL query error;", err)
		return
	}
	// get result set
	// UseResult streams rows instead of buffering the whole table in memory.
	result, err := db.UseResult()
	if err != nil {
		fmt.Println("MySQL use result error;", err)
		return
	}
	// get each row from the result
	for {
		row := result.FetchRow()
		if row == nil {
			break
		}
		var ok bool
		var id uint64
		var maincat string
		//var allcats string
		if id, ok = row[0].(uint64); !ok { continue }
		if maincat, ok = row[1].(string); !ok { continue }
		//if allcats, ok = row[2].(string); !ok { continue }
		paper := getPaperById(papers, uint(id))
		if paper != nil {
			paper.maincat = maincat
		}
	}
	db.FreeResult()
}

// MakePaper builds a Paper from one JSON tuple; age is the paper's rank in
// the input file normalized to [0, 1). The db handle is currently unused
// (per-paper category lookup is commented out in favor of QueryCategories2).
func MakePaper(db *mysql.Client, id uint, x int, y int, radius int, age float64) *Paper {
	paper := new(Paper)
	paper.id = id
	paper.x = x
	paper.y = y
	paper.radius = radius
	paper.age = float32(age)
	//paper.maincat = QueryCategories(db, id)
	return paper
}
// setColour derives the paper's background and foreground colours from its
// main arXiv category and its age (newer papers trend towards red).
func (paper *Paper) setColour() {
	// base colour per main category
	var r, g, b float64
	switch paper.maincat {
	case "hep-th":
		r, g, b = 0, 0, 1
	case "hep-ph":
		r, g, b = 0, 1, 0
	case "hep-ex":
		r, g, b = 1, 1, 0 // yellow
	case "gr-qc":
		r, g, b = 0, 1, 1 // cyan
	case "astro-ph.GA":
		r, g, b = 1, 0, 1 // purple
	case "hep-lat":
		r, g, b = 0.7, 0.36, 0.2 // tan brown
	case "astro-ph.CO":
		r, g, b = 0.62, 0.86, 0.24 // lime green
	case "astro-ph":
		r, g, b = 0.89, 0.53, 0.6 // skin pink
	case "cont-mat":
		// NOTE(review): "cont-mat" looks like a typo for arXiv's "cond-mat";
		// left as-is since it must match the values stored in the DB — confirm.
		r, g, b = 0.6, 0.4, 0.4
	case "quant-ph":
		r, g, b = 0.4, 0.7, 0.7
	case "physics":
		r, g, b = 0, 0.5, 0 // dark green
	default:
		r, g, b = 0.7, 1, 0.3
	}
	// background colour: each channel pushed 70% of the way to white
	paper.colBG = CairoColor{0.7 + 0.3*r, 0.7 + 0.3*g, 0.7 + 0.3*b}
	// older papers are more saturated in colour
	age := float64(paper.age)
	saturation := 0.4 * (1 - age)
	// foreground colour; newer papers tend towards red
	age = age * age
	r = saturation + (r*(1-age)+age)*(1-saturation)
	g = saturation + (g*(1-age))*(1-saturation)
	b = saturation + (b*(1-age))*(1-saturation)
	paper.colFG = CairoColor{r, g, b}
}
// ReadGraph loads paper positions from the JSON file posFilename (a list of
// [id, x, y, radius] integer tuples, oldest first — index order defines each
// paper's age), computes the graph's bounding box, and fills in categories
// and colours from the database.
func ReadGraph(db *mysql.Client, posFilename string) *Graph {
	// Bug fix: the original opened flag.Arg(0), silently ignoring the
	// posFilename parameter. They coincide for the current caller, but the
	// parameter is the contract.
	file, err := os.Open(posFilename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	dec := json.NewDecoder(file)
	var papers [][]int
	if err := dec.Decode(&papers); err != nil {
		log.Fatal(err)
	}
	//papers = papers[0:10000]
	fmt.Printf("parsed %v papers\n", len(papers))
	graph := new(Graph)
	graph.papers = make([]*Paper, len(papers))
	// NOTE(review): Min/Max start at their zero values, so the box always
	// contains the origin; with all-positive coordinates the minima stay 0.
	// Left unchanged since downstream rendering may rely on it — confirm.
	for index, paper := range papers {
		var age float64 = float64(index) / float64(len(papers))
		paperObj := MakePaper(db, uint(paper[0]), paper[1], paper[2], paper[3], age)
		graph.papers[index] = paperObj
		if paperObj.x < graph.MinX { graph.MinX = paperObj.x }
		if paperObj.y < graph.MinY { graph.MinY = paperObj.y }
		if paperObj.x > graph.MaxX { graph.MaxX = paperObj.x }
		if paperObj.y > graph.MaxY { graph.MaxY = paperObj.y }
	}
	graph.BoundsX = graph.MaxX - graph.MinX
	graph.BoundsY = graph.MaxY - graph.MinY
	QueryCategories2(db, graph.papers)
	for _, paper := range graph.papers {
		paper.setColour()
	}
	fmt.Printf("graph has %v papers; min=(%v,%v), max=(%v,%v)\n", len(papers), graph.MinX, graph.MinY, graph.MaxX, graph.MaxY)
	return graph
}
// QuadTreeNode is either a leaf holding exactly one paper (Leaf != nil) or an
// internal node with four possibly-nil children, one per quadrant.
type QuadTreeNode struct {
	//Parent *QuadTreeNode
	//SideLength int
	Leaf *Paper
	Q0, Q1, Q2, Q3 *QuadTreeNode
}

// QuadTree is a point quadtree over paper positions with a square bounding
// box that covers every inserted paper.
type QuadTree struct {
	MinX, MinY, MaxX, MaxY int
	Root *QuadTreeNode
}

// QuadTreeInsertPaper inserts paper into the subtree rooted at *q, whose cell
// spans [MinX,MaxX) x [MinY,MaxY). Leaves split lazily when a second paper
// lands in the same cell. The parent argument is currently unused (kept for
// the commented-out Parent link).
func QuadTreeInsertPaper(parent *QuadTreeNode, q **QuadTreeNode, paper *Paper, MinX, MinY, MaxX, MaxY int) {
	if *q == nil {
		// hit an empty node; create a new leaf cell and put this paper in it
		*q = new(QuadTreeNode)
		//(*q).Parent = parent
		//(*q).SideLength = MaxX - MinX
		(*q).Leaf = paper
	} else if (*q).Leaf != nil {
		// hit a leaf; turn it into an internal node and re-insert the papers
		oldPaper := (*q).Leaf
		(*q).Leaf = nil
		(*q).Q0 = nil
		(*q).Q1 = nil
		(*q).Q2 = nil
		(*q).Q3 = nil
		QuadTreeInsertPaper(parent, q, oldPaper, MinX, MinY, MaxX, MaxY)
		QuadTreeInsertPaper(parent, q, paper, MinX, MinY, MaxX, MaxY)
	} else {
		// hit an internal node
		// check cell size didn't get too small
		// (two papers at identical coordinates would otherwise recurse forever)
		if (MaxX <= MinX + 1 || MaxY <= MinY + 1) {
			log.Println("ERROR: QuadTreeInsertPaper hit minimum cell size")
			return
		}
		// compute the dividing x and y positions
		MidX := (MinX + MaxX) / 2
		MidY := (MinY + MaxY) / 2
		// insert the new paper in the correct cell
		if ((paper.y) < MidY) {
			if ((paper.x) < MidX) {
				QuadTreeInsertPaper(*q, &(*q).Q0, paper, MinX, MinY, MidX, MidY)
			} else {
				QuadTreeInsertPaper(*q, &(*q).Q1, paper, MidX, MinY, MaxX, MidY)
			}
		} else {
			if ((paper.x) < MidX) {
				QuadTreeInsertPaper(*q, &(*q).Q2, paper, MinX, MidY, MidX, MaxY)
			} else {
				QuadTreeInsertPaper(*q, &(*q).Q3, paper, MidX, MidY, MaxX, MaxY)
			}
		}
	}
}

// BuildQuadTree constructs a quadtree over papers, first expanding the
// papers' bounding box into a square so quadrant subdivision stays uniform.
func BuildQuadTree(papers []*Paper) *QuadTree {
	qt := new(QuadTree)
	// if no papers, return
	if len(papers) == 0 {
		return qt
	}
	// first work out the bounding box of all papers
	qt.MinX = papers[0].x
	qt.MinY = papers[0].y
	qt.MaxX = papers[0].x
	qt.MaxY = papers[0].y
	for _, paper := range papers {
		if (paper.x < qt.MinX) { qt.MinX = paper.x; }
		if (paper.y < qt.MinY) { qt.MinY = paper.y; }
		if (paper.x > qt.MaxX) { qt.MaxX = paper.x; }
		if (paper.y > qt.MaxY) { qt.MaxY = paper.y; }
	}
	// increase the bounding box so it's square
	{
		dx := qt.MaxX - qt.MinX
		dy := qt.MaxY - qt.MinY
		if dx > dy {
			cen_y := (qt.MinY + qt.MaxY) / 2
			qt.MinY = cen_y - dx / 2
			qt.MaxY = cen_y + dx / 2
		} else {
			cen_x := (qt.MinX + qt.MaxX) / 2
			qt.MinX = cen_x - dy / 2
			qt.MaxX = cen_x + dy / 2
		}
	}
	// build the quad tree
	for _, paper := range papers {
		QuadTreeInsertPaper(nil, &qt.Root, paper, qt.MinX, qt.MinY, qt.MaxX, qt.MaxY)
	}
	fmt.Printf("quad tree bounding box: (%v,%v) -- (%v,%v)\n", qt.MinX, qt.MinY, qt.MaxX, qt.MaxY)
	return qt
}

// ApplyIfWithin calls f for every leaf paper whose expanded square (its own
// radius plus r) around (x, y) contains the paper, pruning subtrees whose
// cells cannot intersect the query square.
func (q *QuadTreeNode) ApplyIfWithin(MinX, MinY, MaxX, MaxY int, x, y, r int, f func(paper *Paper)) {
	if q == nil {
	} else if q.Leaf != nil {
		// leaf: exact square-overlap test, growing r by the paper's radius
		r += q.Leaf.radius
		if x - r <= q.Leaf.x && q.Leaf.x <= x + r && y - r <= q.Leaf.y && q.Leaf.y <= y + r {
			f(q.Leaf)
		}
	} else if ((MinX <= x - r && x - r < MaxX) || (MinX <= x + r && x + r < MaxX) || (x - r < MinX && x + r >= MaxX)) &&
		((MinY <= y - r && y - r < MaxY) || (MinY <= y + r && y + r < MaxY) || (y - r < MinY && y + r >= MaxY)) {
		// internal node overlapping the query square: recurse into quadrants
		MidX := (MinX + MaxX) / 2
		MidY := (MinY + MaxY) / 2
		q.Q0.ApplyIfWithin(MinX, MinY, MidX, MidY, x, y, r, f)
		q.Q1.ApplyIfWithin(MidX, MinY, MaxX, MidY, x, y, r, f)
		q.Q2.ApplyIfWithin(MinX, MidY, MidX, MaxY, x, y, r, f)
		q.Q3.ApplyIfWithin(MidX, MidY, MaxX, MaxY, x, y, r, f)
	}
}

// ApplyIfWithin runs the query from the root over the whole tree bounds.
func (qt *QuadTree) ApplyIfWithin(x, y, r int, f func(paper *Paper)) {
	qt.Root.ApplyIfWithin(qt.MinX, qt.MinY, qt.MaxX, qt.MaxY, x, y, r, f)
}
//func sq(x float64) float64 {
// return x * x
//}
// DoWork renders the whole paper map into outFilename+".png": per-paper halo
// background circles, an area-averaged colour wash, a smoothing pass over the
// raw pixel bytes, then the foreground discs with outlines.
func DoWork(db *mysql.Client, posFilename string, outFilename string) {
	graph := ReadGraph(db, posFilename)
	qt := BuildQuadTree(graph.papers)
	// the output surface is 1/12 scale of the graph's integer bounds
	surf := cairo.NewSurface(cairo.FORMAT_RGB24, graph.BoundsX / 12, graph.BoundsY / 12)
	surf.SetSourceRGB(4.0/15, 5.0/15, 6.0/15)
	//surf.SetSourceRGB(0, 0, 0)
	surf.Paint()
	// uniform scale so the graph fits, centred on the surface
	matrix := new(cairo.Matrix)
	matrix.Xx = float64(surf.GetWidth()) / float64(graph.BoundsX)
	matrix.Yy = float64(surf.GetHeight()) / float64(graph.BoundsY)
	if matrix.Xx < matrix.Yy {
		matrix.Yy = matrix.Xx
	} else {
		matrix.Xx = matrix.Yy
	}
	matrix.X0 = 0.5 * float64(surf.GetWidth())
	matrix.Y0 = 0.5 * float64(surf.GetHeight())
	fmt.Println("rendering background")
	// simple halo background circle for each paper
	surf.SetMatrix(*matrix)
	for _, paper := range graph.papers {
		surf.SetSourceRGB(paper.colBG.r, paper.colBG.g, paper.colBG.b)
		surf.Arc(float64(paper.x), float64(paper.y), 2 * float64(paper.radius), 0, 2 * math.Pi)
		surf.Fill()
	}
	// area-based background: for every 2x2 pixel cell, average the background
	// colours of papers within 200 graph units (quadtree query) and paint the
	// cell when enough papers are nearby
	surf.IdentityMatrix()
	matrixInv := *matrix
	matrixInv.Invert()
	for v := 0; v + 1 < surf.GetHeight(); v += 2 {
		for u := 0; u + 1 < surf.GetWidth(); u += 2 {
			x, y := matrixInv.TransformPoint(float64(u), float64(v))
			ptR := 0.0
			ptG := 0.0
			ptB := 0.0
			n := 0
			qt.ApplyIfWithin(int(x), int(y), 200, func(paper *Paper) {
				ptR += paper.colBG.r
				ptG += paper.colBG.g
				ptB += paper.colBG.b
				n += 1
			})
			if n > 10 {
				// sparse regions: blend towards the base background colour
				if n < 20 {
					ptR += float64(20 - n) * 4.0/15
					ptG += float64(20 - n) * 5.0/15
					ptB += float64(20 - n) * 6.0/15
					n = 20
				}
				ptR /= float64(n)
				ptG /= float64(n)
				ptB /= float64(n)
				surf.SetSourceRGB(ptR, ptG, ptB)
				surf.Rectangle(float64(u), float64(v), 2, 2)
				surf.Fill()
			}
		}
	}
	// apply smoothing: 5-point plus-shaped box blur over the raw surface
	// bytes (channel order here is B,G,R at offsets 0,1,2 per pixel)
	{
		data := surf.GetData()
		w := surf.GetStride()
		fmt.Println(surf.GetFormat())
		data2 := make([]byte, len(data))
		for v := 1; v + 1 < surf.GetHeight(); v += 1 {
			for u := 1; u + 1 < surf.GetWidth(); u += 1 {
				var r, g, b uint
				/*
				if data[v * w + u * 4 + 0] == 0 && data[v * w + u * 4 + 1] == 0 && data[v * w + u * 4 + 2] == 0 {
					r = 5*0x44
					g = 5*0x55
					b = 5*0x66
				} else {
				*/
				b = uint(data[(v - 1) * w + (u + 0) * 4 + 0]) +
					uint(data[(v + 0) * w + (u - 1) * 4 + 0]) +
					uint(data[(v + 0) * w + (u + 0) * 4 + 0]) +
					uint(data[(v + 0) * w + (u + 1) * 4 + 0]) +
					uint(data[(v + 1) * w + (u + 0) * 4 + 0])
				g = uint(data[(v - 1) * w + (u + 0) * 4 + 1]) +
					uint(data[(v + 0) * w + (u - 1) * 4 + 1]) +
					uint(data[(v + 0) * w + (u + 0) * 4 + 1]) +
					uint(data[(v + 0) * w + (u + 1) * 4 + 1]) +
					uint(data[(v + 1) * w + (u + 0) * 4 + 1])
				r = uint(data[(v - 1) * w + (u + 0) * 4 + 2]) +
					uint(data[(v + 0) * w + (u - 1) * 4 + 2]) +
					uint(data[(v + 0) * w + (u + 0) * 4 + 2]) +
					uint(data[(v + 0) * w + (u + 1) * 4 + 2]) +
					uint(data[(v + 1) * w + (u + 0) * 4 + 2])
				//}
				data2[v * w + u * 4 + 0] = byte(b / 5)
				data2[v * w + u * 4 + 1] = byte(g / 5)
				data2[v * w + u * 4 + 2] = byte(r / 5)
			}
		}
		surf.SetData(data2)
	}
	// foreground
	fmt.Println("rendering foreground")
	surf.SetMatrix(*matrix)
	surf.SetLineWidth(3)
	for _, paper := range graph.papers {
		surf.Arc(float64(paper.x), float64(paper.y), float64(paper.radius), 0, 2 * math.Pi)
		surf.SetSourceRGB(paper.colFG.r, paper.colFG.g, paper.colFG.b)
		surf.FillPreserve()
		surf.SetSourceRGB(0, 0, 0)
		surf.Stroke()
	}
	fmt.Println("writing file")
	surf.WriteToPNG(outFilename + ".png")
	//canv.EncodeJPEG("out-.jpg")
	surf.Finish()
}
tiles: started code to generate tiles
package main
import (
//"io"
//"io/ioutil"
"flag"
"os"
//"bufio"
"fmt"
//"net"
//"net/http"
//"net/http/fcgi"
//"strconv"
//"unicode"
"encoding/json"
//"text/scanner"
"GoMySQL"
//"runtime"
//"bytes"
//"time"
//"strings"
"math"
//"math/rand"
//"crypto/sha1"
//"crypto/sha256"
//"compress/gzip"
//"crypto/aes"
//"sort"
//"net/smtp"
"log"
//"xiwi"
//"image"
//"image/color"
//"image/draw"
//"image/png"
//"image/jpeg"
"github.com/ungerik/go-cairo"
)
// flagDB selects the MySQL host to connect to.
var flagDB = flag.String("db", "localhost", "MySQL database to connect to")

//var flagLogFile = flag.String("log-file", "", "file to output log information to")
//var flagPciteTable = flag.String("table", "pcite", "MySQL database table to get pcite data from")
//var flagFastCGIAddr = flag.String("fcgi", "", "listening on given address using FastCGI protocol (eg -fcgi :9100)")
//var flagHTTPAddr = flag.String("http", "", "listening on given address using HTTP protocol (eg -http :8089)")
//var flagTestQueryId = flag.Uint("test-id", 0, "run a test query with id")
//var flagTestQueryArxiv = flag.String("test-arxiv", "", "run a test query with arxiv")
//var flagMetaBaseDir = flag.String("meta", "", "Base directory for meta file data (abstracts etc.)")

// main parses flags, connects to MySQL, and runs DoWork on the map JSON in
// arg 0, writing output files under the prefix in arg 1.
func main() {
	// parse command line options
	flag.Parse()
	if flag.NArg() != 2 {
		log.Fatal("need to specify map.json file, and output prefix (without extension)")
	}
	//if len(*flagMetaBaseDir) == 0 {
	//    *flagMetaBaseDir = "/home/xiwi/data/meta"
	//}
	// connect to MySQL database
	// NOTE(review): credentials are hard-coded placeholders ("hidden");
	// consider taking them from flags or the environment.
	db, err := mysql.DialTCP(*flagDB, "hidden", "hidden", "xiwi")
	if err != nil {
		fmt.Println("cannot connect to database;", err)
		return
	}
	defer db.Close()
	DoWork(db, flag.Arg(0), flag.Arg(1))
}

// CairoColor is an RGB colour with components in [0, 1].
type CairoColor struct {
	r, g, b float64
}

// Paper is one point on the map: its database id, main category, position,
// radius, age (0 = oldest paper in the file), and derived colours.
type Paper struct {
	id uint
	maincat string
	x int
	y int
	radius int
	age float32
	colBG CairoColor
	colFG CairoColor
}

// Graph is the full set of papers, the quadtree built over them, and their
// integer bounding box.
type Graph struct {
	papers []*Paper
	qt *QuadTree
	MinX, MinY, MaxX, MaxY int
	BoundsX, BoundsY int
}

// QueryCategories returns the main category for a single paper id, or "" on
// any error, missing row, or ambiguous (multi-row) result.
func QueryCategories(db *mysql.Client, id uint) string {
	// execute the query
	// id is a uint formatted with %d, so the Sprintf-built SQL cannot inject.
	query := fmt.Sprintf("SELECT maincat,allcats FROM meta_data WHERE id=%d", id)
	err := db.Query(query)
	if err != nil {
		fmt.Println("MySQL query error;", err)
		return ""
	}
	// get result set
	result, err := db.StoreResult()
	if err != nil {
		fmt.Println("MySQL store result error;", err)
		return ""
	}
	// check if there are any results
	if result.RowCount() == 0 {
		return ""
	}
	// should be only 1 result
	if result.RowCount() != 1 {
		fmt.Println("MySQL multiple results; result count =", result.RowCount())
		return ""
	}
	// get the row
	row := result.FetchRow()
	if row == nil {
		return ""
	}
	// get the categories
	var ok bool
	var maincat string
	if row[0] != nil {
		if maincat, ok = row[0].(string); !ok { return "" }
	}
	/*
	var allcats string
	if row[1] != nil {
		if allcats, ok := row[1].(string); !ok { return "" }
	}
	*/
	// NOTE(review): the early returns above skip FreeResult — possible
	// result-set leak on the error paths; confirm against GoMySQL's docs.
	db.FreeResult()
	return maincat
}
// getPaperById returns the paper with the given id, or nil if absent.
// papers must be sorted by ascending id: this is a binary search.
func getPaperById(papers []*Paper, id uint) *Paper {
	lo := 0
	hi := len(papers) - 1
	for lo <= hi {
		mid := (lo + hi) / 2
		if id == papers[mid].id {
			return papers[mid]
		} else if id < papers[mid].id {
			hi = mid - 1
		} else {
			lo = mid + 1
		}
	}
	return nil
}

// QueryCategories2 fills in the maincat field of every paper by streaming the
// whole meta_data table and matching rows to papers by id (binary search).
func QueryCategories2(db *mysql.Client, papers []*Paper) {
	// execute the query
	err := db.Query("SELECT id,maincat,allcats FROM meta_data")
	if err != nil {
		fmt.Println("MySQL query error;", err)
		return
	}
	// get result set
	// UseResult streams rows instead of buffering the whole table in memory.
	result, err := db.UseResult()
	if err != nil {
		fmt.Println("MySQL use result error;", err)
		return
	}
	// get each row from the result
	for {
		row := result.FetchRow()
		if row == nil {
			break
		}
		var ok bool
		var id uint64
		var maincat string
		//var allcats string
		if id, ok = row[0].(uint64); !ok { continue }
		if maincat, ok = row[1].(string); !ok { continue }
		//if allcats, ok = row[2].(string); !ok { continue }
		paper := getPaperById(papers, uint(id))
		if paper != nil {
			paper.maincat = maincat
		}
	}
	db.FreeResult()
}

// MakePaper builds a Paper from one JSON tuple; age is the paper's rank in
// the input file normalized to [0, 1). The db handle is currently unused.
func MakePaper(db *mysql.Client, id uint, x int, y int, radius int, age float64) *Paper {
	paper := new(Paper)
	paper.id = id
	paper.x = x
	paper.y = y
	paper.radius = radius
	paper.age = float32(age)
	//paper.maincat = QueryCategories(db, id)
	return paper
}

// setColour derives the paper's background and foreground colours from its
// main arXiv category and its age (newer papers trend towards red).
func (paper *Paper) setColour() {
	// basic colour of paper
	var r, g, b float64
	if paper.maincat == "hep-th" {
		r, g, b = 0, 0, 1
	} else if paper.maincat == "hep-ph" {
		r, g, b = 0, 1, 0
	} else if paper.maincat == "hep-ex" {
		r, g, b = 1, 1, 0 // yellow
	} else if paper.maincat == "gr-qc" {
		r, g, b = 0, 1, 1 // cyan
	} else if paper.maincat == "astro-ph.GA" {
		r, g, b = 1, 0, 1 // purple
	} else if paper.maincat == "hep-lat" {
		r, g, b = 0.7, 0.36, 0.2 // tan brown
	} else if paper.maincat == "astro-ph.CO" {
		r, g, b = 0.62, 0.86, 0.24 // lime green
	} else if paper.maincat == "astro-ph" {
		r, g, b = 0.89, 0.53, 0.6 // skin pink
	} else if paper.maincat == "cont-mat" {
		// NOTE(review): "cont-mat" looks like a typo for arXiv's "cond-mat";
		// must match the DB's stored values — confirm before changing.
		r, g, b = 0.6, 0.4, 0.4
	} else if paper.maincat == "quant-ph" {
		r, g, b = 0.4, 0.7, 0.7
	} else if paper.maincat == "physics" {
		r, g, b = 0, 0.5, 0 // dark green
	} else {
		r, g, b = 0.7, 1, 0.3
	}
	// background colour: each channel pushed 70% of the way to white
	paper.colBG = CairoColor{0.7 + 0.3 * r, 0.7 + 0.3 * g, 0.7 + 0.3 * b}
	// older papers are more saturated in colour
	age := float64(paper.age)
	saturation := 0.4 * (1 - age)
	// foreground colour; newer papers tend towards red
	age = age * age
	r = saturation + (r * (1 - age) + age) * (1 - saturation)
	g = saturation + (g * (1 - age) ) * (1 - saturation)
	b = saturation + (b * (1 - age) ) * (1 - saturation)
	paper.colFG = CairoColor{r, g, b}
}
// ReadGraph loads paper positions from the JSON file posFilename (a list of
// [id, x, y, radius] integer tuples, oldest first — index order defines each
// paper's age), computes the bounding box, fills in categories and colours
// from the database, and builds the graph's quadtree.
func ReadGraph(db *mysql.Client, posFilename string) *Graph {
	// Bug fix: the original opened flag.Arg(0), silently ignoring the
	// posFilename parameter. They coincide for the current caller, but the
	// parameter is the contract.
	file, err := os.Open(posFilename)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	dec := json.NewDecoder(file)
	var papers [][]int
	if err := dec.Decode(&papers); err != nil {
		log.Fatal(err)
	}
	//papers = papers[0:10000]
	fmt.Printf("parsed %v papers\n", len(papers))
	graph := new(Graph)
	graph.papers = make([]*Paper, len(papers))
	// NOTE(review): Min/Max start at their zero values, so the box always
	// contains the origin; with all-positive coordinates the minima stay 0.
	// Left unchanged since downstream rendering may rely on it — confirm.
	for index, paper := range papers {
		var age float64 = float64(index) / float64(len(papers))
		paperObj := MakePaper(db, uint(paper[0]), paper[1], paper[2], paper[3], age)
		graph.papers[index] = paperObj
		if paperObj.x < graph.MinX { graph.MinX = paperObj.x }
		if paperObj.y < graph.MinY { graph.MinY = paperObj.y }
		if paperObj.x > graph.MaxX { graph.MaxX = paperObj.x }
		if paperObj.y > graph.MaxY { graph.MaxY = paperObj.y }
	}
	graph.BoundsX = graph.MaxX - graph.MinX
	graph.BoundsY = graph.MaxY - graph.MinY
	QueryCategories2(db, graph.papers)
	for _, paper := range graph.papers {
		paper.setColour()
	}
	fmt.Printf("graph has %v papers; min=(%v,%v), max=(%v,%v)\n", len(papers), graph.MinX, graph.MinY, graph.MaxX, graph.MaxY)
	// If we use quadtree may as well assign it here
	graph.qt = BuildQuadTree(graph.papers)
	return graph
}
// QuadTreeNode is either a leaf holding exactly one paper (Leaf != nil) or an
// internal node with four possibly-nil children, one per quadrant.
type QuadTreeNode struct {
	//Parent *QuadTreeNode
	//SideLength int
	Leaf *Paper
	Q0, Q1, Q2, Q3 *QuadTreeNode
}

// QuadTree is a point quadtree over paper positions with a square bounding
// box that covers every inserted paper.
type QuadTree struct {
	MinX, MinY, MaxX, MaxY int
	Root *QuadTreeNode
}

// QuadTreeInsertPaper inserts paper into the subtree rooted at *q, whose cell
// spans [MinX,MaxX) x [MinY,MaxY). Leaves split lazily when a second paper
// lands in the same cell. The parent argument is currently unused.
func QuadTreeInsertPaper(parent *QuadTreeNode, q **QuadTreeNode, paper *Paper, MinX, MinY, MaxX, MaxY int) {
	if *q == nil {
		// hit an empty node; create a new leaf cell and put this paper in it
		*q = new(QuadTreeNode)
		//(*q).Parent = parent
		//(*q).SideLength = MaxX - MinX
		(*q).Leaf = paper
	} else if (*q).Leaf != nil {
		// hit a leaf; turn it into an internal node and re-insert the papers
		oldPaper := (*q).Leaf
		(*q).Leaf = nil
		(*q).Q0 = nil
		(*q).Q1 = nil
		(*q).Q2 = nil
		(*q).Q3 = nil
		QuadTreeInsertPaper(parent, q, oldPaper, MinX, MinY, MaxX, MaxY)
		QuadTreeInsertPaper(parent, q, paper, MinX, MinY, MaxX, MaxY)
	} else {
		// hit an internal node
		// check cell size didn't get too small
		// (two papers at identical coordinates would otherwise recurse forever)
		if (MaxX <= MinX + 1 || MaxY <= MinY + 1) {
			log.Println("ERROR: QuadTreeInsertPaper hit minimum cell size")
			return
		}
		// compute the dividing x and y positions
		MidX := (MinX + MaxX) / 2
		MidY := (MinY + MaxY) / 2
		// insert the new paper in the correct cell
		if ((paper.y) < MidY) {
			if ((paper.x) < MidX) {
				QuadTreeInsertPaper(*q, &(*q).Q0, paper, MinX, MinY, MidX, MidY)
			} else {
				QuadTreeInsertPaper(*q, &(*q).Q1, paper, MidX, MinY, MaxX, MidY)
			}
		} else {
			if ((paper.x) < MidX) {
				QuadTreeInsertPaper(*q, &(*q).Q2, paper, MinX, MidY, MidX, MaxY)
			} else {
				QuadTreeInsertPaper(*q, &(*q).Q3, paper, MidX, MidY, MaxX, MaxY)
			}
		}
	}
}

// BuildQuadTree constructs a quadtree over papers, first expanding the
// papers' bounding box into a square so quadrant subdivision stays uniform.
func BuildQuadTree(papers []*Paper) *QuadTree {
	qt := new(QuadTree)
	// if no papers, return
	if len(papers) == 0 {
		return qt
	}
	// first work out the bounding box of all papers
	qt.MinX = papers[0].x
	qt.MinY = papers[0].y
	qt.MaxX = papers[0].x
	qt.MaxY = papers[0].y
	for _, paper := range papers {
		if (paper.x < qt.MinX) { qt.MinX = paper.x; }
		if (paper.y < qt.MinY) { qt.MinY = paper.y; }
		if (paper.x > qt.MaxX) { qt.MaxX = paper.x; }
		if (paper.y > qt.MaxY) { qt.MaxY = paper.y; }
	}
	// increase the bounding box so it's square
	{
		dx := qt.MaxX - qt.MinX
		dy := qt.MaxY - qt.MinY
		if dx > dy {
			cen_y := (qt.MinY + qt.MaxY) / 2
			qt.MinY = cen_y - dx / 2
			qt.MaxY = cen_y + dx / 2
		} else {
			cen_x := (qt.MinX + qt.MaxX) / 2
			qt.MinX = cen_x - dy / 2
			qt.MaxX = cen_x + dy / 2
		}
	}
	// build the quad tree
	for _, paper := range papers {
		QuadTreeInsertPaper(nil, &qt.Root, paper, qt.MinX, qt.MinY, qt.MaxX, qt.MaxY)
	}
	fmt.Printf("quad tree bounding box: (%v,%v) -- (%v,%v)\n", qt.MinX, qt.MinY, qt.MaxX, qt.MaxY)
	return qt
}

// ApplyIfWithin calls f for every leaf paper whose expanded square (its own
// radius plus r) around (x, y) contains the paper, pruning subtrees whose
// cells cannot intersect the query square.
func (q *QuadTreeNode) ApplyIfWithin(MinX, MinY, MaxX, MaxY int, x, y, r int, f func(paper *Paper)) {
	if q == nil {
	} else if q.Leaf != nil {
		// leaf: exact square-overlap test, growing r by the paper's radius
		r += q.Leaf.radius
		if x - r <= q.Leaf.x && q.Leaf.x <= x + r && y - r <= q.Leaf.y && q.Leaf.y <= y + r {
			f(q.Leaf)
		}
	} else if ((MinX <= x - r && x - r < MaxX) || (MinX <= x + r && x + r < MaxX) || (x - r < MinX && x + r >= MaxX)) &&
		((MinY <= y - r && y - r < MaxY) || (MinY <= y + r && y + r < MaxY) || (y - r < MinY && y + r >= MaxY)) {
		// internal node overlapping the query square: recurse into quadrants
		MidX := (MinX + MaxX) / 2
		MidY := (MinY + MaxY) / 2
		q.Q0.ApplyIfWithin(MinX, MinY, MidX, MidY, x, y, r, f)
		q.Q1.ApplyIfWithin(MidX, MinY, MaxX, MidY, x, y, r, f)
		q.Q2.ApplyIfWithin(MinX, MidY, MidX, MaxY, x, y, r, f)
		q.Q3.ApplyIfWithin(MidX, MidY, MaxX, MaxY, x, y, r, f)
	}
}

// ApplyIfWithin runs the query from the root over the whole tree bounds.
func (qt *QuadTree) ApplyIfWithin(x, y, r int, f func(paper *Paper)) {
	qt.Root.ApplyIfWithin(qt.MinX, qt.MinY, qt.MaxX, qt.MaxY, x, y, r, f)
}
// DrawTile renders one 512x512 PNG tile of the paper graph and writes it to
// "<outPrefix>tile_<xtot>-<ytot>_<xi>-<yi>.png".
//
// The graph is notionally divided into an xtot-by-ytot grid of tiles and
// (xi, yi) selects which tile to render; the selection is implemented by the
// translation (X0, Y0) applied to the cairo transform matrix.
func DrawTile(graph *Graph, xtot, ytot, xi, yi int, outPrefix string) {
	surf := cairo.NewSurface(cairo.FORMAT_RGB24, 512, 512)
	// Flood-fill the tile with the background colour before drawing.
	surf.SetSourceRGB(4.0/15, 5.0/15, 6.0/15)
	//surf.SetSourceRGB(0, 0, 0)
	surf.Paint()

	// Scale graph coordinates to pixels so the whole graph spans the full
	// xtot-by-ytot grid of 512-pixel tiles.
	matrix := new(cairo.Matrix)
	matrix.Xx = float64(surf.GetWidth()*xtot) / float64(graph.BoundsX)
	matrix.Yy = float64(surf.GetHeight()*ytot) / float64(graph.BoundsY)

	// Make it square
	if matrix.Xx < matrix.Yy {
		matrix.Yy = matrix.Xx
	} else {
		matrix.Xx = matrix.Yy
	}

	// Translate so the requested tile (xi, yi) lands on this surface.
	matrix.X0 = float64((xtot+2*(1-xi))*surf.GetWidth()) / 2.
	matrix.Y0 = float64((ytot+2*(1-yi))*surf.GetHeight()) / 2.

	fmt.Println("rendering background")

	// simple halo background circle for each paper
	surf.SetMatrix(*matrix)
	for _, paper := range graph.papers {
		surf.SetSourceRGB(paper.colBG.r, paper.colBG.g, paper.colBG.b)
		surf.Arc(float64(paper.x), float64(paper.y), 2*float64(paper.radius), 0, 2*math.Pi)
		surf.Fill()
	}

	// area-based background (experimental alternative, kept for reference)
	//qt := graph.qt
	//surf.IdentityMatrix()
	//matrixInv := *matrix
	//matrixInv.Invert()
	//for v := 0; v + 1 < surf.GetHeight(); v += 2 {
	//	for u := 0; u + 1 < surf.GetWidth(); u += 2 {
	//		x, y := matrixInv.TransformPoint(float64(u), float64(v))
	//		ptR := 0.0
	//		ptG := 0.0
	//		ptB := 0.0
	//		n := 0
	//		qt.ApplyIfWithin(int(x), int(y), 200, func(paper *Paper) {
	//			ptR += paper.colBG.r
	//			ptG += paper.colBG.g
	//			ptB += paper.colBG.b
	//			n += 1
	//		})
	//		if n > 10 {
	//			if n < 20 {
	//				ptR += float64(20 - n) * 4.0/15
	//				ptG += float64(20 - n) * 5.0/15
	//				ptB += float64(20 - n) * 6.0/15
	//				n = 20
	//			}
	//			ptR /= float64(n)
	//			ptG /= float64(n)
	//			ptB /= float64(n)
	//			surf.SetSourceRGB(ptR, ptG, ptB)
	//			surf.Rectangle(float64(u), float64(v), 2, 2)
	//			surf.Fill()
	//		}
	//	}
	//}

	// apply smoothing (experimental 5-point box blur, kept for reference)
	//{
	//	data := surf.GetData()
	//	w := surf.GetStride()
	//	fmt.Println(surf.GetFormat())
	//	data2 := make([]byte, len(data))
	//	for v := 1; v + 1 < surf.GetHeight(); v += 1 {
	//		for u := 1; u + 1 < surf.GetWidth(); u += 1 {
	//			var r, g, b uint
	//			/*
	//			if data[v * w + u * 4 + 0] == 0 && data[v * w + u * 4 + 1] == 0 && data[v * w + u * 4 + 2] == 0 {
	//				r = 5*0x44
	//				g = 5*0x55
	//				b = 5*0x66
	//			} else {
	//			*/
	//			b = uint(data[(v - 1) * w + (u + 0) * 4 + 0]) +
	//				uint(data[(v + 0) * w + (u - 1) * 4 + 0]) +
	//				uint(data[(v + 0) * w + (u + 0) * 4 + 0]) +
	//				uint(data[(v + 0) * w + (u + 1) * 4 + 0]) +
	//				uint(data[(v + 1) * w + (u + 0) * 4 + 0])
	//			g = uint(data[(v - 1) * w + (u + 0) * 4 + 1]) +
	//				uint(data[(v + 0) * w + (u - 1) * 4 + 1]) +
	//				uint(data[(v + 0) * w + (u + 0) * 4 + 1]) +
	//				uint(data[(v + 0) * w + (u + 1) * 4 + 1]) +
	//				uint(data[(v + 1) * w + (u + 0) * 4 + 1])
	//			r = uint(data[(v - 1) * w + (u + 0) * 4 + 2]) +
	//				uint(data[(v + 0) * w + (u - 1) * 4 + 2]) +
	//				uint(data[(v + 0) * w + (u + 0) * 4 + 2]) +
	//				uint(data[(v + 0) * w + (u + 1) * 4 + 2]) +
	//				uint(data[(v + 1) * w + (u + 0) * 4 + 2])
	//			//}
	//			data2[v * w + u * 4 + 0] = byte(b / 5)
	//			data2[v * w + u * 4 + 1] = byte(g / 5)
	//			data2[v * w + u * 4 + 2] = byte(r / 5)
	//		}
	//	}
	//	surf.SetData(data2)
	//}

	// foreground: a filled, outlined disc per paper
	fmt.Println("rendering foreground")
	surf.SetMatrix(*matrix)
	surf.SetLineWidth(3)
	for _, paper := range graph.papers {
		surf.Arc(float64(paper.x), float64(paper.y), float64(paper.radius), 0, 2*math.Pi)
		surf.SetSourceRGB(paper.colFG.r, paper.colFG.g, paper.colFG.b)
		surf.FillPreserve()
		surf.SetSourceRGB(0, 0, 0)
		surf.Stroke()
	}

	fmt.Println("writing file")
	filename := fmt.Sprintf("%stile_%d-%d_%d-%d.png", outPrefix, xtot, ytot, xi, yi)
	surf.WriteToPNG(filename)
	//canv.EncodeJPEG("out-.jpg")
	surf.Finish()
}
// DoWork loads the paper graph from the database and position file, then
// renders a fixed set of tiles to files prefixed with outPrefix.
func DoWork(db *mysql.Client, posFilename string, outPrefix string) {
	graph := ReadGraph(db, posFilename)

	// Each entry is {xtot, ytot, xi, yi}: the tile-grid size and the
	// 1-based tile index to render, in the same order as before.
	specs := [][4]int{
		{1, 1, 1, 1},
		{2, 2, 1, 2},
		{2, 2, 1, 1},
		{4, 4, 3, 2},
		{4, 4, 4, 4},
	}
	for _, s := range specs {
		DrawTile(graph, s[0], s[1], s[2], s[3], outPrefix)
	}
}
|
package integration_test
import (
"fmt"
"io"
"net/http"
"os/exec"
"github.com/concourse/concourse/atc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/onsi/gomega/ghttp"
)
// Integration coverage for `fly clear-versions`: flag validation, the
// shared-resources warning, the confirmation prompt, and error handling
// for both --resource and --resource-type targets.
var _ = Describe("Fly CLI", func() {
	Describe("clear-versions", func() {
		var (
			sharedResourcesStatus int
			deleteVersionsStatus  int
			stdin                 io.Writer
			args                  []string
			sess                  *gexec.Session
		)

		BeforeEach(func() {
			stdin = nil
			args = []string{}
		})

		JustBeforeEach(func() {
			var err error

			flyCmd := exec.Command(flyPath, append([]string{"-t", targetName, "clear-versions"}, args...)...)
			stdin, err = flyCmd.StdinPipe()
			Expect(err).ToNot(HaveOccurred())

			sess, err = gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)
			Expect(err).ToNot(HaveOccurred())
		})

		// yes/no answer the confirmation prompt on the command's stdin.
		yes := func() {
			Eventually(sess).Should(gbytes.Say(`are you sure\? \[yN\]: `))
			fmt.Fprintf(stdin, "y\n")
		}

		no := func() {
			Eventually(sess).Should(gbytes.Say(`are you sure\? \[yN\]: `))
			fmt.Fprintf(stdin, "n\n")
		}

		Context("when a resource or resource type is not specified", func() {
			It("asks the user to specify a resource or resource type", func() {
				Eventually(sess).Should(gexec.Exit(1))
				Expect(sess.Err).To(gbytes.Say("please specify one of the required flags --resource or --resource-type"))
			})
		})

		Context("when a resource is specified", func() {
			var (
				expectedDeleteURL = "/api/v1/teams/main/pipelines/some-pipeline/resources/some-resource/versions"
				expectedSharedURL = "/api/v1/teams/main/pipelines/some-pipeline/resources/some-resource/shared"
			)

			JustBeforeEach(func() {
				atcServer.AppendHandlers(
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("GET", expectedSharedURL),
						ghttp.RespondWithJSONEncoded(sharedResourcesStatus, atc.ResourcesAndTypes{
							Resources: atc.ResourceIdentifiers{
								{
									Name:         "some-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-2",
									PipelineName: "other-pipeline",
									TeamName:     "other-team",
								},
							},
							ResourceTypes: atc.ResourceIdentifiers{
								{
									Name:         "some-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
							},
						}),
					),
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("DELETE", expectedDeleteURL),
						ghttp.RespondWithJSONEncoded(deleteVersionsStatus, atc.ClearVersionsResponse{VersionsRemoved: 1}),
					),
				)
			})

			BeforeEach(func() {
				args = append(args, "--resource", "some-pipeline/some-resource")
			})

			Context("when the resource exists and delete succeeds", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusOK
				})

				It("warns any shared resources/resource-types that the deletion will affect (because of shared version history)", func() {
					Eventually(sess).Should(gbytes.Say(`!!! this will clear the version histories for the following resources:
- some-team/some-pipeline/some-resource
- some-team/some-pipeline/other-resource
- other-team/other-pipeline/other-resource-2
and the following resource types:
- some-team/some-pipeline/some-resource-type`))
				})

				It("succeeds with deletion", func() {
					yes()
					Eventually(sess).Should(gexec.Exit(0))
					Eventually(sess).Should(gbytes.Say("1 versions removed"))
				})

				It("bails out when user says no", func() {
					no()
					Eventually(sess).Should(gbytes.Say(`bailing out`))
					Eventually(sess).ShouldNot(gbytes.Say("versions removed"))
					Eventually(sess).Should(gexec.Exit(0))
				})
			})

			Context("when deleting the versions fails", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusInternalServerError
				})

				It("fails to delete versions", func() {
					yes()
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Expect(sess.ExitCode()).ToNot(Equal(0))
				})
			})

			Context("when the resource is not found when fetching shared resources/resource types", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusNotFound
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("resource 'some-resource' is not found"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})

			Context("when fetching shared resources/resource-types returns unexpected status code", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusInternalServerError
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})
		})

		Context("when a resource type is specified", func() {
			var (
				expectedDeleteURL = "/api/v1/teams/main/pipelines/some-pipeline/resource-types/some-resource-type/versions"
				expectedSharedURL = "/api/v1/teams/main/pipelines/some-pipeline/resource-types/some-resource-type/shared"
			)

			JustBeforeEach(func() {
				atcServer.AppendHandlers(
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("GET", expectedSharedURL),
						ghttp.RespondWithJSONEncoded(sharedResourcesStatus, atc.ResourcesAndTypes{
							Resources: atc.ResourceIdentifiers{
								{
									Name:         "some-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
							},
							ResourceTypes: atc.ResourceIdentifiers{
								{
									Name:         "some-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-type-2",
									PipelineName: "other-pipeline",
									TeamName:     "other-team",
								},
							},
						}),
					),
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("DELETE", expectedDeleteURL),
						ghttp.RespondWithJSONEncoded(deleteVersionsStatus, atc.ClearVersionsResponse{VersionsRemoved: 2}),
					),
				)
			})

			BeforeEach(func() {
				args = append(args, "--resource-type", "some-pipeline/some-resource-type")
			})

			Context("when the resource type exists and delete succeeds", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusOK
				})

				It("warns any shared resources/resource-types that the deletion will affect (because of shared version history)", func() {
					Eventually(sess).Should(gbytes.Say(`!!! this will clear the version histories for the following resources:
- some-team/some-pipeline/some-resource
and the following resource types:
- some-team/some-pipeline/some-resource-type
- some-team/some-pipeline/other-resource-type
- other-team/other-pipeline/other-resource-type-2`))
				})

				It("succeeds with deletion", func() {
					yes()
					Eventually(sess).Should(gexec.Exit(0))
					Eventually(sess).Should(gbytes.Say("2 versions removed"))
				})

				It("bails out when user says no", func() {
					no()
					Eventually(sess).Should(gbytes.Say(`bailing out`))
					Eventually(sess).ShouldNot(gbytes.Say("versions removed"))
					Eventually(sess).Should(gexec.Exit(0))
				})
			})

			Context("when deleting the versions fails", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusInternalServerError
				})

				It("fails to delete versions", func() {
					yes()
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Expect(sess.ExitCode()).ToNot(Equal(0))
				})
			})

			Context("when the resource type is not found when fetching shared resources/resource types", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusNotFound
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("resource type 'some-resource-type' is not found"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})

			Context("when fetching shared resources/resource-types returns unexpected status code", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusInternalServerError
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})
		})

		Context("when both resource and resource type is specified", func() {
			BeforeEach(func() {
				args = append(args, "--resource", "some-pipeline/some-resource", "--resource-type", "some-pipeline/some-resource-type")
			})

			It("errors", func() {
				Eventually(sess.Err).Should(gbytes.Say("can specify only one of --resource or --resource-type\n"))
				// Failure exit codes are not guaranteed to be exactly 1 on
				// every platform (notably Windows), so only assert non-zero.
				Expect(sess.ExitCode()).ToNot(Equal(0))
			})
		})
	})
})
fix fly integration tests on Windows OS
Signed-off-by: Rui Yang <4d6b7b1f23ec6027b801cceb8a18a6505e32557e@vmware.com>
package integration_test
import (
"fmt"
"io"
"net/http"
"os/exec"
"github.com/concourse/concourse/atc"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"github.com/onsi/gomega/ghttp"
)
// Integration coverage for `fly clear-versions`: flag validation, the
// shared-resources warning, the confirmation prompt, and error handling
// for both --resource and --resource-type targets.
var _ = Describe("Fly CLI", func() {
	Describe("clear-versions", func() {
		var (
			sharedResourcesStatus int
			deleteVersionsStatus  int
			stdin                 io.Writer
			args                  []string
			sess                  *gexec.Session
		)

		BeforeEach(func() {
			stdin = nil
			args = []string{}
		})

		JustBeforeEach(func() {
			var err error

			flyCmd := exec.Command(flyPath, append([]string{"-t", targetName, "clear-versions"}, args...)...)
			stdin, err = flyCmd.StdinPipe()
			Expect(err).ToNot(HaveOccurred())

			sess, err = gexec.Start(flyCmd, GinkgoWriter, GinkgoWriter)
			Expect(err).ToNot(HaveOccurred())
		})

		// yes/no answer the confirmation prompt on the command's stdin.
		yes := func() {
			Eventually(sess).Should(gbytes.Say(`are you sure\? \[yN\]: `))
			fmt.Fprintf(stdin, "y\n")
		}

		no := func() {
			Eventually(sess).Should(gbytes.Say(`are you sure\? \[yN\]: `))
			fmt.Fprintf(stdin, "n\n")
		}

		Context("when a resource or resource type is not specified", func() {
			It("asks the user to specify a resource or resource type", func() {
				Eventually(sess).Should(gexec.Exit(1))
				Expect(sess.Err).To(gbytes.Say("please specify one of the required flags --resource or --resource-type"))
			})
		})

		Context("when a resource is specified", func() {
			var (
				expectedDeleteURL = "/api/v1/teams/main/pipelines/some-pipeline/resources/some-resource/versions"
				expectedSharedURL = "/api/v1/teams/main/pipelines/some-pipeline/resources/some-resource/shared"
			)

			JustBeforeEach(func() {
				atcServer.AppendHandlers(
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("GET", expectedSharedURL),
						ghttp.RespondWithJSONEncoded(sharedResourcesStatus, atc.ResourcesAndTypes{
							Resources: atc.ResourceIdentifiers{
								{
									Name:         "some-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-2",
									PipelineName: "other-pipeline",
									TeamName:     "other-team",
								},
							},
							ResourceTypes: atc.ResourceIdentifiers{
								{
									Name:         "some-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
							},
						}),
					),
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("DELETE", expectedDeleteURL),
						ghttp.RespondWithJSONEncoded(deleteVersionsStatus, atc.ClearVersionsResponse{VersionsRemoved: 1}),
					),
				)
			})

			BeforeEach(func() {
				args = append(args, "--resource", "some-pipeline/some-resource")
			})

			Context("when the resource exists and delete succeeds", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusOK
				})

				It("warns any shared resources/resource-types that the deletion will affect (because of shared version history)", func() {
					Eventually(sess).Should(gbytes.Say(`!!! this will clear the version histories for the following resources:
- some-team/some-pipeline/some-resource
- some-team/some-pipeline/other-resource
- other-team/other-pipeline/other-resource-2
and the following resource types:
- some-team/some-pipeline/some-resource-type`))
				})

				It("succeeds with deletion", func() {
					yes()
					Eventually(sess).Should(gexec.Exit(0))
					Eventually(sess).Should(gbytes.Say("1 versions removed"))
				})

				It("bails out when user says no", func() {
					no()
					Eventually(sess).Should(gbytes.Say(`bailing out`))
					Eventually(sess).ShouldNot(gbytes.Say("versions removed"))
					Eventually(sess).Should(gexec.Exit(0))
				})
			})

			Context("when deleting the versions fails", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusInternalServerError
				})

				It("fails to delete versions", func() {
					yes()
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Expect(sess.ExitCode()).ToNot(Equal(0))
				})
			})

			Context("when the resource is not found when fetching shared resources/resource types", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusNotFound
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("resource 'some-resource' is not found"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})

			Context("when fetching shared resources/resource-types returns unexpected status code", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusInternalServerError
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})
		})

		Context("when a resource type is specified", func() {
			var (
				expectedDeleteURL = "/api/v1/teams/main/pipelines/some-pipeline/resource-types/some-resource-type/versions"
				expectedSharedURL = "/api/v1/teams/main/pipelines/some-pipeline/resource-types/some-resource-type/shared"
			)

			JustBeforeEach(func() {
				atcServer.AppendHandlers(
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("GET", expectedSharedURL),
						ghttp.RespondWithJSONEncoded(sharedResourcesStatus, atc.ResourcesAndTypes{
							Resources: atc.ResourceIdentifiers{
								{
									Name:         "some-resource",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
							},
							ResourceTypes: atc.ResourceIdentifiers{
								{
									Name:         "some-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-type",
									PipelineName: "some-pipeline",
									TeamName:     "some-team",
								},
								{
									Name:         "other-resource-type-2",
									PipelineName: "other-pipeline",
									TeamName:     "other-team",
								},
							},
						}),
					),
					ghttp.CombineHandlers(
						ghttp.VerifyRequest("DELETE", expectedDeleteURL),
						ghttp.RespondWithJSONEncoded(deleteVersionsStatus, atc.ClearVersionsResponse{VersionsRemoved: 2}),
					),
				)
			})

			BeforeEach(func() {
				args = append(args, "--resource-type", "some-pipeline/some-resource-type")
			})

			Context("when the resource type exists and delete succeeds", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusOK
				})

				It("warns any shared resources/resource-types that the deletion will affect (because of shared version history)", func() {
					Eventually(sess).Should(gbytes.Say(`!!! this will clear the version histories for the following resources:
- some-team/some-pipeline/some-resource
and the following resource types:
- some-team/some-pipeline/some-resource-type
- some-team/some-pipeline/other-resource-type
- other-team/other-pipeline/other-resource-type-2`))
				})

				It("succeeds with deletion", func() {
					yes()
					Eventually(sess).Should(gexec.Exit(0))
					Eventually(sess).Should(gbytes.Say("2 versions removed"))
				})

				It("bails out when user says no", func() {
					no()
					Eventually(sess).Should(gbytes.Say(`bailing out`))
					Eventually(sess).ShouldNot(gbytes.Say("versions removed"))
					Eventually(sess).Should(gexec.Exit(0))
				})
			})

			Context("when deleting the versions fails", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusOK
					deleteVersionsStatus = http.StatusInternalServerError
				})

				It("fails to delete versions", func() {
					yes()
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Expect(sess.ExitCode()).ToNot(Equal(0))
				})
			})

			Context("when the resource type is not found when fetching shared resources/resource types", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusNotFound
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("resource type 'some-resource-type' is not found"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})

			Context("when fetching shared resources/resource-types returns unexpected status code", func() {
				BeforeEach(func() {
					sharedResourcesStatus = http.StatusInternalServerError
					deleteVersionsStatus = http.StatusOK
				})

				It("fails to delete versions", func() {
					Eventually(sess.Err).Should(gbytes.Say("Unexpected Response"))
					Eventually(sess).Should(gexec.Exit(1))
				})
			})
		})

		Context("when both resource and resource type is specified", func() {
			BeforeEach(func() {
				args = append(args, "--resource", "some-pipeline/some-resource", "--resource-type", "some-pipeline/some-resource-type")
			})

			It("errors", func() {
				// fmt.Sprintf with no format verbs was a no-op wrapper; pass
				// the pattern directly to gbytes.Say.
				Eventually(sess.Err).Should(gbytes.Say("can specify only one of --resource or --resource-type\n"))
				Expect(sess.ExitCode()).ToNot(Equal(0))
			})
		})
	})
})
|
package notification
import (
"net/http"
"net/url"
"socialapi/models"
"socialapi/workers/api/modules/helpers"
)
func List(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {
n := models.NewNotification()
list, err := n.List(helpers.GetQuery(u))
if err != nil {
return helpers.NewBadRequestResponse(err)
}
return helpers.NewOKResponse(list)
}
// Glance marks the given notification as glanced and reports the new
// state back to the caller; a failure to glance is a bad request.
func Glance(u *url.URL, h http.Header, req *models.Notification) (int, http.Header, interface{}, error) {
	err := req.Glance()
	if err != nil {
		return helpers.NewBadRequestResponse(err)
	}

	// Mirror the persisted state on the in-memory copy and echo it back.
	req.Glanced = true

	return helpers.NewOKResponse(map[string]interface{}{"Glanced": true})
}
Social: in case of invalid accounts, the response is changed to "not found"
package notification
import (
"github.com/jinzhu/gorm"
"net/http"
"net/url"
"socialapi/models"
"socialapi/workers/api/modules/helpers"
)
func List(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {
n := models.NewNotification()
list, err := n.List(helpers.GetQuery(u))
if err != nil {
if err == gorm.RecordNotFound {
return helpers.NewNotFoundResponse()
}
return helpers.NewBadRequestResponse(err)
}
return helpers.NewOKResponse(list)
}
// Glance marks the given notification as glanced and reports the new
// state back.  A gorm record-not-found error maps to 404; any other
// failure is a bad request.
func Glance(u *url.URL, h http.Header, req *models.Notification) (int, http.Header, interface{}, error) {
	if err := req.Glance(); err != nil {
		switch err {
		case gorm.RecordNotFound:
			return helpers.NewNotFoundResponse()
		default:
			return helpers.NewBadRequestResponse(err)
		}
	}

	// Mirror the persisted state on the in-memory copy and echo it back.
	req.Glanced = true

	return helpers.NewOKResponse(map[string]interface{}{"Glanced": true})
}
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package importer
// This file defines various utility functions exposed by the package
// and used by it.
import (
"go/ast"
"go/build"
"go/parser"
"go/token"
"os"
"path/filepath"
"strconv"
"sync"
)
var cwd string
func init() {
var err error
cwd, err = os.Getwd()
if err != nil {
panic("getcwd failed: " + err.Error())
}
}
// parsePackageFiles enumerates the files belonging to package path,
// then loads, parses and returns them.
//
// 'which' is a list of flags indicating which files to include:
// 'g': include non-test *.go source files (GoFiles)
// 't': include in-package *_test.go source files (TestGoFiles)
// 'x': include external *_test.go source files. (XTestGoFiles)
//
func parsePackageFiles(ctxt *build.Context, fset *token.FileSet, path string, which string) ([]*ast.File, error) {
// Set the "!cgo" go/build tag, preferring (dummy) Go to
// native C implementations of net.cgoLookupHost et al.
ctxt2 := *ctxt
ctxt2.CgoEnabled = false
// TODO(adonovan): fix: Do we need cwd? Shouldn't
// ImportDir(path) / $GOROOT suffice?
bp, err := ctxt2.Import(path, cwd, 0)
if _, ok := err.(*build.NoGoError); ok {
return nil, nil // empty directory
}
if err != nil {
return nil, err // import failed
}
var filenames []string
for _, c := range which {
var s []string
switch c {
case 'g':
s = bp.GoFiles
case 't':
s = bp.TestGoFiles
case 'x':
s = bp.XTestGoFiles
default:
panic(c)
}
filenames = append(filenames, s...)
}
return ParseFiles(fset, bp.Dir, filenames...)
}
// ParseFiles parses the Go source files files within directory dir
// and returns their ASTs, or the first parse error if any.
//
func ParseFiles(fset *token.FileSet, dir string, files ...string) ([]*ast.File, error) {
var wg sync.WaitGroup
n := len(files)
parsed := make([]*ast.File, n, n)
errors := make([]error, n, n)
for i, file := range files {
if !filepath.IsAbs(file) {
file = filepath.Join(dir, file)
}
wg.Add(1)
go func(i int, file string) {
parsed[i], errors[i] = parser.ParseFile(fset, file, nil, 0)
wg.Done()
}(i, file)
}
wg.Wait()
for _, err := range errors {
if err != nil {
return nil, err
}
}
return parsed, nil
}
// ---------- Internal helpers ----------
// unparen returns e with any enclosing parentheses stripped.
func unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
break
}
e = p.X
}
return e
}
// unreachable panics; it marks code paths the author believes can never
// be executed.
func unreachable() {
	panic("unreachable")
}
// importsOf returns the set of paths imported by the specified files.
func importsOf(p string, files []*ast.File) map[string]bool {
imports := make(map[string]bool)
outer:
for _, file := range files {
for _, decl := range file.Decls {
if decl, ok := decl.(*ast.GenDecl); ok {
if decl.Tok != token.IMPORT {
break outer // stop at the first non-import
}
for _, spec := range decl.Specs {
spec := spec.(*ast.ImportSpec)
if path, _ := strconv.Unquote(spec.Path.Value); path != "C" {
imports[path] = true
}
}
} else {
break outer // stop at the first non-import
}
}
}
return imports
}
go.tools/importer: don't pass srcDir=os.Getwd to go/build.Import().
This makes imports independent of the process's working
directory. (Perhaps this was a feature, but I haven't found a
situation in which it actually works.)
R=gri
CC=golang-dev
https://codereview.appspot.com/19420043
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package importer
// This file defines various utility functions exposed by the package
// and used by it.
import (
"go/ast"
"go/build"
"go/parser"
"go/token"
"path/filepath"
"strconv"
"sync"
)
// parsePackageFiles enumerates the files belonging to package path,
// then loads, parses and returns them.
//
// 'which' is a list of flags indicating which files to include:
//    'g': include non-test *.go source files (GoFiles)
//    't': include in-package *_test.go source files (TestGoFiles)
//    'x': include external *_test.go source files. (XTestGoFiles)
//
func parsePackageFiles(ctxt *build.Context, fset *token.FileSet, path string, which string) ([]*ast.File, error) {
	// Disable cgo, preferring (dummy) Go to native C implementations of
	// net.cgoLookupHost et al.
	buildCtx := *ctxt
	buildCtx.CgoEnabled = false

	// Import(srcDir="") disables local imports, e.g. import "./foo".
	pkg, err := buildCtx.Import(path, "", 0)
	if _, empty := err.(*build.NoGoError); empty {
		return nil, nil // empty directory
	}
	if err != nil {
		return nil, err // import failed
	}

	var names []string
	for _, flag := range which {
		switch flag {
		case 'g':
			names = append(names, pkg.GoFiles...)
		case 't':
			names = append(names, pkg.TestGoFiles...)
		case 'x':
			names = append(names, pkg.XTestGoFiles...)
		default:
			// Unknown flag: programmer error.
			panic(flag)
		}
	}
	return ParseFiles(fset, pkg.Dir, names...)
}
// ParseFiles parses the Go source files files within directory dir
// and returns their ASTs, or the first parse error if any.
//
func ParseFiles(fset *token.FileSet, dir string, files ...string) ([]*ast.File, error) {
var wg sync.WaitGroup
n := len(files)
parsed := make([]*ast.File, n, n)
errors := make([]error, n, n)
for i, file := range files {
if !filepath.IsAbs(file) {
file = filepath.Join(dir, file)
}
wg.Add(1)
go func(i int, file string) {
parsed[i], errors[i] = parser.ParseFile(fset, file, nil, 0)
wg.Done()
}(i, file)
}
wg.Wait()
for _, err := range errors {
if err != nil {
return nil, err
}
}
return parsed, nil
}
// ---------- Internal helpers ----------
// unparen returns e with any enclosing parentheses stripped.
func unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
break
}
e = p.X
}
return e
}
// unreachable panics; it marks code paths the author believes can never
// be executed.
func unreachable() {
	panic("unreachable")
}
// importsOf returns the set of paths imported by the specified files.
func importsOf(p string, files []*ast.File) map[string]bool {
imports := make(map[string]bool)
outer:
for _, file := range files {
for _, decl := range file.Decls {
if decl, ok := decl.(*ast.GenDecl); ok {
if decl.Tok != token.IMPORT {
break outer // stop at the first non-import
}
for _, spec := range decl.Specs {
spec := spec.(*ast.ImportSpec)
if path, _ := strconv.Unquote(spec.Path.Value); path != "C" {
imports[path] = true
}
}
} else {
break outer // stop at the first non-import
}
}
}
return imports
}
|
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker
import (
"bytes"
"fmt"
"os"
"strings"
"time"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/context"
"github.com/coreos/mantle/kola/cluster"
"github.com/coreos/mantle/kola/register"
"github.com/coreos/mantle/kola/skip"
"github.com/coreos/mantle/lang/worker"
"github.com/coreos/mantle/platform"
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "kola/tests/docker")
)
// init registers the docker kola tests together with the platform and
// Container Linux version constraints under which each may run.
func init() {
	register.Register(&register.Test{
		Run:         dockerResources,
		ClusterSize: 1,
		Name:        "docker.resources",
		UserData:    `#cloud-config`,
		// began shipping docker 1.10 in 949, which has all of the
		// tested resource options.
		MinVersion: semver.Version{Major: 949},
	})
	register.Register(&register.Test{
		Run:         dockerNetwork,
		ClusterSize: 2,
		Name:        "docker.network",
		UserData:    `#cloud-config`,
		MinVersion:  semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerOldClient,
		ClusterSize: 1,
		Name:        "docker.oldclient",
		UserData:    `#cloud-config`,
		MinVersion:  semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerUserns,
		ClusterSize: 1,
		Name:        "docker.userns",
		// The Ignition UserData below was generated from this source yaml:
		// https://github.com/coreos/container-linux-config-transpiler
		/*
			systemd:
			  units:
			  - name: docker.service
			    enable: true
			    dropins:
			      - name: 10-uesrns.conf
			        contents: |-
			          [Service]
			          Environment=DOCKER_OPTS=--userns-remap=dockremap
			storage:
			  files:
			  - filesystem: root
			    path: /etc/subuid
			    contents:
			      inline: "dockremap:100000:65536"
			  - filesystem: root
			    path: /etc/subgid
			    contents:
			      inline: "dockremap:100000:65536"
			passwd:
			  users:
			  - name: dockremap
			    create: {}
		*/
		Platforms: []string{"aws", "gce"},
		UserData:  `{"ignition":{"version":"2.0.0","config":{}},"storage":{"files":[{"filesystem":"root","path":"/etc/subuid","contents":{"source":"data:,dockremap%3A100000%3A65536","verification":{}},"user":{},"group":{}},{"filesystem":"root","path":"/etc/subgid","contents":{"source":"data:,dockremap%3A100000%3A65536","verification":{}},"user":{},"group":{}}]},"systemd":{"units":[{"name":"docker.service","enable":true,"dropins":[{"name":"10-uesrns.conf","contents":"[Service]\nEnvironment=DOCKER_OPTS=--userns-remap=dockremap"}]}]},"networkd":{},"passwd":{"users":[{"name":"dockremap","create":{}}]}}`,
		MinVersion: semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerNetworksReliably,
		ClusterSize: 1,
		Name:        "docker.networks-reliably",
		MinVersion:  semver.Version{Major: 1192},
	})
}
// genDockerContainer builds a minimal "FROM scratch" docker image named
// name on machine m, containing the named host binaries plus the shared
// libraries that ldd reports they link against.  Returns an error with
// the build output when the remote command fails.
func genDockerContainer(m platform.Machine, name string, binnames []string) error {
	// The whole build runs remotely over one SSH invocation: collect the
	// binaries and their libraries into a temp dir, then docker-build it.
	cmd := `tmpdir=$(mktemp -d); cd $tmpdir; echo -e "FROM scratch\nCOPY . /" > Dockerfile;
b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u);
sudo rsync -av --relative --copy-links $b $libs ./;
sudo docker build -t %s .`

	if output, err := m.SSH(fmt.Sprintf(cmd, strings.Join(binnames, " "), name)); err != nil {
		return fmt.Errorf("failed to make %s container: output: %q status: %q", name, output, err)
	}

	return nil
}
// dockerResources exercises, in parallel, various docker run options that
// set resource limits, using a simple sleep container.  It also acts as a
// regression test for https://github.com/coreos/bugs/issues/1246.
func dockerResources(c cluster.TestCluster) error {
	m := c.Machines()[0]

	plog.Debug("creating sleep container")

	if err := genDockerContainer(m, "sleep", []string{"sleep"}); err != nil {
		return err
	}

	dockerFmt := "docker run --rm %s sleep sleep 0.2"

	dCmd := func(arg string) string {
		return fmt.Sprintf(dockerFmt, arg)
	}

	ctx := context.Background()
	// Run up to 10 of the docker invocations concurrently.
	wg := worker.NewWorkerGroup(ctx, 10)

	// ref https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources
	for _, dockerCmd := range []string{
		// must set memory when setting memory-swap
		dCmd("--memory=10m --memory-swap=10m"),
		dCmd("--memory-reservation=10m"),
		dCmd("--kernel-memory=10m"),
		dCmd("--cpu-shares=100"),
		dCmd("--cpu-period=1000"),
		dCmd("--cpuset-cpus=0"),
		dCmd("--cpuset-mems=0"),
		dCmd("--cpu-quota=1000"),
		dCmd("--blkio-weight=10"),
		// none of these work in QEMU due to apparent lack of cfq for
		// blkio in virtual block devices.
		//dCmd("--blkio-weight-device=/dev/vda:10"),
		//dCmd("--device-read-bps=/dev/vda:1kb"),
		//dCmd("--device-write-bps=/dev/vda:1kb"),
		//dCmd("--device-read-iops=/dev/vda:10"),
		//dCmd("--device-write-iops=/dev/vda:10"),
		dCmd("--memory=10m --oom-kill-disable=true"),
		dCmd("--memory-swappiness=50"),
		dCmd("--shm-size=1m"),
	} {
		plog.Debugf("Executing %q", dockerCmd)

		// lol closures
		// (copy the loop variable so each worker runs its own command)
		cmd := dockerCmd

		worker := func(c context.Context) error {
			// TODO: pass context thru to SSH
			output, err := m.SSH(cmd)
			if err != nil {
				return fmt.Errorf("failed to run %q: output: %q status: %q", dockerCmd, output, err)
			}
			return nil
		}

		if err := wg.Start(worker); err != nil {
			return wg.WaitError(err)
		}
	}

	return wg.Wait()
}
// dockerNetwork ensures that docker containers can make network connections
// outside of the host. It builds an ncat container on two machines, runs a
// listener on one and a talker on the other, and verifies that each side
// receives the fixed greeting sent by its peer.
func dockerNetwork(c cluster.TestCluster) error {
	machines := c.Machines()
	src, dest := machines[0], machines[1]

	plog.Debug("creating ncat containers")
	if err := genDockerContainer(src, "ncat", []string{"ncat"}); err != nil {
		return err
	}
	if err := genDockerContainer(dest, "ncat", []string{"ncat"}); err != nil {
		return err
	}

	listener := func(c context.Context) error {
		// Will block until a message is received.
		out, err := dest.SSH(
			`echo "HELLO FROM SERVER" | docker run -i -p 9988:9988 ncat ncat --idle-timeout 20 --listen 0.0.0.0 9988`,
		)
		if err != nil {
			return err
		}

		if !bytes.Equal(out, []byte("HELLO FROM CLIENT")) {
			return fmt.Errorf("unexpected result from listener: %q", out)
		}
		return nil
	}

	talker := func(c context.Context) error {
		// Wait until listener is ready before trying anything.
		for {
			_, err := dest.SSH("sudo lsof -i TCP:9988 -s TCP:LISTEN | grep 9988 -q")
			if err == nil {
				break // socket is ready
			}

			exit, ok := err.(*ssh.ExitError)
			if !ok || exit.Waitmsg.ExitStatus() != 1 { // 1 is the expected exit of grep -q
				return err
			}
			plog.Debug("waiting for server to be ready")
			select {
			case <-c.Done():
				return fmt.Errorf("timeout waiting for server")
			default:
				time.Sleep(100 * time.Millisecond)
			}
		}

		srcCmd := fmt.Sprintf(`echo "HELLO FROM CLIENT" | docker run -i ncat ncat %s 9988`, dest.PrivateIP())
		out, err := src.SSH(srcCmd)
		if err != nil {
			return err
		}

		// %q (not %v) so the byte output prints as a readable quoted string
		// instead of a list of byte values; this is the server's greeting as
		// seen by the talker, not the listener's result.
		if !bytes.Equal(out, []byte("HELLO FROM SERVER")) {
			return fmt.Errorf("unexpected result from server: %q", out)
		}
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	return worker.Parallel(ctx, listener, talker)
}
// dockerOldClient is a regression test for
// https://github.com/coreos/bugs/issues/1569 and
// https://github.com/coreos/docker/pull/31: a docker 1.9.1 client binary,
// dropped onto the machine, must still be able to run a container against
// the current daemon. Skips when the old client binary is not installed on
// the test host.
func dockerOldClient(c cluster.TestCluster) error {
	const clientPath = "/usr/lib/kola/amd64/docker-1.9.1"
	if _, err := os.Stat(clientPath); err != nil {
		return skip.Skip(fmt.Sprintf("Can't find old docker client to test: %v", err))
	}
	c.DropFile(clientPath)

	machine := c.Machines()[0]
	if err := genDockerContainer(machine, "echo", []string{"echo"}); err != nil {
		return err
	}

	out, err := machine.SSH("/home/core/docker-1.9.1 run echo echo 'IT WORKED'")
	if err != nil {
		return fmt.Errorf("failed to run old docker client: %q status: %q", out, err)
	}
	if string(out) != "IT WORKED" {
		return fmt.Errorf("unexpected result from docker client: %q", out)
	}
	return nil
}
// dockerUserns is a regression test for userns breakage under docker 1.12.
// It verifies that a trivial container runs under user-namespace remapping
// with selinux enforcing, and that a running container's uid_map shows the
// expected dockremap mapping (`0 100000 65536`).
func dockerUserns(c cluster.TestCluster) error {
	m := c.Machines()[0]

	if err := genDockerContainer(m, "userns-test", []string{"echo", "sleep"}); err != nil {
		return err
	}

	if _, err := m.SSH(`sudo setenforce 1`); err != nil {
		// Include the underlying error (the original dropped it), so a
		// setenforce failure is diagnosable.
		return fmt.Errorf("could not enable selinux: %v", err)
	}

	output, err := m.SSH(`docker run userns-test echo fj.fj`)
	if err != nil {
		return fmt.Errorf("failed to run echo under userns: output: %q status: %q", output, err)
	}
	if !bytes.Equal(output, []byte("fj.fj")) {
		return fmt.Errorf("expected fj.fj, got %s", string(output))
	}

	// And just in case, verify that a container really is userns remapped
	if _, err = m.SSH(`docker run -d --name=sleepy userns-test sleep 10000`); err != nil {
		return fmt.Errorf("could not run sleep: %v", err)
	}
	uid_map, err := m.SSH(`until [[ "$(/usr/bin/docker inspect -f {{.State.Running}} sleepy)" == "true" ]]; do sleep 0.1; done;
pid=$(docker inspect -f {{.State.Pid}} sleepy);
cat /proc/$pid/uid_map; docker kill sleepy &>/dev/null`)
	if err != nil {
		return fmt.Errorf("could not read uid mapping: %v", err)
	}

	// uid_map is of the form `$mappedNamespacePidStart $realNamespacePidStart
	// $rangeLength`. We expect `0 100000 65536`
	mapParts := strings.Fields(strings.TrimSpace(string(uid_map)))
	if len(mapParts) != 3 {
		return fmt.Errorf("expected uid_map to have three parts, was: %s", string(uid_map))
	}
	// Both fields must match. The original condition used `&&`, which only
	// failed when *both* fields were wrong, so an invalid mapping such as
	// "0 999999 65536" was accepted.
	if mapParts[0] != "0" || mapParts[1] != "100000" {
		return fmt.Errorf("unexpected userns mapping values: %v", string(uid_map))
	}
	return nil
}
// dockerNetworksReliably is a regression test for
// https://github.com/coreos/bugs/issues/1785 and, hopefully, similar
// container-networking breakage: it launches 100 containers (20 in
// parallel), each of which pings the docker bridge once.
func dockerNetworksReliably(c cluster.TestCluster) error {
	machine := c.Machines()[0]

	if err := genDockerContainer(machine, "ping", []string{"sh", "ping"}); err != nil {
		return err
	}

	pingCmd := `seq 1 100 | xargs -i -n 1 -P 20 docker run ping sh -c 'out=$(ping -c 1 172.17.0.1 -w 1); if [[ "$?" != 0 ]]; then echo "{} FAIL"; echo "$out"; exit 1; else echo "{} PASS"; fi'`
	out, err := machine.SSH(pingCmd)
	if err != nil {
		return fmt.Errorf("could not run 100 containers pinging the bridge: %v: %q", err, string(out))
	}
	return nil
}
kola/tests/docker: define network test userdata
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker
import (
"bytes"
"fmt"
"os"
"strings"
"time"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
"golang.org/x/crypto/ssh"
"golang.org/x/net/context"
"github.com/coreos/mantle/kola/cluster"
"github.com/coreos/mantle/kola/register"
"github.com/coreos/mantle/kola/skip"
"github.com/coreos/mantle/lang/worker"
"github.com/coreos/mantle/platform"
)
var (
	// plog is the package-level logger for the kola docker tests.
	plog = capnslog.NewPackageLogger("github.com/coreos/mantle", "kola/tests/docker")
)
// init registers every docker test in this file with the kola test registry.
// Each entry names the test function, the cluster size it needs, the machine
// userdata, and the minimum Container Linux version it applies to.
func init() {
	register.Register(&register.Test{
		Run:         dockerResources,
		ClusterSize: 1,
		Name:        "docker.resources",
		UserData:    `#cloud-config`,
		// began shipping docker 1.10 in 949, which has all of the
		// tested resource options.
		MinVersion: semver.Version{Major: 949},
	})
	register.Register(&register.Test{
		Run:         dockerNetwork,
		ClusterSize: 2,
		Name:        "docker.network",
		UserData:    `#cloud-config`,
		MinVersion:  semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerOldClient,
		ClusterSize: 1,
		Name:        "docker.oldclient",
		UserData:    `#cloud-config`,
		MinVersion:  semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerUserns,
		ClusterSize: 1,
		Name:        "docker.userns",
		// The Ignition UserData below was transpiled from this yaml.
		// Source yaml:
		// https://github.com/coreos/container-linux-config-transpiler
		/*
			systemd:
			  units:
			  - name: docker.service
			    enable: true
			    dropins:
			      - name: 10-uesrns.conf
			        contents: |-
			          [Service]
			          Environment=DOCKER_OPTS=--userns-remap=dockremap
			storage:
			  files:
			  - filesystem: root
			    path: /etc/subuid
			    contents:
			      inline: "dockremap:100000:65536"
			  - filesystem: root
			    path: /etc/subgid
			    contents:
			      inline: "dockremap:100000:65536"
			passwd:
			  users:
			  - name: dockremap
			    create: {}
		*/
		Platforms: []string{"aws", "gce"},
		UserData:  `{"ignition":{"version":"2.0.0","config":{}},"storage":{"files":[{"filesystem":"root","path":"/etc/subuid","contents":{"source":"data:,dockremap%3A100000%3A65536","verification":{}},"user":{},"group":{}},{"filesystem":"root","path":"/etc/subgid","contents":{"source":"data:,dockremap%3A100000%3A65536","verification":{}},"user":{},"group":{}}]},"systemd":{"units":[{"name":"docker.service","enable":true,"dropins":[{"name":"10-uesrns.conf","contents":"[Service]\nEnvironment=DOCKER_OPTS=--userns-remap=dockremap"}]}]},"networkd":{},"passwd":{"users":[{"name":"dockremap","create":{}}]}}`,
		MinVersion: semver.Version{Major: 1192},
	})
	register.Register(&register.Test{
		Run:         dockerNetworksReliably,
		ClusterSize: 1,
		Name:        "docker.networks-reliably",
		UserData:    `#cloud-config`,
		MinVersion:  semver.Version{Major: 1192},
	})
}
// make a docker container out of binaries on the host
//
// genDockerContainer builds, on machine m, a scratch-based docker image named
// `name` containing each binary listed in binnames together with the shared
// libraries ldd reports for them. Returns an error when the remote build
// command fails.
func genDockerContainer(m platform.Machine, name string, binnames []string) error {
	// Shell script run over SSH: stage the binaries and their libraries into
	// a temp dir, then `docker build` a FROM-scratch image out of it. The two
	// %s placeholders are the space-joined binary names and the image name,
	// in that order.
	cmd := `tmpdir=$(mktemp -d); cd $tmpdir; echo -e "FROM scratch\nCOPY . /" > Dockerfile;
b=$(which %s); libs=$(sudo ldd $b | grep -o /lib'[^ ]*' | sort -u);
sudo rsync -av --relative --copy-links $b $libs ./;
sudo docker build -t %s .`
	if output, err := m.SSH(fmt.Sprintf(cmd, strings.Join(binnames, " "), name)); err != nil {
		return fmt.Errorf("failed to make %s container: output: %q status: %q", name, output, err)
	}
	return nil
}
// dockerResources exercises, using a simple sleep container, the various
// docker options that set resource limits. It also acts as a regression test
// for https://github.com/coreos/bugs/issues/1246.
//
// The docker invocations run concurrently through a worker group of size 10;
// the first failure aborts the run and is returned.
func dockerResources(c cluster.TestCluster) error {
	m := c.Machines()[0]

	plog.Debug("creating sleep container")
	if err := genDockerContainer(m, "sleep", []string{"sleep"}); err != nil {
		return err
	}

	dockerFmt := "docker run --rm %s sleep sleep 0.2"
	dCmd := func(arg string) string {
		return fmt.Sprintf(dockerFmt, arg)
	}

	ctx := context.Background()
	wg := worker.NewWorkerGroup(ctx, 10)

	// ref https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources
	for _, dockerCmd := range []string{
		// must set memory when setting memory-swap
		dCmd("--memory=10m --memory-swap=10m"),
		dCmd("--memory-reservation=10m"),
		dCmd("--kernel-memory=10m"),
		dCmd("--cpu-shares=100"),
		dCmd("--cpu-period=1000"),
		dCmd("--cpuset-cpus=0"),
		dCmd("--cpuset-mems=0"),
		dCmd("--cpu-quota=1000"),
		dCmd("--blkio-weight=10"),
		// none of these work in QEMU due to apparent lack of cfq for
		// blkio in virtual block devices.
		//dCmd("--blkio-weight-device=/dev/vda:10"),
		//dCmd("--device-read-bps=/dev/vda:1kb"),
		//dCmd("--device-write-bps=/dev/vda:1kb"),
		//dCmd("--device-read-iops=/dev/vda:10"),
		//dCmd("--device-write-iops=/dev/vda:10"),
		dCmd("--memory=10m --oom-kill-disable=true"),
		dCmd("--memory-swappiness=50"),
		dCmd("--shm-size=1m"),
	} {
		plog.Debugf("Executing %q", dockerCmd)
		// Copy the loop variable: the closure below may run after the loop
		// has advanced. The original closure referenced the loop variable
		// `dockerCmd` in its error message, so a failure could report the
		// wrong command; use the copy everywhere.
		cmd := dockerCmd
		// Named `job` rather than `worker` so the imported worker package
		// is not shadowed inside the loop body.
		job := func(c context.Context) error {
			// TODO: pass context thru to SSH
			output, err := m.SSH(cmd)
			if err != nil {
				return fmt.Errorf("failed to run %q: output: %q status: %q", cmd, output, err)
			}
			return nil
		}
		if err := wg.Start(job); err != nil {
			return wg.WaitError(err)
		}
	}

	return wg.Wait()
}
// dockerNetwork ensures that docker containers can make network connections
// outside of the host. It builds an ncat container on two machines, runs a
// listener on one and a talker on the other, and verifies that each side
// receives the fixed greeting sent by its peer.
func dockerNetwork(c cluster.TestCluster) error {
	machines := c.Machines()
	src, dest := machines[0], machines[1]

	plog.Debug("creating ncat containers")
	if err := genDockerContainer(src, "ncat", []string{"ncat"}); err != nil {
		return err
	}
	if err := genDockerContainer(dest, "ncat", []string{"ncat"}); err != nil {
		return err
	}

	listener := func(c context.Context) error {
		// Will block until a message is received.
		out, err := dest.SSH(
			`echo "HELLO FROM SERVER" | docker run -i -p 9988:9988 ncat ncat --idle-timeout 20 --listen 0.0.0.0 9988`,
		)
		if err != nil {
			return err
		}

		if !bytes.Equal(out, []byte("HELLO FROM CLIENT")) {
			return fmt.Errorf("unexpected result from listener: %q", out)
		}
		return nil
	}

	talker := func(c context.Context) error {
		// Wait until listener is ready before trying anything.
		for {
			_, err := dest.SSH("sudo lsof -i TCP:9988 -s TCP:LISTEN | grep 9988 -q")
			if err == nil {
				break // socket is ready
			}

			exit, ok := err.(*ssh.ExitError)
			if !ok || exit.Waitmsg.ExitStatus() != 1 { // 1 is the expected exit of grep -q
				return err
			}
			plog.Debug("waiting for server to be ready")
			select {
			case <-c.Done():
				return fmt.Errorf("timeout waiting for server")
			default:
				time.Sleep(100 * time.Millisecond)
			}
		}

		srcCmd := fmt.Sprintf(`echo "HELLO FROM CLIENT" | docker run -i ncat ncat %s 9988`, dest.PrivateIP())
		out, err := src.SSH(srcCmd)
		if err != nil {
			return err
		}

		// %q (not %v) so the byte output prints as a readable quoted string
		// instead of a list of byte values; this is the server's greeting as
		// seen by the talker, not the listener's result.
		if !bytes.Equal(out, []byte("HELLO FROM SERVER")) {
			return fmt.Errorf("unexpected result from server: %q", out)
		}
		return nil
	}

	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	return worker.Parallel(ctx, listener, talker)
}
// dockerOldClient is a regression test for
// https://github.com/coreos/bugs/issues/1569 and
// https://github.com/coreos/docker/pull/31: a docker 1.9.1 client binary,
// dropped onto the machine, must still be able to run a container against
// the current daemon. Skips when the old client binary is not installed on
// the test host.
func dockerOldClient(c cluster.TestCluster) error {
	const clientPath = "/usr/lib/kola/amd64/docker-1.9.1"
	if _, err := os.Stat(clientPath); err != nil {
		return skip.Skip(fmt.Sprintf("Can't find old docker client to test: %v", err))
	}
	c.DropFile(clientPath)

	machine := c.Machines()[0]
	if err := genDockerContainer(machine, "echo", []string{"echo"}); err != nil {
		return err
	}

	out, err := machine.SSH("/home/core/docker-1.9.1 run echo echo 'IT WORKED'")
	if err != nil {
		return fmt.Errorf("failed to run old docker client: %q status: %q", out, err)
	}
	if string(out) != "IT WORKED" {
		return fmt.Errorf("unexpected result from docker client: %q", out)
	}
	return nil
}
// dockerUserns is a regression test for userns breakage under docker 1.12.
// It verifies that a trivial container runs under user-namespace remapping
// with selinux enforcing, and that a running container's uid_map shows the
// expected dockremap mapping (`0 100000 65536`).
func dockerUserns(c cluster.TestCluster) error {
	m := c.Machines()[0]

	if err := genDockerContainer(m, "userns-test", []string{"echo", "sleep"}); err != nil {
		return err
	}

	if _, err := m.SSH(`sudo setenforce 1`); err != nil {
		// Include the underlying error (the original dropped it), so a
		// setenforce failure is diagnosable.
		return fmt.Errorf("could not enable selinux: %v", err)
	}

	output, err := m.SSH(`docker run userns-test echo fj.fj`)
	if err != nil {
		return fmt.Errorf("failed to run echo under userns: output: %q status: %q", output, err)
	}
	if !bytes.Equal(output, []byte("fj.fj")) {
		return fmt.Errorf("expected fj.fj, got %s", string(output))
	}

	// And just in case, verify that a container really is userns remapped
	if _, err = m.SSH(`docker run -d --name=sleepy userns-test sleep 10000`); err != nil {
		return fmt.Errorf("could not run sleep: %v", err)
	}
	uid_map, err := m.SSH(`until [[ "$(/usr/bin/docker inspect -f {{.State.Running}} sleepy)" == "true" ]]; do sleep 0.1; done;
pid=$(docker inspect -f {{.State.Pid}} sleepy);
cat /proc/$pid/uid_map; docker kill sleepy &>/dev/null`)
	if err != nil {
		return fmt.Errorf("could not read uid mapping: %v", err)
	}

	// uid_map is of the form `$mappedNamespacePidStart $realNamespacePidStart
	// $rangeLength`. We expect `0 100000 65536`
	mapParts := strings.Fields(strings.TrimSpace(string(uid_map)))
	if len(mapParts) != 3 {
		return fmt.Errorf("expected uid_map to have three parts, was: %s", string(uid_map))
	}
	// Both fields must match. The original condition used `&&`, which only
	// failed when *both* fields were wrong, so an invalid mapping such as
	// "0 999999 65536" was accepted.
	if mapParts[0] != "0" || mapParts[1] != "100000" {
		return fmt.Errorf("unexpected userns mapping values: %v", string(uid_map))
	}
	return nil
}
// dockerNetworksReliably is a regression test for
// https://github.com/coreos/bugs/issues/1785 and, hopefully, similar
// container-networking breakage: it launches 100 containers (20 in
// parallel), each of which pings the docker bridge once.
func dockerNetworksReliably(c cluster.TestCluster) error {
	machine := c.Machines()[0]

	if err := genDockerContainer(machine, "ping", []string{"sh", "ping"}); err != nil {
		return err
	}

	pingCmd := `seq 1 100 | xargs -i -n 1 -P 20 docker run ping sh -c 'out=$(ping -c 1 172.17.0.1 -w 1); if [[ "$?" != 0 ]]; then echo "{} FAIL"; echo "$out"; exit 1; else echo "{} PASS"; fi'`
	out, err := machine.SSH(pingCmd)
	if err != nil {
		return fmt.Errorf("could not run 100 containers pinging the bridge: %v: %q", err, string(out))
	}
	return nil
}
|
package token
// Type is the type of a token.
type Type string

// Token type names. Each constant holds the string the lexer uses to label
// one kind of token.
const (
	// EOF is at the end of every file
	EOF = "EOF"
	// Illegal is any non-recognized character
	Illegal = "illegal"
	// Number is a number literal (123.456)
	Number = "number"
	// String is a string literal ("foo")
	String = "string"
	// Char is a character literal ('x')
	Char = "char"
	// ID is an identifier (bar)
	ID = "identifier"
	// Param is a parameter ($baz)
	Param = "parameter"
	// Plus is the addition operator (+)
	Plus = "plus"
	// Minus is the subtraction operator (-)
	Minus = "minus"
	// Star is the multiplication operator (*)
	Star = "star"
	// Exp is the exponentiation operator (**)
	Exp = "exponent"
	// Slash is the division operator (/)
	Slash = "slash"
	// FloorDiv is the floor division operator (//)
	FloorDiv = "floor-div"
	// Mod is the modulo operator (%)
	Mod = "modulo"
	// BackSlash is a backslash (\)
	BackSlash = "backslash"
	// LeftParen is a left paren '('
	LeftParen = "lparen"
	// RightParen is a right paren ')'
	RightParen = "rparen"
	// LessThan is the less than operator (<)
	LessThan = "less-than"
	// GreaterThan is the greater than operator (>)
	GreaterThan = "greater-than"
	// LessThanEq is the less than or equal to operator (<=)
	LessThanEq = "less-than-or-equal"
	// GreaterThanEq is the greater than or equal to operator (>=)
	GreaterThanEq = "greater-than-or-equal"
	// LeftBrace is a left brace ({)
	LeftBrace = "left-brace"
	// RightBrace is a right brace (})
	RightBrace = "right-brace"
	// LeftSquare is a left square bracket ([)
	LeftSquare = "left-square"
	// RightSquare is a right square bracket (])
	RightSquare = "right-square"
	// Semi is a semi-colon, either in the source or ASI'd
	Semi = "semi"
	// Equal is the equality test operator (==)
	Equal = "equal"
	// NotEqual is the inverted equality test operator (!=)
	NotEqual = "not-equal"
	// Or is the or operator (||)
	Or = "or"
	// And is the and operator (&&)
	And = "and"
	// BitOr is the bitwise or operator (|)
	BitOr = "bitwise-or"
	// BitAnd is the bitwise and operator (&)
	BitAnd = "bitwise-and"
	// Assign is the assign operator (=)
	Assign = "assign"
	// Declare is the declare operator (:=)
	Declare = "declare"
	// Comma is a comma (,)
	Comma = "comma"
	// Arrow is a right-facing arrow (->)
	Arrow = "arrow"
	// Colon is a colon (:)
	Colon = "colon"
	// QuestionMark is the question-mark operator (?)
	QuestionMark = "question-mark"
	// Dot is the dot-access operator (.)
	Dot = "dot"
	// FatArrow is the fat arrow (=>)
	FatArrow = "fat-arrow"
	// Bang is an exclamation mark (!)
	Bang = "bang"
	// DoubleColon is two colons (::)
	DoubleColon = "double-colon"
	// PlusEquals is the addition-assignment operator (+=)
	PlusEquals = "assign-plus"
	// MinusEquals is the subtraction-assignment operator (-=)
	MinusEquals = "assign-minus"
	// StarEquals is the multiplication-assignment operator (*=)
	StarEquals = "assign-star"
	// ExpEquals is the exponentiation-assignment operator (**=)
	ExpEquals = "assign-exponent"
	// SlashEquals is the division-assignment operator (/=)
	SlashEquals = "assign-slash"
	// FloorDivEquals is the floor-division-assignment operator (//=)
	FloorDivEquals = "assign-floor-div"
	// ModEquals is the modulo-assignment operator (%=)
	ModEquals = "assign-modulo"
	// OrEquals is the or-assignment operator (||=)
	OrEquals = "assign-or"
	// AndEquals is the and-assignment operator (&&=)
	AndEquals = "assign-and"
	// BitOrEquals is the bitwise-or-assignment operator (|=)
	BitOrEquals = "assign-bitwise-or"
	// BitAndEquals is the bitwise-and-assignment operator (&=)
	BitAndEquals = "assign-bitwise-and"
	// QuestionMarkEquals is the question-mark-assignment operator (?=)
	QuestionMarkEquals = "assign-question-mark"
	// Def is the 'def' keyword
	Def = "def"
	// Return is the 'return' keyword
	Return = "return"
	// True is the 'true' keyword
	True = "true"
	// False is the 'false' keyword
	False = "false"
	// Null is the 'null' keyword
	Null = "null"
	// If is the 'if' keyword
	If = "if"
	// Else is the 'else' keyword
	Else = "else"
	// Elif is the 'elif' keyword
	Elif = "elif"
	// While is the 'while' keyword
	While = "while"
	// For is the 'for' keyword
	For = "for"
	// Next is the 'next' keyword
	Next = "next"
	// Break is the 'break' keyword
	Break = "break"
	// Class is the 'class' keyword
	Class = "class"
	// Extends is the 'extends' keyword
	Extends = "extends"
	// Init is the 'init' keyword
	Init = "init"
	// Match is the 'match' keyword
	Match = "match"
	// Try is the 'try' keyword
	Try = "try"
	// Catch is the 'catch' keyword
	Catch = "catch"
	// Import is the 'import' keyword
	Import = "import"
	// Use is the 'use' keyword
	Use = "use"
)
Change string for left and right parens
package token
// Type is the type of a token.
type Type string

// Token type names. Each constant holds the string the lexer uses to label
// one kind of token.
const (
	// EOF is at the end of every file
	EOF = "EOF"
	// Illegal is any non-recognized character
	Illegal = "illegal"
	// Number is a number literal (123.456)
	Number = "number"
	// String is a string literal ("foo")
	String = "string"
	// Char is a character literal ('x')
	Char = "char"
	// ID is an identifier (bar)
	ID = "identifier"
	// Param is a parameter ($baz)
	Param = "parameter"
	// Plus is the addition operator (+)
	Plus = "plus"
	// Minus is the subtraction operator (-)
	Minus = "minus"
	// Star is the multiplication operator (*)
	Star = "star"
	// Exp is the exponentiation operator (**)
	Exp = "exponent"
	// Slash is the division operator (/)
	Slash = "slash"
	// FloorDiv is the floor division operator (//)
	FloorDiv = "floor-div"
	// Mod is the modulo operator (%)
	Mod = "modulo"
	// BackSlash is a backslash (\)
	BackSlash = "backslash"
	// LeftParen is a left paren '('
	LeftParen = "left-paren"
	// RightParen is a right paren ')'
	RightParen = "right-paren"
	// LessThan is the less than operator (<)
	LessThan = "less-than"
	// GreaterThan is the greater than operator (>)
	GreaterThan = "greater-than"
	// LessThanEq is the less than or equal to operator (<=)
	LessThanEq = "less-than-or-equal"
	// GreaterThanEq is the greater than or equal to operator (>=)
	GreaterThanEq = "greater-than-or-equal"
	// LeftBrace is a left brace ({)
	LeftBrace = "left-brace"
	// RightBrace is a right brace (})
	RightBrace = "right-brace"
	// LeftSquare is a left square bracket ([)
	LeftSquare = "left-square"
	// RightSquare is a right square bracket (])
	RightSquare = "right-square"
	// Semi is a semi-colon, either in the source or ASI'd
	Semi = "semi"
	// Equal is the equality test operator (==)
	Equal = "equal"
	// NotEqual is the inverted equality test operator (!=)
	NotEqual = "not-equal"
	// Or is the or operator (||)
	Or = "or"
	// And is the and operator (&&)
	And = "and"
	// BitOr is the bitwise or operator (|)
	BitOr = "bitwise-or"
	// BitAnd is the bitwise and operator (&)
	BitAnd = "bitwise-and"
	// Assign is the assign operator (=)
	Assign = "assign"
	// Declare is the declare operator (:=)
	Declare = "declare"
	// Comma is a comma (,)
	Comma = "comma"
	// Arrow is a right-facing arrow (->)
	Arrow = "arrow"
	// Colon is a colon (:)
	Colon = "colon"
	// QuestionMark is the question-mark operator (?)
	QuestionMark = "question-mark"
	// Dot is the dot-access operator (.)
	Dot = "dot"
	// FatArrow is the fat arrow (=>)
	FatArrow = "fat-arrow"
	// Bang is an exclamation mark (!)
	Bang = "bang"
	// DoubleColon is two colons (::)
	DoubleColon = "double-colon"
	// PlusEquals is the addition-assignment operator (+=)
	PlusEquals = "assign-plus"
	// MinusEquals is the subtraction-assignment operator (-=)
	MinusEquals = "assign-minus"
	// StarEquals is the multiplication-assignment operator (*=)
	StarEquals = "assign-star"
	// ExpEquals is the exponentiation-assignment operator (**=)
	ExpEquals = "assign-exponent"
	// SlashEquals is the division-assignment operator (/=)
	SlashEquals = "assign-slash"
	// FloorDivEquals is the floor-division-assignment operator (//=)
	FloorDivEquals = "assign-floor-div"
	// ModEquals is the modulo-assignment operator (%=)
	ModEquals = "assign-modulo"
	// OrEquals is the or-assignment operator (||=)
	OrEquals = "assign-or"
	// AndEquals is the and-assignment operator (&&=)
	AndEquals = "assign-and"
	// BitOrEquals is the bitwise-or-assignment operator (|=)
	BitOrEquals = "assign-bitwise-or"
	// BitAndEquals is the bitwise-and-assignment operator (&=)
	BitAndEquals = "assign-bitwise-and"
	// QuestionMarkEquals is the question-mark-assignment operator (?=)
	QuestionMarkEquals = "assign-question-mark"
	// Def is the 'def' keyword
	Def = "def"
	// Return is the 'return' keyword
	Return = "return"
	// True is the 'true' keyword
	True = "true"
	// False is the 'false' keyword
	False = "false"
	// Null is the 'null' keyword
	Null = "null"
	// If is the 'if' keyword
	If = "if"
	// Else is the 'else' keyword
	Else = "else"
	// Elif is the 'elif' keyword
	Elif = "elif"
	// While is the 'while' keyword
	While = "while"
	// For is the 'for' keyword
	For = "for"
	// Next is the 'next' keyword
	Next = "next"
	// Break is the 'break' keyword
	Break = "break"
	// Class is the 'class' keyword
	Class = "class"
	// Extends is the 'extends' keyword
	Extends = "extends"
	// Init is the 'init' keyword
	Init = "init"
	// Match is the 'match' keyword
	Match = "match"
	// Try is the 'try' keyword
	Try = "try"
	// Catch is the 'catch' keyword
	Catch = "catch"
	// Import is the 'import' keyword
	Import = "import"
	// Use is the 'use' keyword
	Use = "use"
)
|
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package container creates and manipulates containers.
package container
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/cenkalti/backoff"
"github.com/gofrs/flock"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/pkg/sentry/control"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cgroup"
"gvisor.googlesource.com/gvisor/runsc/sandbox"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
const (
	// metadataFilename is the name of the metadata file relative to the
	// container root directory that holds sandbox metadata.
	metadataFilename = "meta.json"

	// metadataLockFilename is the name of a lock file in the container
	// root directory that is used to prevent concurrent modifications to
	// the container state and metadata. Both files live directly under the
	// per-container root directory.
	metadataLockFilename = "meta.lock"
)
// idRegex matches valid container ids. Compiled once at package scope so
// validateID does not recompile the pattern on every call.
// See libcontainer/factory_linux.go. (The pattern is kept byte-identical to
// preserve the accepted id set.)
var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)

// validateID validates the container id, returning an error when id contains
// characters outside the allowed set or is empty.
func validateID(id string) error {
	if !idRegex.MatchString(id) {
		return fmt.Errorf("invalid container id: %v", id)
	}
	return nil
}
// Container represents a containerized application. When running, the
// container is associated with a single Sandbox.
//
// Container metadata can be saved and loaded to disk. Within a root directory,
// we maintain subdirectories for each container named with the container id.
// The container metadata is stored as a json within the container directory
// in a file named "meta.json". This metadata format is defined by us and is
// not part of the OCI spec.
//
// Containers must write their metadata files after any change to their internal
// states. The entire container directory is deleted when the container is
// destroyed.
//
// When the container is stopped, all processes that belong to the container
// must be stopped before Destroy() returns. containerd makes roughly the
// following calls to stop a container:
//   - First it attempts to kill the container process with
//     'runsc kill SIGTERM'. After some time, it escalates to SIGKILL. In a
//     separate thread, it's waiting on the container. As soon as the wait
//     returns, it moves on to the next step:
//   - It calls 'runsc kill --all SIGKILL' to stop every process that belongs to
//     the container. 'kill --all SIGKILL' waits for all processes before
//     returning.
//   - Containerd waits for stdin, stdout and stderr to drain and be closed.
//   - It calls 'runsc delete'. runc implementation kills --all SIGKILL once
//     again just to be sure, waits, and then proceeds with remaining teardown.
type Container struct {
	// ID is the container ID.
	ID string `json:"id"`

	// Spec is the OCI runtime spec that configures this container.
	Spec *specs.Spec `json:"spec"`

	// BundleDir is the directory containing the container bundle.
	BundleDir string `json:"bundleDir"`

	// Root is the directory containing the container metadata file.
	Root string `json:"root"`

	// CreatedAt is the time the container was created.
	CreatedAt time.Time `json:"createdAt"`

	// Owner is the container owner.
	Owner string `json:"owner"`

	// ConsoleSocket is the path to a unix domain socket that will receive
	// the console FD.
	ConsoleSocket string `json:"consoleSocket"`

	// Status is the current container Status.
	Status Status `json:"status"`

	// GoferPid is the PID of the gofer running along side the sandbox. May
	// be 0 if the gofer has been killed.
	GoferPid int `json:"goferPid"`

	// goferIsChild is set if a gofer process is a child of the current process.
	//
	// This field isn't saved to json, because only a creator of a gofer
	// process will have it as a child process.
	goferIsChild bool

	// Sandbox is the sandbox this container is running in. It's set when the
	// container is created and reset when the sandbox is destroyed. Note
	// that it is serialized to the metadata file (json tag "sandbox").
	Sandbox *sandbox.Sandbox `json:"sandbox"`
}
// Load loads a container with the given id from a metadata file. id may be an
// abbreviation of the full container id, in which case Load loads the
// container to which id unambiguously refers to.
// Returns ErrNotExist if container doesn't exist.
func Load(rootDir, id string) (*Container, error) {
	log.Debugf("Load container %q %q", rootDir, id)
	if err := validateID(id); err != nil {
		return nil, fmt.Errorf("validating id: %v", err)
	}

	cRoot, err := findContainerRoot(rootDir, id)
	if err != nil {
		// Preserve error so that callers can distinguish 'not found' errors.
		return nil, err
	}

	// Lock the container metadata to prevent other runsc instances from
	// writing to it while we are reading it.
	unlock, err := lockContainerMetadata(cRoot)
	if err != nil {
		return nil, err
	}
	defer unlock()

	// Read the container metadata file and create a new Container from it.
	metaFile := filepath.Join(cRoot, metadataFilename)
	metaBytes, err := ioutil.ReadFile(metaFile)
	if err != nil {
		if os.IsNotExist(err) {
			// Preserve error so that callers can distinguish 'not found' errors.
			return nil, err
		}
		return nil, fmt.Errorf("reading container metadata file %q: %v", metaFile, err)
	}
	var c Container
	if err := json.Unmarshal(metaBytes, &c); err != nil {
		return nil, fmt.Errorf("unmarshaling container metadata from %q: %v", metaFile, err)
	}

	// If the status is "Running" or "Created", check that the sandbox
	// process still exists, and set it to Stopped if it does not.
	//
	// This is inherently racey.
	if c.Status == Running || c.Status == Created {
		// Check if the sandbox process is still running.
		if !c.isSandboxRunning() {
			// Sandbox no longer exists, so this container definitely does not exist.
			c.changeStatus(Stopped)
		} else if c.Status == Running {
			// Container state should reflect the actual state of the application, so
			// we don't consider gofer process here.
			// Signal 0 probes for process existence without delivering a signal.
			if err := c.SignalContainer(syscall.Signal(0), false); err != nil {
				c.changeStatus(Stopped)
			}
		}
	}

	return &c, nil
}
// findContainerRoot resolves partialID — a full container id or a unique
// prefix of one — to the container's root directory under rootDir. It
// returns os.ErrNotExist when nothing matches, and an error when the prefix
// is ambiguous (matches more than one container).
func findContainerRoot(rootDir, partialID string) (string, error) {
	// Fast path: partialID is the complete id of an existing container.
	direct := filepath.Join(rootDir, partialID)
	if _, err := os.Stat(direct); err == nil {
		return direct, nil
	}

	// Otherwise treat partialID as an abbreviation. It must be a prefix of
	// exactly one known container id; anything else is an error.
	ids, err := List(rootDir)
	if err != nil {
		return "", err
	}
	match := ""
	for _, id := range ids {
		if !strings.HasPrefix(id, partialID) {
			continue
		}
		if match != "" {
			return "", fmt.Errorf("id %q is ambiguous and could refer to multiple containers: %q, %q", partialID, match, id)
		}
		match = id
	}
	if match == "" {
		return "", os.ErrNotExist
	}

	log.Debugf("abbreviated id %q resolves to full id %q", partialID, match)
	return filepath.Join(rootDir, match), nil
}
// List returns all container ids in the given root directory. Every entry of
// rootDir (the per-container subdirectories) is reported by name.
func List(rootDir string) ([]string, error) {
	log.Debugf("List containers %q", rootDir)
	fs, err := ioutil.ReadDir(rootDir)
	if err != nil {
		return nil, fmt.Errorf("reading dir %q: %v", rootDir, err)
	}
	// Pre-size the result: exactly one id per directory entry, which avoids
	// the repeated growth copies of append on a nil slice.
	out := make([]string, 0, len(fs))
	for _, f := range fs {
		out = append(out, f.Name())
	}
	return out, nil
}
// Create creates the container in a new Sandbox process, unless the metadata
// indicates that an existing Sandbox should be used. The caller must call
// Destroy() on the container.
//
// On success the returned Container is in state Created and its metadata has
// been saved. On failure, any partially created state (metadata directory,
// gofer, sandbox) is destroyed before the error is returned.
func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (*Container, error) {
	log.Debugf("Create container %q in root dir: %s", id, conf.RootDir)
	if err := validateID(id); err != nil {
		return nil, err
	}

	// Lock the container metadata file to prevent concurrent creations of
	// containers with the same id.
	containerRoot := filepath.Join(conf.RootDir, id)
	unlock, err := lockContainerMetadata(containerRoot)
	if err != nil {
		return nil, err
	}
	defer unlock()

	// Check if the container already exists by looking for the metadata
	// file.
	if _, err := os.Stat(filepath.Join(containerRoot, metadataFilename)); err == nil {
		return nil, fmt.Errorf("container with id %q already exists", id)
	} else if !os.IsNotExist(err) {
		return nil, fmt.Errorf("looking for existing container in %q: %v", containerRoot, err)
	}

	c := &Container{
		ID:            id,
		Spec:          spec,
		ConsoleSocket: consoleSocket,
		BundleDir:     bundleDir,
		Root:          containerRoot,
		Status:        Creating,
		CreatedAt:     time.Now(),
		Owner:         os.Getenv("USER"),
	}
	// The Cleanup object cleans up partially created containers when an error occurs.
	// Any errors occurring during cleanup itself are ignored.
	cu := specutils.MakeCleanup(func() { _ = c.Destroy() })
	defer cu.Clean()

	// If the metadata annotations indicate that this container should be
	// started in an existing sandbox, we must do so. The metadata will
	// indicate the ID of the sandbox, which is the same as the ID of the
	// init container in the sandbox.
	if specutils.ShouldCreateSandbox(spec) {
		log.Debugf("Creating new sandbox for container %q", id)

		// Create and join cgroup before processes are created to ensure they are
		// part of the cgroup from the start (and all their children processes).
		cg, err := cgroup.New(spec)
		if err != nil {
			return nil, err
		}
		if cg != nil {
			// If there is cgroup config, install it before creating sandbox process.
			if err := cg.Install(spec.Linux.Resources); err != nil {
				return nil, fmt.Errorf("configuring cgroup: %v", err)
			}
		}
		if err := runInCgroup(cg, func() error {
			// The gofer must be created first so its I/O files can be donated
			// to the new sandbox.
			ioFiles, specFile, err := c.createGoferProcess(spec, conf, bundleDir)
			if err != nil {
				return err
			}

			// Start a new sandbox for this container. Any errors after this point
			// must destroy the container.
			c.Sandbox, err = sandbox.New(id, spec, conf, bundleDir, consoleSocket, userLog, ioFiles, specFile, cg)
			return err
		}); err != nil {
			return nil, err
		}
	} else {
		// This is sort of confusing. For a sandbox with a root
		// container and a child container in it, runsc sees:
		// * A container struct whose sandbox ID is equal to the
		// container ID. This is the root container that is tied to
		// the creation of the sandbox.
		// * A container struct whose sandbox ID is equal to the above
		// container/sandbox ID, but that has a different container
		// ID. This is the child container.
		sbid, ok := specutils.SandboxID(spec)
		if !ok {
			return nil, fmt.Errorf("no sandbox ID found when creating container")
		}
		log.Debugf("Creating new container %q in sandbox %q", c.ID, sbid)

		// Find the sandbox associated with this ID.
		sb, err := Load(conf.RootDir, sbid)
		if err != nil {
			return nil, err
		}
		c.Sandbox = sb.Sandbox
		if err := c.Sandbox.CreateContainer(c.ID); err != nil {
			return nil, err
		}
	}
	c.changeStatus(Created)

	// Save the metadata file.
	if err := c.save(); err != nil {
		return nil, err
	}

	// Write the PID file. Containerd considers the create complete after
	// this file is created, so it must be the last thing we do.
	if pidFile != "" {
		if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {
			return nil, fmt.Errorf("error writing PID file: %v", err)
		}
	}

	// Creation succeeded: disarm the cleanup so Destroy is not called.
	cu.Release()
	return c, nil
}
// Start starts running the containerized process inside the sandbox.
//
// The container must be in state Created; on success it transitions to
// Running and its metadata is saved. Prestart hooks abort the start on
// failure per the OCI spec; poststart hook failures are only logged.
func (c *Container) Start(conf *boot.Config) error {
	log.Debugf("Start container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	if err := c.requireStatus("start", Created); err != nil {
		return err
	}

	// "If any prestart hook fails, the runtime MUST generate an error,
	// stop and destroy the container" -OCI spec.
	if c.Spec.Hooks != nil {
		if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {
			return err
		}
	}

	if specutils.ShouldCreateSandbox(c.Spec) {
		// Root container: the whole sandbox is started.
		if err := c.Sandbox.StartRoot(c.Spec, conf); err != nil {
			return err
		}
	} else {
		// Join cgroup to start gofer process to ensure it's part of the cgroup from
		// the start (and all their children processes).
		if err := runInCgroup(c.Sandbox.Cgroup, func() error {
			// Create the gofer process.
			ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)
			if err != nil {
				return err
			}
			defer mountsFile.Close()

			// The gofer reports the resolved mount list back over the pipe;
			// use it as the authoritative mount configuration.
			cleanMounts, err := specutils.ReadMounts(mountsFile)
			if err != nil {
				return fmt.Errorf("reading mounts file: %v", err)
			}
			c.Spec.Mounts = cleanMounts
			return c.Sandbox.StartContainer(c.Spec, conf, c.ID, ioFiles)
		}); err != nil {
			return err
		}
	}

	// "If any poststart hook fails, the runtime MUST log a warning, but
	// the remaining hooks and lifecycle continue as if the hook had
	// succeeded" -OCI spec.
	if c.Spec.Hooks != nil {
		executeHooksBestEffort(c.Spec.Hooks.Poststart, c.State())
	}

	c.changeStatus(Running)
	return c.save()
}
// Restore takes a container and replaces its kernel and file system
// to restore a container from its state file.
func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {
	log.Debugf("Restore container %q", c.ID)

	unlockMeta, lockErr := c.lock()
	if lockErr != nil {
		return lockErr
	}
	defer unlockMeta()

	// Only a freshly created container may be restored.
	if err := c.requireStatus("restore", Created); err != nil {
		return err
	}
	if err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {
		return err
	}
	c.changeStatus(Running)
	return c.save()
}
// Run is a helper that calls Create + Start + Wait.
func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (syscall.WaitStatus, error) {
	log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
	container, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, userLog)
	if err != nil {
		return 0, fmt.Errorf("creating container: %v", err)
	}
	// Tear the container down once it has exited (or if starting fails);
	// errors returned by Destroy() itself are deliberately ignored.
	defer container.Destroy()

	if err := container.Start(conf); err != nil {
		return 0, fmt.Errorf("starting container: %v", err)
	}
	return container.Wait()
}
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
//
// The container must be in state Created or Running. args.ContainerID is
// overwritten with this container's ID before the request is forwarded to
// the sandbox.
func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
	log.Debugf("Execute in container %q, args: %+v", c.ID, args)
	if err := c.requireStatus("execute in", Created, Running); err != nil {
		return 0, err
	}
	args.ContainerID = c.ID
	return c.Sandbox.Execute(args)
}
// Event returns events for the container.
//
// The container must be in state Created, Running, or Paused; the event data
// is collected by the sandbox.
func (c *Container) Event() (*boot.Event, error) {
	log.Debugf("Getting events for container %q", c.ID)
	if err := c.requireStatus("get events for", Created, Running, Paused); err != nil {
		return nil, err
	}
	return c.Sandbox.Event(c.ID)
}
// SandboxPid returns the Pid of the sandbox the container is running in, or -1 if the
// container is not running.
func (c *Container) SandboxPid() int {
	// Only containers in a live state have a meaningful sandbox PID;
	// the requireStatus error itself is not interesting here.
	if c.requireStatus("get PID", Created, Running, Paused) != nil {
		return -1
	}
	return c.Sandbox.Pid
}
// Wait waits for the container to exit, and returns its WaitStatus.
// Call to wait on a stopped container is needed to retrieve the exit status
// and wait returns immediately.
func (c *Container) Wait() (syscall.WaitStatus, error) {
	log.Debugf("Wait on container %q", c.ID)
	// The sandbox tracks per-container exit statuses keyed by container ID.
	return c.Sandbox.Wait(c.ID)
}
// WaitRootPID waits for process 'pid' in the sandbox's PID namespace and
// returns its WaitStatus.
//
// clearStatus is forwarded to the sandbox — presumably it controls whether
// the saved exit status is cleared after retrieval; confirm against
// sandbox.WaitPID.
func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
	log.Debugf("Wait on PID %d in sandbox %q", pid, c.Sandbox.ID)
	if !c.isSandboxRunning() {
		return 0, fmt.Errorf("sandbox is not running")
	}
	// Using the sandbox's own ID addresses the root PID namespace.
	return c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)
}
// WaitPID waits for process 'pid' in the container's PID namespace and returns
// its WaitStatus.
//
// clearStatus is forwarded to the sandbox — presumably it controls whether
// the saved exit status is cleared after retrieval; confirm against
// sandbox.WaitPID.
func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
	log.Debugf("Wait on PID %d in container %q", pid, c.ID)
	if !c.isSandboxRunning() {
		return 0, fmt.Errorf("sandbox is not running")
	}
	return c.Sandbox.WaitPID(c.ID, pid, clearStatus)
}
// SignalContainer sends the signal to the container. If all is true and signal
// is SIGKILL, then waits for all processes to exit before returning.
// SignalContainer returns an error if the container is already stopped.
// TODO: Distinguish different error types.
func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {
	log.Debugf("Signal container %q: %v", c.ID, sig)
	// Signaling container in Stopped state is allowed. When all=false,
	// an error will be returned anyway; when all=true, this allows
	// sending signal to other processes inside the container even
	// after the init process exits. This is especially useful for
	// container cleanup.
	if err := c.requireStatus("signal", Running, Stopped); err != nil {
		return err
	}
	// Even in Stopped state the sandbox process itself must still be alive
	// to deliver the signal.
	if !c.isSandboxRunning() {
		return fmt.Errorf("sandbox is not running")
	}
	return c.Sandbox.SignalContainer(c.ID, sig, all)
}
// SignalProcess sends sig to a specific process in the container.
//
// The container must be Running and its sandbox alive. The signal is
// delivered to exactly the process 'pid' (fgProcess is false).
func (c *Container) SignalProcess(sig syscall.Signal, pid int32) error {
	log.Debugf("Signal process %d in container %q: %v", pid, c.ID, sig)
	if err := c.requireStatus("signal a process inside", Running); err != nil {
		return err
	}
	if !c.isSandboxRunning() {
		return fmt.Errorf("sandbox is not running")
	}
	// pid is already an int32; the previous int32(pid) conversion was a no-op.
	return c.Sandbox.SignalProcess(c.ID, pid, sig, false)
}
// ForwardSignals forwards all signals received by the current process to the
// container process inside the sandbox. It returns a function that will stop
// forwarding signals.
//
// The forwarding goroutine runs until the returned stop function is called:
// the stop function unregisters the channel and closes it, which ends the
// goroutine's range loop.
func (c *Container) ForwardSignals(pid int32, fgProcess bool) func() {
	log.Debugf("Forwarding all signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
	// Buffer of one so Notify never blocks the runtime's signal delivery.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh)
	go func() {
		for s := range sigCh {
			log.Debugf("Forwarding signal %d to container %q PID %d fgProcess=%t", s, c.ID, pid, fgProcess)
			// Forwarding failures are logged but do not stop forwarding.
			if err := c.Sandbox.SignalProcess(c.ID, pid, s.(syscall.Signal), fgProcess); err != nil {
				log.Warningf("error forwarding signal %d to container %q: %v", s, c.ID, err)
			}
		}
		log.Debugf("Done forwarding signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
	}()

	return func() {
		// Unregister first so no further signals are queued, then close the
		// channel to terminate the forwarding goroutine.
		signal.Stop(sigCh)
		close(sigCh)
	}
}
// Checkpoint sends the checkpoint call to the container.
// The statefile will be written to f, the file at the specified image-path.
//
// The container must be in state Created, Running, or Paused.
func (c *Container) Checkpoint(f *os.File) error {
	log.Debugf("Checkpoint container %q", c.ID)
	if err := c.requireStatus("checkpoint", Created, Running, Paused); err != nil {
		return err
	}
	return c.Sandbox.Checkpoint(c.ID, f)
}
// Pause suspends the container and its kernel.
// The call only succeeds if the container's status is created or running.
func (c *Container) Pause() error {
	log.Debugf("Pausing container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	// Use requireStatus for consistency with the other lifecycle methods
	// (Start, Restore, Checkpoint, ...); the rendered error text is the
	// same as the previous inline check.
	if err := c.requireStatus("pause", Created, Running); err != nil {
		return err
	}

	if err := c.Sandbox.Pause(c.ID); err != nil {
		return fmt.Errorf("pausing container: %v", err)
	}
	c.changeStatus(Paused)
	return c.save()
}
// Resume unpauses the container and its kernel.
// The call only succeeds if the container's status is paused.
func (c *Container) Resume() error {
	log.Debugf("Resuming container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	// Use requireStatus for consistency with the other lifecycle methods;
	// the rendered error text is the same as the previous inline check.
	if err := c.requireStatus("resume", Paused); err != nil {
		return err
	}

	if err := c.Sandbox.Resume(c.ID); err != nil {
		return fmt.Errorf("resuming container: %v", err)
	}
	c.changeStatus(Running)
	return c.save()
}
// State returns the OCI runtime state of the container.
func (c *Container) State() specs.State {
	state := specs.State{
		Version: specs.Version,
		ID:      c.ID,
		Bundle:  c.BundleDir,
		Pid:     c.SandboxPid(),
		Status:  c.Status.String(),
	}
	return state
}
// Processes retrieves the list of processes and associated metadata inside a
// container.
//
// The container must be Running or Paused.
func (c *Container) Processes() ([]*control.Process, error) {
	if err := c.requireStatus("get processes of", Running, Paused); err != nil {
		return nil, err
	}
	return c.Sandbox.Processes(c.ID)
}
// Destroy stops all processes and frees all resources associated with the
// container.
//
// All cleanup steps are attempted even when earlier ones fail; the collected
// errors are returned as a single concatenated error.
func (c *Container) Destroy() error {
	log.Debugf("Destroy container %q", c.ID)

	// We must perform the following cleanup steps:
	// * stop the container and gofer processes,
	// * remove the container filesystem on the host, and
	// * delete the container metadata directory.
	//
	// It's possible for one or more of these steps to fail, but we should
	// do our best to perform all of the cleanups. Hence, we keep a slice
	// of errors and return their concatenation.
	var errs []string

	if err := c.stop(); err != nil {
		err = fmt.Errorf("stopping container: %v", err)
		log.Warningf("%v", err)
		errs = append(errs, err.Error())
	}

	if err := os.RemoveAll(c.Root); err != nil && !os.IsNotExist(err) {
		err = fmt.Errorf("deleting container root directory %q: %v", c.Root, err)
		log.Warningf("%v", err)
		errs = append(errs, err.Error())
	}

	c.changeStatus(Stopped)

	// "If any poststop hook fails, the runtime MUST log a warning, but the
	// remaining hooks and lifecycle continue as if the hook had succeeded" -OCI spec.
	// Based on the OCI, "The post-stop hooks MUST be called after the container is
	// deleted but before the delete operation returns"
	// Run it here to:
	// 1) Conform to the OCI.
	// 2) Make sure it only runs once, because the root has been deleted, the container
	// can't be loaded again.
	if c.Spec.Hooks != nil {
		executeHooksBestEffort(c.Spec.Hooks.Poststop, c.State())
	}

	if len(errs) == 0 {
		return nil
	}
	// Use an explicit "%s" verb: passing the joined messages directly as the
	// format string would misinterpret any '%' characters they contain
	// (go vet flags the non-constant format string).
	return fmt.Errorf("%s", strings.Join(errs, "\n"))
}
// save serializes this container as JSON and writes it to the metadata file
// inside the container root directory.
//
// Precondition: container must be locked with container.lock().
func (c *Container) save() error {
	log.Debugf("Save container %q", c.ID)
	data, err := json.Marshal(c)
	if err != nil {
		return fmt.Errorf("invalid container metadata: %v", err)
	}
	path := filepath.Join(c.Root, metadataFilename)
	if err := ioutil.WriteFile(path, data, 0640); err != nil {
		return fmt.Errorf("writing container metadata: %v", err)
	}
	return nil
}
// stop stops the container (for regular containers) or the sandbox (for
// root containers), and waits for the container or sandbox and the gofer
// to stop. If any of them doesn't stop before timeout, an error is returned.
func (c *Container) stop() error {
	var cgroup *cgroup.Cgroup

	if c.Sandbox != nil {
		log.Debugf("Destroying container %q", c.ID)
		if err := c.Sandbox.DestroyContainer(c.ID); err != nil {
			return fmt.Errorf("destroying container %q: %v", c.ID, err)
		}
		// Only uninstall cgroup for sandbox stop.
		if c.Sandbox.IsRootContainer(c.ID) {
			cgroup = c.Sandbox.Cgroup
		}
		// Only set sandbox to nil after it has been told to destroy the container.
		c.Sandbox = nil
	}

	// Try killing gofer if it does not exit with container.
	if c.GoferPid != 0 {
		log.Debugf("Killing gofer for container %q, PID: %d", c.ID, c.GoferPid)
		if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
			// The gofer may already be stopped, log the error.
			log.Warningf("Error sending signal %d to gofer %d: %v", syscall.SIGKILL, c.GoferPid, err)
		}
	}

	// Block until both the container/sandbox and the gofer are gone (or the
	// wait times out).
	if err := c.waitForStopped(); err != nil {
		return err
	}

	// Gofer is running in cgroups, so Cgroup.Uninstall has to be called after it.
	if cgroup != nil {
		if err := cgroup.Uninstall(); err != nil {
			return err
		}
	}
	return nil
}
// waitForStopped polls (every 100ms, for up to 5s) until both the container
// process (if its sandbox is still alive) and the gofer process have stopped.
// On success c.GoferPid is reset to 0; on timeout the last poll error is
// returned.
func (c *Container) waitForStopped() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
	op := func() error {
		if c.isSandboxRunning() {
			// Signal 0 only probes for existence of the container process.
			if err := c.SignalContainer(syscall.Signal(0), false); err == nil {
				return fmt.Errorf("container is still running")
			}
		}
		if c.GoferPid == 0 {
			return nil
		}
		if c.goferIsChild {
			// The gofer process is a child of the current process,
			// so we can wait it and collect its zombie.
			wpid, err := syscall.Wait4(int(c.GoferPid), nil, syscall.WNOHANG, nil)
			if err != nil {
				return fmt.Errorf("error waiting the gofer process: %v", err)
			}
			if wpid == 0 {
				return fmt.Errorf("gofer is still running")
			}
		} else if err := syscall.Kill(c.GoferPid, 0); err == nil {
			// Not our child: probe with signal 0 instead of waiting.
			return fmt.Errorf("gofer is still running")
		}
		c.GoferPid = 0
		return nil
	}
	return backoff.Retry(op, b)
}
// createGoferProcess spawns the gofer (file server) for this container in its
// own set of namespaces and returns:
//   - the sandbox ends of the I/O socket pairs (root mount first, then one
//     per 9P mount in the spec), and
//   - the sandbox end of the pipe on which the gofer reports resolved mounts.
//
// The gofer's PID is recorded in c.GoferPid and the process is marked as a
// child of the current process. Donated files are passed via cmd.ExtraFiles,
// so nextFD must track the order in which they are appended to goferEnds.
func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, *os.File, error) {
	// Start with the general config flags.
	args := conf.ToFlags()

	var goferEnds []*os.File

	// nextFD is the next available file descriptor for the gofer process.
	// It starts at 3 because 0-2 are used by stdin/stdout/stderr.
	nextFD := 3

	if conf.LogFilename != "" {
		logFile, err := os.OpenFile(conf.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, nil, fmt.Errorf("opening log file %q: %v", conf.LogFilename, err)
		}
		defer logFile.Close()
		goferEnds = append(goferEnds, logFile)
		args = append(args, "--log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}

	if conf.DebugLog != "" {
		debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "gofer")
		if err != nil {
			return nil, nil, fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
		}
		defer debugLogFile.Close()
		goferEnds = append(goferEnds, debugLogFile)
		args = append(args, "--debug-log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}

	args = append(args, "gofer", "--bundle", bundleDir)
	if conf.Overlay {
		args = append(args, "--panic-on-write=true")
	}

	// Open the spec file to donate to the sandbox.
	specFile, err := specutils.OpenSpec(bundleDir)
	if err != nil {
		return nil, nil, fmt.Errorf("opening spec file: %v", err)
	}
	defer specFile.Close()
	goferEnds = append(goferEnds, specFile)
	args = append(args, "--spec-fd="+strconv.Itoa(nextFD))
	nextFD++

	// Create pipe that allows gofer to send mount list to sandbox after all paths
	// have been resolved.
	mountsSand, mountsGofer, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	defer mountsGofer.Close()
	goferEnds = append(goferEnds, mountsGofer)
	args = append(args, fmt.Sprintf("--mounts-fd=%d", nextFD))
	nextFD++

	// Add root mount and then add any other additional mounts.
	mountCount := 1
	for _, m := range spec.Mounts {
		if specutils.Is9PMount(m) {
			mountCount++
		}
	}

	sandEnds := make([]*os.File, 0, mountCount)
	for i := 0; i < mountCount; i++ {
		// One socket pair per connection; the sandbox keeps one end and the
		// gofer inherits the other.
		fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
		if err != nil {
			return nil, nil, err
		}
		sandEnds = append(sandEnds, os.NewFile(uintptr(fds[0]), "sandbox IO FD"))

		goferEnd := os.NewFile(uintptr(fds[1]), "gofer IO FD")
		defer goferEnd.Close()
		goferEnds = append(goferEnds, goferEnd)

		args = append(args, fmt.Sprintf("--io-fds=%d", nextFD))
		nextFD++
	}

	binPath := specutils.ExePath
	cmd := exec.Command(binPath, args...)
	cmd.ExtraFiles = goferEnds
	// Rename argv[0] so the gofer is distinguishable in process listings.
	cmd.Args[0] = "runsc-gofer"

	// Enter new namespaces to isolate from the rest of the system. Don't unshare
	// cgroup because gofer is added to a cgroup in the caller's namespace.
	nss := []specs.LinuxNamespace{
		{Type: specs.IPCNamespace},
		{Type: specs.MountNamespace},
		{Type: specs.NetworkNamespace},
		{Type: specs.PIDNamespace},
		{Type: specs.UTSNamespace},
	}

	// Setup any uid/gid mappings, and create or join the configured user
	// namespace so the gofer's view of the filesystem aligns with the
	// users in the sandbox.
	userNS := specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)
	nss = append(nss, userNS...)
	specutils.SetUIDGIDMappings(cmd, spec)
	if len(userNS) != 0 {
		// We need to set UID and GID to have capabilities in a new user namespace.
		cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}
	}

	// Start the gofer in the given namespace.
	log.Debugf("Starting gofer: %s %v", binPath, args)
	if err := specutils.StartInNS(cmd, nss); err != nil {
		return nil, nil, err
	}
	log.Infof("Gofer started, PID: %d", cmd.Process.Pid)
	c.GoferPid = cmd.Process.Pid
	c.goferIsChild = true
	return sandEnds, mountsSand, nil
}
// changeStatus transitions from one status to another ensuring that the
// transition is valid. Invalid transitions are programmer errors and panic.
func (c *Container) changeStatus(s Status) {
	// badTransition reports an attempt to move to s from an incompatible
	// current status.
	badTransition := func() {
		panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
	}
	// requireSandbox enforces that statuses describing a live container
	// always have an associated sandbox.
	requireSandbox := func() {
		if c.Sandbox == nil {
			panic("sandbox cannot be nil")
		}
	}

	switch s {
	case Creating:
		// Initial state, never transitions to it.
		badTransition()
	case Created:
		if c.Status != Creating {
			badTransition()
		}
		requireSandbox()
	case Paused:
		if c.Status != Running {
			badTransition()
		}
		requireSandbox()
	case Running:
		if c.Status != Created && c.Status != Paused {
			badTransition()
		}
		requireSandbox()
	case Stopped:
		if c.Status != Creating && c.Status != Created && c.Status != Running && c.Status != Stopped {
			badTransition()
		}
	default:
		panic(fmt.Sprintf("invalid new state: %v", s))
	}
	c.Status = s
}
// isSandboxRunning reports whether this container has an associated sandbox
// whose process is still alive.
func (c *Container) isSandboxRunning() bool {
	if c.Sandbox == nil {
		return false
	}
	return c.Sandbox.IsRunning()
}
// requireStatus returns nil when the container is currently in one of the
// given statuses, and otherwise an error naming the attempted action and the
// current state.
func (c *Container) requireStatus(action string, statuses ...Status) error {
	for _, want := range statuses {
		if want == c.Status {
			return nil
		}
	}
	return fmt.Errorf("cannot %s container %q in state %s", action, c.ID, c.Status)
}
// lock takes a file lock on the container metadata lock file.
//
// Create and Load lock this container's metadata via
// lockContainerMetadata(filepath.Join(rootDir, id)), i.e. on c.Root itself
// (c.Root already ends in c.ID). Joining c.ID a second time locked a
// different file under a spurious nested directory, so lifecycle methods
// were not actually mutually excluded with Create/Load; lock c.Root instead.
func (c *Container) lock() (func() error, error) {
	return lockContainerMetadata(c.Root)
}
// lockContainerMetadata takes a file lock on the metadata lock file in the
// given container root directory, creating the directory first if needed.
// The returned function releases the lock.
func lockContainerMetadata(containerRootDir string) (func() error, error) {
	if err := os.MkdirAll(containerRootDir, 0711); err != nil {
		return nil, fmt.Errorf("creating container root directory %q: %v", containerRootDir, err)
	}
	lockPath := filepath.Join(containerRootDir, metadataLockFilename)
	lock := flock.NewFlock(lockPath)
	if err := lock.Lock(); err != nil {
		return nil, fmt.Errorf("acquiring lock on container lock file %q: %v", lockPath, err)
	}
	return lock.Unlock, nil
}
// runInCgroup executes fn inside the specified cgroup. If cg is nil, execute
// it in the current context.
func runInCgroup(cg *cgroup.Cgroup, fn func() error) error {
	if cg == nil {
		return fn()
	}
	restore, err := cg.Join()
	// NOTE(review): restore() is deferred before the error check; this assumes
	// cg.Join always returns a callable restore func (undoing any partial
	// join) even when it fails — confirm against cgroup.Join's contract.
	defer restore()
	if err != nil {
		return err
	}
	return fn()
}
Fix container_test flakes.
Create, Start, and Destroy were racing to create and destroy the
metadata directory of containers.
This is a re-upload of
https://gvisor-review.googlesource.com/c/gvisor/+/16260, but with the
correct account.
Change-Id: I16b7a9d0971f0df873e7f4145e6ac8f72730a4f1
PiperOrigin-RevId: 244892991
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package container creates and manipulates containers.
package container
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/cenkalti/backoff"
"github.com/gofrs/flock"
specs "github.com/opencontainers/runtime-spec/specs-go"
"gvisor.googlesource.com/gvisor/pkg/log"
"gvisor.googlesource.com/gvisor/pkg/sentry/control"
"gvisor.googlesource.com/gvisor/runsc/boot"
"gvisor.googlesource.com/gvisor/runsc/cgroup"
"gvisor.googlesource.com/gvisor/runsc/sandbox"
"gvisor.googlesource.com/gvisor/runsc/specutils"
)
const (
	// metadataFilename is the name of the metadata file relative to the
	// container root directory that holds sandbox metadata. Written by
	// save() and read by Load().
	metadataFilename = "meta.json"

	// metadataLockFilename is the name of a lock file in the container
	// root directory that is used to prevent concurrent modifications to
	// the container state and metadata. See lockContainerMetadata.
	metadataLockFilename = "meta.lock"
)
// idRegex matches valid container ids (see libcontainer/factory_linux.go).
// Compiled once at package scope so it is not recompiled on every call.
var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)

// validateID validates the container id: a non-empty string of word
// characters and a small set of punctuation. Returns an error describing
// the invalid id otherwise.
func validateID(id string) error {
	if !idRegex.MatchString(id) {
		return fmt.Errorf("invalid container id: %v", id)
	}
	return nil
}
// Container represents a containerized application. When running, the
// container is associated with a single Sandbox.
//
// Container metadata can be saved and loaded to disk. Within a root directory,
// we maintain subdirectories for each container named with the container id.
// The container metadata is stored as a json within the container directory
// in a file named "meta.json". This metadata format is defined by us and is
// not part of the OCI spec.
//
// Containers must write their metadata files after any change to their internal
// states. The entire container directory is deleted when the container is
// destroyed. Mutations of saved metadata are performed while holding the
// per-container file lock (see lockContainerMetadata and save's precondition).
//
// When the container is stopped, all processes that belong to the container
// must be stopped before Destroy() returns. containerd makes roughly the
// following calls to stop a container:
//   - First it attempts to kill the container process with
//     'runsc kill SIGTERM'. After some time, it escalates to SIGKILL. In a
//     separate thread, it's waiting on the container. As soon as the wait
//     returns, it moves on to the next step:
//   - It calls 'runsc kill --all SIGKILL' to stop every process that belongs to
//     the container. 'kill --all SIGKILL' waits for all processes before
//     returning.
//   - Containerd waits for stdin, stdout and stderr to drain and be closed.
//   - It calls 'runsc delete'. runc implementation kills --all SIGKILL once
//     again just to be sure, waits, and then proceeds with remaining teardown.
type Container struct {
	// ID is the container ID.
	ID string `json:"id"`

	// Spec is the OCI runtime spec that configures this container.
	Spec *specs.Spec `json:"spec"`

	// BundleDir is the directory containing the container bundle.
	BundleDir string `json:"bundleDir"`

	// Root is the directory containing the container metadata file. If this
	// container is the root container, Root and RootContainerDir will be the
	// same.
	Root string `json:"root"`

	// CreatedAt is the time the container was created.
	CreatedAt time.Time `json:"createdAt"`

	// Owner is the container owner.
	Owner string `json:"owner"`

	// ConsoleSocket is the path to a unix domain socket that will receive
	// the console FD.
	ConsoleSocket string `json:"consoleSocket"`

	// Status is the current container Status.
	Status Status `json:"status"`

	// GoferPid is the PID of the gofer running along side the sandbox. May
	// be 0 if the gofer has been killed.
	GoferPid int `json:"goferPid"`

	// goferIsChild is set if a gofer process is a child of the current process.
	//
	// This field isn't saved to json, because only a creator of a gofer
	// process will have it as a child process.
	goferIsChild bool

	// Sandbox is the sandbox this container is running in. It's set when the
	// container is created and reset when the sandbox is destroyed.
	Sandbox *sandbox.Sandbox `json:"sandbox"`

	// RootContainerDir is the root directory containing the metadata file of the
	// sandbox root container. It's used to lock in order to serialize creating
	// and deleting this Container's metadata directory. If this container is the
	// root container, this is the same as Root.
	RootContainerDir string
}
// Load loads a container with the given id from a metadata file. id may be an
// abbreviation of the full container id, in which case Load loads the
// container to which id unambiguously refers to.
// Returns ErrNotExist if container doesn't exist.
func Load(rootDir, id string) (*Container, error) {
	log.Debugf("Load container %q %q", rootDir, id)
	if err := validateID(id); err != nil {
		return nil, fmt.Errorf("validating id: %v", err)
	}

	cRoot, err := findContainerRoot(rootDir, id)
	if err != nil {
		// Preserve error so that callers can distinguish 'not found' errors.
		return nil, err
	}

	// Lock the container metadata to prevent other runsc instances from
	// writing to it while we are reading it.
	unlock, err := lockContainerMetadata(cRoot)
	if err != nil {
		return nil, err
	}
	defer unlock()

	// Read the container metadata file and create a new Container from it.
	metaFile := filepath.Join(cRoot, metadataFilename)
	metaBytes, err := ioutil.ReadFile(metaFile)
	if err != nil {
		if os.IsNotExist(err) {
			// Preserve error so that callers can distinguish 'not found' errors.
			return nil, err
		}
		return nil, fmt.Errorf("reading container metadata file %q: %v", metaFile, err)
	}
	var c Container
	if err := json.Unmarshal(metaBytes, &c); err != nil {
		return nil, fmt.Errorf("unmarshaling container metadata from %q: %v", metaFile, err)
	}

	// If the status is "Running" or "Created", check that the sandbox
	// process still exists, and set it to Stopped if it does not.
	//
	// This is inherently racy: the sandbox may die between the check and
	// the caller acting on the loaded state.
	if c.Status == Running || c.Status == Created {
		// Check if the sandbox process is still running.
		if !c.isSandboxRunning() {
			// Sandbox no longer exists, so this container definitely does not exist.
			c.changeStatus(Stopped)
		} else if c.Status == Running {
			// Container state should reflect the actual state of the application, so
			// we don't consider gofer process here.
			if err := c.SignalContainer(syscall.Signal(0), false); err != nil {
				c.changeStatus(Stopped)
			}
		}
	}

	return &c, nil
}
// findContainerRoot returns the metadata directory of the container whose id
// matches partialID, which may be a full container id or a unique prefix of
// one. Returns os.ErrNotExist when nothing matches, so callers can
// distinguish 'not found' errors.
func findContainerRoot(rootDir, partialID string) (string, error) {
	// Check whether the id fully specifies an existing container.
	cRoot := filepath.Join(rootDir, partialID)
	if _, err := os.Stat(cRoot); err == nil {
		return cRoot, nil
	}

	// Now see whether id could be an abbreviation of exactly 1 of the
	// container ids. If id is ambiguous (it could match more than 1
	// container), it is an error.
	cRoot = ""
	ids, err := List(rootDir)
	if err != nil {
		return "", err
	}
	for _, id := range ids {
		if strings.HasPrefix(id, partialID) {
			if cRoot != "" {
				return "", fmt.Errorf("id %q is ambiguous and could refer to multiple containers: %q, %q", partialID, cRoot, id)
			}
			cRoot = id
		}
	}
	if cRoot == "" {
		return "", os.ErrNotExist
	}
	log.Debugf("abbreviated id %q resolves to full id %q", partialID, cRoot)
	return filepath.Join(rootDir, cRoot), nil
}
// List returns all container ids in the given root directory.
func List(rootDir string) ([]string, error) {
	log.Debugf("List containers %q", rootDir)
	fs, err := ioutil.ReadDir(rootDir)
	if err != nil {
		return nil, fmt.Errorf("reading dir %q: %v", rootDir, err)
	}
	// Each entry of rootDir is named after the container it holds.
	var out []string
	for _, f := range fs {
		out = append(out, f.Name())
	}
	return out, nil
}
// Create creates the container in a new Sandbox process, unless the metadata
// indicates that an existing Sandbox should be used. The caller must call
// Destroy() on the container.
func Create(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (*Container, error) {
	log.Debugf("Create container %q in root dir: %s", id, conf.RootDir)
	if err := validateID(id); err != nil {
		return nil, err
	}

	// Creating a child container must be serialized against creation and
	// deletion of the root container of the same sandbox.
	unlockRoot, err := maybeLockRootContainer(spec, conf.RootDir)
	if err != nil {
		return nil, err
	}
	defer unlockRoot()

	// Lock the container metadata file to prevent concurrent creations of
	// containers with the same id.
	containerRoot := filepath.Join(conf.RootDir, id)
	unlock, err := lockContainerMetadata(containerRoot)
	if err != nil {
		return nil, err
	}
	defer unlock()

	// Check if the container already exists by looking for the metadata
	// file.
	if _, err := os.Stat(filepath.Join(containerRoot, metadataFilename)); err == nil {
		return nil, fmt.Errorf("container with id %q already exists", id)
	} else if !os.IsNotExist(err) {
		return nil, fmt.Errorf("looking for existing container in %q: %v", containerRoot, err)
	}

	c := &Container{
		ID:               id,
		Spec:             spec,
		ConsoleSocket:    consoleSocket,
		BundleDir:        bundleDir,
		Root:             containerRoot,
		Status:           Creating,
		CreatedAt:        time.Now(),
		Owner:            os.Getenv("USER"),
		RootContainerDir: conf.RootDir,
	}

	// The Cleanup object cleans up partially created containers when an
	// error occurs. Any errors occurring during cleanup itself are ignored.
	cu := specutils.MakeCleanup(func() { _ = c.Destroy() })
	defer cu.Clean()

	// If the metadata annotations indicate that this container should be
	// started in an existing sandbox, we must do so. The metadata will
	// indicate the ID of the sandbox, which is the same as the ID of the
	// init container in the sandbox.
	if isRoot(spec) {
		log.Debugf("Creating new sandbox for container %q", id)

		// Create and join cgroup before processes are created to ensure they are
		// part of the cgroup from the start (and all their children processes).
		cg, err := cgroup.New(spec)
		if err != nil {
			return nil, err
		}
		if cg != nil {
			// If there is cgroup config, install it before creating sandbox process.
			if err := cg.Install(spec.Linux.Resources); err != nil {
				return nil, fmt.Errorf("configuring cgroup: %v", err)
			}
		}
		if err := runInCgroup(cg, func() error {
			ioFiles, specFile, err := c.createGoferProcess(spec, conf, bundleDir)
			if err != nil {
				return err
			}

			// Start a new sandbox for this container. Any errors after this point
			// must destroy the container.
			c.Sandbox, err = sandbox.New(id, spec, conf, bundleDir, consoleSocket, userLog, ioFiles, specFile, cg)
			return err
		}); err != nil {
			return nil, err
		}
	} else {
		// This is sort of confusing. For a sandbox with a root
		// container and a child container in it, runsc sees:
		// * A container struct whose sandbox ID is equal to the
		//   container ID. This is the root container that is tied to
		//   the creation of the sandbox.
		// * A container struct whose sandbox ID is equal to the above
		//   container/sandbox ID, but that has a different container
		//   ID. This is the child container.
		sbid, ok := specutils.SandboxID(spec)
		if !ok {
			return nil, fmt.Errorf("no sandbox ID found when creating container")
		}
		log.Debugf("Creating new container %q in sandbox %q", c.ID, sbid)

		// Find the sandbox associated with this ID.
		sb, err := Load(conf.RootDir, sbid)
		if err != nil {
			return nil, err
		}
		c.Sandbox = sb.Sandbox
		if err := c.Sandbox.CreateContainer(c.ID); err != nil {
			return nil, err
		}
	}
	c.changeStatus(Created)

	// Save the metadata file.
	if err := c.save(); err != nil {
		return nil, err
	}

	// Write the PID file. Containerd considers the create complete after
	// this file is created, so it must be the last thing we do.
	if pidFile != "" {
		if err := ioutil.WriteFile(pidFile, []byte(strconv.Itoa(c.SandboxPid())), 0644); err != nil {
			return nil, fmt.Errorf("error writing PID file: %v", err)
		}
	}

	// Success: disarm the deferred cleanup so the container survives return.
	cu.Release()
	return c, nil
}
// Start starts running the containerized process inside the sandbox.
// The container must be in the Created state.
func (c *Container) Start(conf *boot.Config) error {
	log.Debugf("Start container %q", c.ID)

	// Same lock order as Create: root container lock first, then this
	// container's metadata lock.
	unlockRoot, err := maybeLockRootContainer(c.Spec, c.RootContainerDir)
	if err != nil {
		return err
	}
	defer unlockRoot()
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()
	if err := c.requireStatus("start", Created); err != nil {
		return err
	}

	// "If any prestart hook fails, the runtime MUST generate an error,
	// stop and destroy the container" -OCI spec.
	if c.Spec.Hooks != nil {
		if err := executeHooks(c.Spec.Hooks.Prestart, c.State()); err != nil {
			return err
		}
	}

	if isRoot(c.Spec) {
		// Root container: the sandbox process already exists; start its workload.
		if err := c.Sandbox.StartRoot(c.Spec, conf); err != nil {
			return err
		}
	} else {
		// Join cgroup to start gofer process to ensure it's part of the cgroup from
		// the start (and all their children processes).
		if err := runInCgroup(c.Sandbox.Cgroup, func() error {
			// Create the gofer process.
			ioFiles, mountsFile, err := c.createGoferProcess(c.Spec, conf, c.BundleDir)
			if err != nil {
				return err
			}
			defer mountsFile.Close()

			// The gofer sends back the mount list after resolving all paths.
			cleanMounts, err := specutils.ReadMounts(mountsFile)
			if err != nil {
				return fmt.Errorf("reading mounts file: %v", err)
			}
			c.Spec.Mounts = cleanMounts
			return c.Sandbox.StartContainer(c.Spec, conf, c.ID, ioFiles)
		}); err != nil {
			return err
		}
	}

	// "If any poststart hook fails, the runtime MUST log a warning, but
	// the remaining hooks and lifecycle continue as if the hook had
	// succeeded" -OCI spec.
	if c.Spec.Hooks != nil {
		executeHooksBestEffort(c.Spec.Hooks.Poststart, c.State())
	}

	c.changeStatus(Running)
	return c.save()
}
// Restore takes a container and replaces its kernel and file system
// to restore a container from its state file.
// The container must be in the Created state.
func (c *Container) Restore(spec *specs.Spec, conf *boot.Config, restoreFile string) error {
	log.Debugf("Restore container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	if err := c.requireStatus("restore", Created); err != nil {
		return err
	}
	if err := c.Sandbox.Restore(c.ID, spec, conf, restoreFile); err != nil {
		return err
	}
	// A successful restore leaves the container running.
	c.changeStatus(Running)
	return c.save()
}
// Run is a helper that calls Create + Start + Wait.
func Run(id string, spec *specs.Spec, conf *boot.Config, bundleDir, consoleSocket, pidFile, userLog string) (syscall.WaitStatus, error) {
	log.Debugf("Run container %q in root dir: %s", id, conf.RootDir)
	c, err := Create(id, spec, conf, bundleDir, consoleSocket, pidFile, userLog)
	if err != nil {
		return 0, fmt.Errorf("creating container: %v", err)
	}
	// Clean up partially created container if an error occurs.
	// Any errors returned by Destroy() itself are ignored.
	defer c.Destroy()

	if err := c.Start(conf); err != nil {
		return 0, fmt.Errorf("starting container: %v", err)
	}
	return c.Wait()
}
// Execute runs the specified command in the container. It returns the PID of
// the newly created process.
func (c *Container) Execute(args *control.ExecArgs) (int32, error) {
	log.Debugf("Execute in container %q, args: %+v", c.ID, args)
	err := c.requireStatus("execute in", Created, Running)
	if err != nil {
		return 0, err
	}
	// Stamp the request with this container's ID before handing it off.
	args.ContainerID = c.ID
	return c.Sandbox.Execute(args)
}
// Event returns events for the container.
func (c *Container) Event() (*boot.Event, error) {
	log.Debugf("Getting events for container %q", c.ID)
	err := c.requireStatus("get events for", Created, Running, Paused)
	if err != nil {
		return nil, err
	}
	return c.Sandbox.Event(c.ID)
}
// SandboxPid returns the Pid of the sandbox the container is running in, or -1 if the
// container is not running.
func (c *Container) SandboxPid() int {
	if c.requireStatus("get PID", Created, Running, Paused) != nil {
		return -1
	}
	return c.Sandbox.Pid
}
// Wait waits for the container to exit, and returns its WaitStatus.
// Call to wait on a stopped container is needed to retrieve the exit status
// and wait returns immediately.
func (c *Container) Wait() (syscall.WaitStatus, error) {
	log.Debugf("Wait on container %q", c.ID)
	ws, err := c.Sandbox.Wait(c.ID)
	return ws, err
}
// WaitRootPID waits for process 'pid' in the sandbox's PID namespace and
// returns its WaitStatus.
func (c *Container) WaitRootPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
	log.Debugf("Wait on PID %d in sandbox %q", pid, c.Sandbox.ID)
	if c.isSandboxRunning() {
		// Wait in the sandbox's own namespace, hence the sandbox ID.
		return c.Sandbox.WaitPID(c.Sandbox.ID, pid, clearStatus)
	}
	return 0, fmt.Errorf("sandbox is not running")
}
// WaitPID waits for process 'pid' in the container's PID namespace and returns
// its WaitStatus.
func (c *Container) WaitPID(pid int32, clearStatus bool) (syscall.WaitStatus, error) {
	log.Debugf("Wait on PID %d in container %q", pid, c.ID)
	if c.isSandboxRunning() {
		return c.Sandbox.WaitPID(c.ID, pid, clearStatus)
	}
	return 0, fmt.Errorf("sandbox is not running")
}
// SignalContainer sends the signal to the container. If all is true and signal
// is SIGKILL, then waits for all processes to exit before returning.
// SignalContainer returns an error if the container is already stopped.
// TODO: Distinguish different error types.
func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {
	log.Debugf("Signal container %q: %v", c.ID, sig)
	// Signaling container in Stopped state is allowed. When all=false,
	// an error will be returned anyway; when all=true, this allows
	// sending signal to other processes inside the container even
	// after the init process exits. This is especially useful for
	// container cleanup.
	if err := c.requireStatus("signal", Running, Stopped); err != nil {
		return err
	}
	if c.isSandboxRunning() {
		return c.Sandbox.SignalContainer(c.ID, sig, all)
	}
	return fmt.Errorf("sandbox is not running")
}
// SignalProcess sends sig to a specific process in the container. The
// container must be in the Running state.
func (c *Container) SignalProcess(sig syscall.Signal, pid int32) error {
	log.Debugf("Signal process %d in container %q: %v", pid, c.ID, sig)
	if err := c.requireStatus("signal a process inside", Running); err != nil {
		return err
	}
	if !c.isSandboxRunning() {
		return fmt.Errorf("sandbox is not running")
	}
	// fgProcess=false: deliver only to the given process, not to its
	// foreground process group. (Dropped the redundant int32(pid)
	// conversion; pid is already an int32.)
	return c.Sandbox.SignalProcess(c.ID, pid, sig, false)
}
// ForwardSignals forwards all signals received by the current process to the
// container process inside the sandbox. It returns a function that will stop
// forwarding signals.
func (c *Container) ForwardSignals(pid int32, fgProcess bool) func() {
	log.Debugf("Forwarding all signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
	sigCh := make(chan os.Signal, 1)
	// Notify with no signal arguments relays every incoming signal.
	signal.Notify(sigCh)
	go func() {
		// The loop ends when sigCh is closed by the returned stop function.
		for s := range sigCh {
			log.Debugf("Forwarding signal %d to container %q PID %d fgProcess=%t", s, c.ID, pid, fgProcess)
			if err := c.Sandbox.SignalProcess(c.ID, pid, s.(syscall.Signal), fgProcess); err != nil {
				// Best-effort: the target may have exited; just log it.
				log.Warningf("error forwarding signal %d to container %q: %v", s, c.ID, err)
			}
		}
		log.Debugf("Done forwarding signals to container %q PID %d fgProcess=%t", c.ID, pid, fgProcess)
	}()

	return func() {
		// Stop() first so the signal package no longer sends on sigCh;
		// closing the channel then terminates the forwarding goroutine.
		signal.Stop(sigCh)
		close(sigCh)
	}
}
// Checkpoint sends the checkpoint call to the container.
// The statefile will be written to f, the file at the specified image-path.
func (c *Container) Checkpoint(f *os.File) error {
	log.Debugf("Checkpoint container %q", c.ID)
	err := c.requireStatus("checkpoint", Created, Running, Paused)
	if err != nil {
		return err
	}
	return c.Sandbox.Checkpoint(c.ID, f)
}
// Pause suspends the container and its kernel.
// The call only succeeds if the container's status is created or running.
func (c *Container) Pause() error {
	log.Debugf("Pausing container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	// Use requireStatus for consistency with the other lifecycle methods;
	// the produced error message is identical to the previous inline check.
	if err := c.requireStatus("pause", Created, Running); err != nil {
		return err
	}
	if err := c.Sandbox.Pause(c.ID); err != nil {
		return fmt.Errorf("pausing container: %v", err)
	}
	c.changeStatus(Paused)
	return c.save()
}
// Resume unpauses the container and its kernel.
// The call only succeeds if the container's status is paused.
func (c *Container) Resume() error {
	log.Debugf("Resuming container %q", c.ID)
	unlock, err := c.lock()
	if err != nil {
		return err
	}
	defer unlock()

	// Use requireStatus for consistency with the other lifecycle methods;
	// the produced error message is identical to the previous inline check.
	if err := c.requireStatus("resume", Paused); err != nil {
		return err
	}
	if err := c.Sandbox.Resume(c.ID); err != nil {
		return fmt.Errorf("resuming container: %v", err)
	}
	c.changeStatus(Running)
	return c.save()
}
// State returns the metadata of the container in OCI state format.
func (c *Container) State() specs.State {
	st := specs.State{
		Version: specs.Version,
		ID:      c.ID,
		Status:  c.Status.String(),
		Pid:     c.SandboxPid(),
		Bundle:  c.BundleDir,
	}
	return st
}
// Processes retrieves the list of processes and associated metadata inside a
// container.
func (c *Container) Processes() ([]*control.Process, error) {
	err := c.requireStatus("get processes of", Running, Paused)
	if err != nil {
		return nil, err
	}
	return c.Sandbox.Processes(c.ID)
}
// Destroy stops all processes and frees all resources associated with the
// container.
func (c *Container) Destroy() error {
	log.Debugf("Destroy container %q", c.ID)

	// We must perform the following cleanup steps:
	// * stop the container and gofer processes,
	// * remove the container filesystem on the host, and
	// * delete the container metadata directory.
	//
	// It's possible for one or more of these steps to fail, but we should
	// do our best to perform all of the cleanups. Hence, we keep a slice
	// of errors and return their concatenation.
	var errs []string

	unlock, err := maybeLockRootContainer(c.Spec, c.RootContainerDir)
	if err != nil {
		return err
	}
	defer unlock()

	if err := c.stop(); err != nil {
		err = fmt.Errorf("stopping container: %v", err)
		log.Warningf("%v", err)
		errs = append(errs, err.Error())
	}

	if err := os.RemoveAll(c.Root); err != nil && !os.IsNotExist(err) {
		err = fmt.Errorf("deleting container root directory %q: %v", c.Root, err)
		log.Warningf("%v", err)
		errs = append(errs, err.Error())
	}

	c.changeStatus(Stopped)

	// "If any poststop hook fails, the runtime MUST log a warning, but the
	// remaining hooks and lifecycle continue as if the hook had succeeded" -OCI spec.
	// Based on the OCI, "The post-stop hooks MUST be called after the container is
	// deleted but before the delete operation returns"
	// Run it here to:
	// 1) Conform to the OCI.
	// 2) Make sure it only runs once, because the root has been deleted, the container
	//    can't be loaded again.
	if c.Spec.Hooks != nil {
		executeHooksBestEffort(c.Spec.Hooks.Poststop, c.State())
	}

	if len(errs) == 0 {
		return nil
	}
	// Use an explicit "%s" verb: passing the joined errors directly as the
	// format string would misinterpret any '%' they contain (go vet printf).
	return fmt.Errorf("%s", strings.Join(errs, "\n"))
}
// save saves the container metadata to a file.
//
// Precondition: container must be locked with container.lock().
func (c *Container) save() error {
	log.Debugf("Save container %q", c.ID)
	meta, err := json.Marshal(c)
	if err != nil {
		return fmt.Errorf("invalid container metadata: %v", err)
	}
	metaFile := filepath.Join(c.Root, metadataFilename)
	if err := ioutil.WriteFile(metaFile, meta, 0640); err != nil {
		return fmt.Errorf("writing container metadata: %v", err)
	}
	return nil
}
// stop stops the container (for regular containers) or the sandbox (for
// root containers), and waits for the container or sandbox and the gofer
// to stop. If any of them doesn't stop before timeout, an error is returned.
func (c *Container) stop() error {
	// Renamed from "cgroup" to "cg": the local previously shadowed the
	// imported cgroup package inside this function.
	var cg *cgroup.Cgroup

	if c.Sandbox != nil {
		log.Debugf("Destroying container %q", c.ID)
		if err := c.Sandbox.DestroyContainer(c.ID); err != nil {
			return fmt.Errorf("destroying container %q: %v", c.ID, err)
		}
		// Only uninstall cgroup for sandbox stop.
		if c.Sandbox.IsRootContainer(c.ID) {
			cg = c.Sandbox.Cgroup
		}
		// Only set sandbox to nil after it has been told to destroy the container.
		c.Sandbox = nil
	}

	// Try killing gofer if it does not exit with container.
	if c.GoferPid != 0 {
		log.Debugf("Killing gofer for container %q, PID: %d", c.ID, c.GoferPid)
		if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
			// The gofer may already be stopped, log the error.
			log.Warningf("Error sending signal %d to gofer %d: %v", syscall.SIGKILL, c.GoferPid, err)
		}
	}

	if err := c.waitForStopped(); err != nil {
		return err
	}

	// Gofer is running in cgroups, so Cgroup.Uninstall has to be called after it.
	if cg != nil {
		if err := cg.Uninstall(); err != nil {
			return err
		}
	}
	return nil
}
// waitForStopped polls until both the container (if its sandbox is still
// running) and the gofer process have stopped, retrying every 100ms for up
// to 5 seconds before giving up with an error.
func (c *Container) waitForStopped() error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
	op := func() error {
		if c.isSandboxRunning() {
			// Signal 0 probes for process existence without delivering
			// an actual signal; success means it is still alive.
			if err := c.SignalContainer(syscall.Signal(0), false); err == nil {
				return fmt.Errorf("container is still running")
			}
		}
		if c.GoferPid == 0 {
			return nil
		}
		if c.goferIsChild {
			// The gofer process is a child of the current process,
			// so we can wait it and collect its zombie.
			wpid, err := syscall.Wait4(int(c.GoferPid), nil, syscall.WNOHANG, nil)
			if err != nil {
				return fmt.Errorf("error waiting the gofer process: %v", err)
			}
			if wpid == 0 {
				// WNOHANG returned without reaping: still running.
				return fmt.Errorf("gofer is still running")
			}
		} else if err := syscall.Kill(c.GoferPid, 0); err == nil {
			// kill(pid, 0) succeeding means the PID still exists.
			return fmt.Errorf("gofer is still running")
		}
		c.GoferPid = 0
		return nil
	}
	return backoff.Retry(op, b)
}
// createGoferProcess starts the gofer process for this container in new
// namespaces, donating log, spec, mounts-pipe and I/O file descriptors to
// it via ExtraFiles. It returns the sandbox ends of the I/O socket pairs
// and the sandbox end of the mounts pipe. The --*-fd flag values must match
// the position of each file in cmd.ExtraFiles, which is why nextFD is
// incremented in lockstep with every append to goferEnds.
func (c *Container) createGoferProcess(spec *specs.Spec, conf *boot.Config, bundleDir string) ([]*os.File, *os.File, error) {
	// Start with the general config flags.
	args := conf.ToFlags()

	var goferEnds []*os.File

	// nextFD is the next available file descriptor for the gofer process.
	// It starts at 3 because 0-2 are used by stdin/stdout/stderr.
	nextFD := 3

	if conf.LogFilename != "" {
		logFile, err := os.OpenFile(conf.LogFilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
		if err != nil {
			return nil, nil, fmt.Errorf("opening log file %q: %v", conf.LogFilename, err)
		}
		// Closing our copies is safe once the child is started: ExtraFiles
		// dups them into the child process.
		defer logFile.Close()
		goferEnds = append(goferEnds, logFile)
		args = append(args, "--log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}

	if conf.DebugLog != "" {
		debugLogFile, err := specutils.DebugLogFile(conf.DebugLog, "gofer")
		if err != nil {
			return nil, nil, fmt.Errorf("opening debug log file in %q: %v", conf.DebugLog, err)
		}
		defer debugLogFile.Close()
		goferEnds = append(goferEnds, debugLogFile)
		args = append(args, "--debug-log-fd="+strconv.Itoa(nextFD))
		nextFD++
	}

	args = append(args, "gofer", "--bundle", bundleDir)
	if conf.Overlay {
		args = append(args, "--panic-on-write=true")
	}

	// Open the spec file to donate to the sandbox.
	specFile, err := specutils.OpenSpec(bundleDir)
	if err != nil {
		return nil, nil, fmt.Errorf("opening spec file: %v", err)
	}
	defer specFile.Close()
	goferEnds = append(goferEnds, specFile)
	args = append(args, "--spec-fd="+strconv.Itoa(nextFD))
	nextFD++

	// Create pipe that allows gofer to send mount list to sandbox after all paths
	// have been resolved.
	mountsSand, mountsGofer, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	defer mountsGofer.Close()
	goferEnds = append(goferEnds, mountsGofer)
	args = append(args, fmt.Sprintf("--mounts-fd=%d", nextFD))
	nextFD++

	// Add root mount and then add any other additional mounts.
	mountCount := 1
	for _, m := range spec.Mounts {
		if specutils.Is9PMount(m) {
			mountCount++
		}
	}

	// One socket pair per mount: one end kept for the sandbox, the other
	// donated to the gofer.
	sandEnds := make([]*os.File, 0, mountCount)
	for i := 0; i < mountCount; i++ {
		fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
		if err != nil {
			return nil, nil, err
		}
		sandEnds = append(sandEnds, os.NewFile(uintptr(fds[0]), "sandbox IO FD"))

		goferEnd := os.NewFile(uintptr(fds[1]), "gofer IO FD")
		defer goferEnd.Close()
		goferEnds = append(goferEnds, goferEnd)

		args = append(args, fmt.Sprintf("--io-fds=%d", nextFD))
		nextFD++
	}

	binPath := specutils.ExePath
	cmd := exec.Command(binPath, args...)
	cmd.ExtraFiles = goferEnds
	// Rename argv[0] so the gofer is distinguishable in process listings.
	cmd.Args[0] = "runsc-gofer"

	// Enter new namespaces to isolate from the rest of the system. Don't unshare
	// cgroup because gofer is added to a cgroup in the caller's namespace.
	nss := []specs.LinuxNamespace{
		{Type: specs.IPCNamespace},
		{Type: specs.MountNamespace},
		{Type: specs.NetworkNamespace},
		{Type: specs.PIDNamespace},
		{Type: specs.UTSNamespace},
	}

	// Setup any uid/gid mappings, and create or join the configured user
	// namespace so the gofer's view of the filesystem aligns with the
	// users in the sandbox.
	userNS := specutils.FilterNS([]specs.LinuxNamespaceType{specs.UserNamespace}, spec)
	nss = append(nss, userNS...)
	specutils.SetUIDGIDMappings(cmd, spec)
	if len(userNS) != 0 {
		// We need to set UID and GID to have capabilities in a new user namespace.
		cmd.SysProcAttr.Credential = &syscall.Credential{Uid: 0, Gid: 0}
	}

	// Start the gofer in the given namespace.
	log.Debugf("Starting gofer: %s %v", binPath, args)
	if err := specutils.StartInNS(cmd, nss); err != nil {
		return nil, nil, err
	}
	log.Infof("Gofer started, PID: %d", cmd.Process.Pid)
	c.GoferPid = cmd.Process.Pid
	// Record that the gofer is our direct child so waitForStopped can reap it.
	c.goferIsChild = true
	return sandEnds, mountsSand, nil
}
// changeStatus transitions from one status to another ensuring that the
// transition is valid. An invalid transition panics, since it indicates a
// bug in the runsc state machine rather than a user error.
func (c *Container) changeStatus(s Status) {
	switch s {
	case Creating:
		// Initial state, never transitions to it.
		panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))

	case Created:
		// Created is only reachable from Creating, and requires a sandbox.
		if c.Status != Creating {
			panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
		}
		if c.Sandbox == nil {
			panic("sandbox cannot be nil")
		}

	case Paused:
		// Only a running container can be paused.
		if c.Status != Running {
			panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
		}
		if c.Sandbox == nil {
			panic("sandbox cannot be nil")
		}

	case Running:
		// Running is reached by starting (Created) or resuming (Paused).
		if c.Status != Created && c.Status != Paused {
			panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
		}
		if c.Sandbox == nil {
			panic("sandbox cannot be nil")
		}

	case Stopped:
		// Stopped is reachable from every state except Paused; Stopped =>
		// Stopped is allowed so Destroy can be called more than once.
		if c.Status != Creating && c.Status != Created && c.Status != Running && c.Status != Stopped {
			panic(fmt.Sprintf("invalid state transition: %v => %v", c.Status, s))
		}

	default:
		panic(fmt.Sprintf("invalid new state: %v", s))
	}
	c.Status = s
}
// isSandboxRunning reports whether this container has a sandbox and that
// sandbox is currently running.
func (c *Container) isSandboxRunning() bool {
	if c.Sandbox == nil {
		return false
	}
	return c.Sandbox.IsRunning()
}
// requireStatus returns nil if the container's current status is one of the
// given statuses, and a descriptive error (mentioning the attempted action)
// otherwise.
func (c *Container) requireStatus(action string, statuses ...Status) error {
	for _, status := range statuses {
		if status == c.Status {
			return nil
		}
	}
	return fmt.Errorf("cannot %s container %q in state %s", action, c.ID, c.Status)
}
// lock takes a file lock on the container metadata lock file.
//
// NOTE(review): this locks under <Root>/<ID>, while Create locks the
// container root directory itself — confirm these are intentionally
// different lock files.
func (c *Container) lock() (func() error, error) {
	lockDir := filepath.Join(c.Root, c.ID)
	return lockContainerMetadata(lockDir)
}
// lockContainerMetadata takes a file lock on the metadata lock file in the
// given container root directory, creating the directory if needed. It
// returns the function that releases the lock.
func lockContainerMetadata(containerRootDir string) (func() error, error) {
	if err := os.MkdirAll(containerRootDir, 0711); err != nil {
		return nil, fmt.Errorf("creating container root directory %q: %v", containerRootDir, err)
	}
	lockFile := filepath.Join(containerRootDir, metadataLockFilename)
	lock := flock.NewFlock(lockFile)
	if err := lock.Lock(); err != nil {
		return nil, fmt.Errorf("acquiring lock on container lock file %q: %v", lockFile, err)
	}
	return lock.Unlock, nil
}
// maybeLockRootContainer locks the sandbox root container. It is used to
// prevent races to create and delete child container sandboxes.
func maybeLockRootContainer(spec *specs.Spec, rootDir string) (func() error, error) {
	if isRoot(spec) {
		// The root container itself needs no extra lock; return a no-op unlock.
		return func() error { return nil }, nil
	}

	sbid, ok := specutils.SandboxID(spec)
	if !ok {
		return nil, fmt.Errorf("no sandbox ID found when locking root container")
	}
	sb, err := Load(rootDir, sbid)
	if err != nil {
		return nil, err
	}
	return sb.lock()
}
// isRoot reports whether the given spec describes the root container, i.e.
// the one whose creation also creates the sandbox (as decided by
// specutils.ShouldCreateSandbox).
func isRoot(spec *specs.Spec) bool {
	return specutils.ShouldCreateSandbox(spec)
}
// runInCgroup executes fn inside the specified cgroup. If cg is nil, execute
// it in the current context.
func runInCgroup(cg *cgroup.Cgroup, fn func() error) error {
	if cg == nil {
		return fn()
	}
	restore, err := cg.Join()
	// restore is deferred before the error check so that any partial join is
	// undone even when Join fails. NOTE(review): this assumes cg.Join always
	// returns a callable (non-nil) restore func, even on error — confirm.
	defer restore()
	if err != nil {
		return err
	}
	return fn()
}
|
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protoimpl
import (
"fmt"
"strings"
)
// These constants determine the current version of this module.
//
// For our release process, we enforce the following rules:
//	* Tagged releases use a tag that is identical to VersionString.
//	* Tagged releases never reference a commit where the VersionString
//	  contains "devel".
//	* The set of all commits in this repository where VersionString
//	  does not contain "devel" must have a unique VersionString.
//
// Steps for tagging a new release:
//  1. Create a new CL.
//
//  2. Update versionMinor, versionPatch, and/or versionPreRelease as necessary.
//     versionPreRelease must not contain the string "devel".
//
//  3. Since the last released minor version, have there been any changes to
//     the generator that relies on new functionality in the runtime?
//     If yes, then increment GenVersion.
//
//  4. Since the last released minor version, have there been any changes to
//     the runtime that removes support for old .pb.go source code?
//     If yes, then increment MinVersion.
//
//  5. Send out the CL for review and submit it.
//     Note that the next CL in step 8 must be submitted after this CL
//     without any other CLs in-between.
//
//  6. Tag a new version, where the tag is the current VersionString.
//
//  7. Write release notes for all notable changes
//     between this release and the last release.
//
//  8. Create a new CL.
//
//  9. Update versionPreRelease to include the string "devel".
//     For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
//
//  10. Send out the CL for review and submit it.
const (
	versionMajor      = 1
	versionMinor      = 20
	versionPatch      = 1
	versionPreRelease = ""
)
// VersionString formats the version string for this module in semver format.
//
// Examples:
//	v1.20.1
//	v1.21.0-rc.1
func VersionString() string {
	v := fmt.Sprintf("v%d.%d.%d", versionMajor, versionMinor, versionPatch)
	if versionPreRelease == "" {
		return v
	}
	v += "-" + versionPreRelease

	// TODO: Add metadata about the commit or build hash.
	// See https://golang.org/issue/29814
	// See https://golang.org/issue/33533
	var versionMetadata string
	if strings.Contains(versionPreRelease, "devel") && versionMetadata != "" {
		v += "+" + versionMetadata
	}
	return v
}
const (
	// MaxVersion is the maximum supported version for generated .pb.go files.
	// It is always the current version of the module.
	MaxVersion = versionMinor

	// GenVersion is the runtime version required by generated .pb.go files.
	// This is incremented when generated code relies on new functionality
	// in the runtime.
	GenVersion = 20

	// MinVersion is the minimum supported version for generated .pb.go files.
	// This is incremented when the runtime drops support for old code.
	MinVersion = 0
)
// EnforceVersion is used by code generated by protoc-gen-go
// to statically enforce minimum and maximum versions of this package.
// A compilation failure implies either that:
//	* the runtime package is too old and needs to be updated OR
//	* the generated code is too old and needs to be regenerated.
//
// The runtime package can be upgraded by running:
//	go get google.golang.org/protobuf
//
// The generated code can be regenerated by running:
//	protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
//
// Example usage by generated code:
//	const (
//		// Verify that this generated code is sufficiently up-to-date.
//		_ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
//		// Verify that runtime/protoimpl is sufficiently up-to-date.
//		_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion)
//	)
//
// The genVersion is the current minor version used to generate the code.
// This compile-time check relies on negative integer overflow of a uint
// being a compilation failure (guaranteed by the Go specification).
type EnforceVersion uint

// This enforces the following invariant:
//	MinVersion ≤ GenVersion ≤ MaxVersion
const (
	_ = EnforceVersion(GenVersion - MinVersion)
	_ = EnforceVersion(MaxVersion - GenVersion)
)
all: start v1.20.1-devel
Change-Id: Ie2940cd41fd3c449b2233e1bc3b6ba5ba91038f2
Reviewed-on: https://go-review.googlesource.com/c/protobuf/+/222699
Reviewed-by: Damien Neil <7e091e9a80da888fcd7836fd3c6b7b85f4677c12@google.com>
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package protoimpl
import (
"fmt"
"strings"
)
// These constants determine the current version of this module.
//
// For our release process, we enforce the following rules:
//	* Tagged releases use a tag that is identical to VersionString.
//	* Tagged releases never reference a commit where the VersionString
//	  contains "devel".
//	* The set of all commits in this repository where VersionString
//	  does not contain "devel" must have a unique VersionString.
//
// Steps for tagging a new release:
//  1. Create a new CL.
//
//  2. Update versionMinor, versionPatch, and/or versionPreRelease as necessary.
//     versionPreRelease must not contain the string "devel".
//
//  3. Since the last released minor version, have there been any changes to
//     the generator that relies on new functionality in the runtime?
//     If yes, then increment GenVersion.
//
//  4. Since the last released minor version, have there been any changes to
//     the runtime that removes support for old .pb.go source code?
//     If yes, then increment MinVersion.
//
//  5. Send out the CL for review and submit it.
//     Note that the next CL in step 8 must be submitted after this CL
//     without any other CLs in-between.
//
//  6. Tag a new version, where the tag is the current VersionString.
//
//  7. Write release notes for all notable changes
//     between this release and the last release.
//
//  8. Create a new CL.
//
//  9. Update versionPreRelease to include the string "devel".
//     For example: "" -> "devel" or "rc.1" -> "rc.1.devel"
//
//  10. Send out the CL for review and submit it.
const (
	versionMajor      = 1
	versionMinor      = 20
	versionPatch      = 1
	versionPreRelease = "devel"
)
// VersionString formats the version string for this module in semver format.
//
// Examples:
//	v1.20.1
//	v1.21.0-rc.1
func VersionString() string {
	v := fmt.Sprintf("v%d.%d.%d", versionMajor, versionMinor, versionPatch)
	if versionPreRelease == "" {
		return v
	}
	v += "-" + versionPreRelease

	// TODO: Add metadata about the commit or build hash.
	// See https://golang.org/issue/29814
	// See https://golang.org/issue/33533
	var versionMetadata string
	if strings.Contains(versionPreRelease, "devel") && versionMetadata != "" {
		v += "+" + versionMetadata
	}
	return v
}
const (
	// MaxVersion is the maximum supported version for generated .pb.go files.
	// It is always the current version of the module.
	MaxVersion = versionMinor

	// GenVersion is the runtime version required by generated .pb.go files.
	// This is incremented when generated code relies on new functionality
	// in the runtime.
	GenVersion = 20

	// MinVersion is the minimum supported version for generated .pb.go files.
	// This is incremented when the runtime drops support for old code.
	MinVersion = 0
)
// EnforceVersion is used by code generated by protoc-gen-go
// to statically enforce minimum and maximum versions of this package.
// A compilation failure implies either that:
//	* the runtime package is too old and needs to be updated OR
//	* the generated code is too old and needs to be regenerated.
//
// The runtime package can be upgraded by running:
//	go get google.golang.org/protobuf
//
// The generated code can be regenerated by running:
//	protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES}
//
// Example usage by generated code:
//	const (
//		// Verify that this generated code is sufficiently up-to-date.
//		_ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion)
//		// Verify that runtime/protoimpl is sufficiently up-to-date.
//		_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion)
//	)
//
// The genVersion is the current minor version used to generate the code.
// This compile-time check relies on negative integer overflow of a uint
// being a compilation failure (guaranteed by the Go specification).
type EnforceVersion uint

// This enforces the following invariant:
//	MinVersion ≤ GenVersion ≤ MaxVersion
const (
	_ = EnforceVersion(GenVersion - MinVersion)
	_ = EnforceVersion(MaxVersion - GenVersion)
)
|
// Package middlewares is a group of functions. They mutualize some actions
// common to many gin handlers, like checking authentication or permissions.
package middlewares
import (
"fmt"
"net/http"
"net/url"
"os"
"strings"
"github.com/gin-gonic/gin"
"github.com/spf13/afero"
)
// An Instance holds the information related to the logical cozy instance,
// like the domain, the locale, and the access to the databases and files
// storage.
type Instance struct {
	Domain     string   // The main DNS domain, like example.cozycloud.cc
	StorageURL string   // Where the binaries are persisted ("file" or "mem" scheme URL)
	storage    afero.Fs // Lazily-built storage backend; see GetStorageProvider
}
// GetStorageProvider returns the afero storage provider where the binaries for
// the current instance are persisted. The provider is built from StorageURL on
// first use and cached on the instance.
func (instance *Instance) GetStorageProvider() (afero.Fs, error) {
	if instance.storage != nil {
		return instance.storage, nil
	}
	u, err := url.Parse(instance.StorageURL)
	if err != nil {
		return nil, err
	}
	switch u.Scheme {
	case "file":
		instance.storage = afero.NewBasePathFs(afero.NewOsFs(), u.Path)
	case "mem":
		instance.storage = afero.NewMemMapFs()
	default:
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("unknown storage provider: %v", u.Scheme)
	}
	return instance.storage, nil
}
// GetDatabasePrefix returns the prefix to use in database naming for the
// current instance: the instance domain followed by a slash.
func (instance *Instance) GetDatabasePrefix() string {
	return instance.Domain + "/"
}
// SetInstance creates a gin middleware to put the instance in the gin
// context for next handlers.
//
// The instance domain is derived from the request host; local development
// hosts (empty host, 127.0.0.1, or localhost) all map to the "dev" instance.
func SetInstance() gin.HandlerFunc {
	return func(c *gin.Context) {
		domain := c.Request.Host
		// TODO this is not fail-safe, to be modified before production
		// Fix: also treat "localhost" as a development host, not only
		// 127.0.0.1, so local requests do not create a per-host instance.
		if domain == "" || strings.Contains(c.Request.Host, "127.0.0.1") || strings.Contains(c.Request.Host, "localhost") {
			domain = "dev"
		}
		wd, err := os.Getwd()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		// Binaries are persisted on the local filesystem under <cwd>/<domain>/.
		storageURL := "file://localhost" + wd + "/" + domain + "/"
		instance := &Instance{
			Domain:     domain,
			StorageURL: storageURL,
		}
		c.Set("instance", instance)
	}
}
Make localhost a dev host.
// Package middlewares is a group of functions. They mutualize some actions
// common to many gin handlers, like checking authentication or permissions.
package middlewares
import (
"fmt"
"net/http"
"net/url"
"os"
"strings"
"github.com/gin-gonic/gin"
"github.com/spf13/afero"
)
// An Instance holds the information related to a logical cozy instance,
// like the domain, the locale, and the access to the databases and files
// storage.
type Instance struct {
	Domain     string   // The main DNS domain, like example.cozycloud.cc
	StorageURL string   // Where the binaries are persisted ("file" or "mem" scheme)
	storage    afero.Fs // lazily-built cache, populated by GetStorageProvider
}
// GetStorageProvider returns the afero storage provider where the binaries
// for the current instance are persisted. The provider is built from
// StorageURL on the first call and memoized on the instance.
func (instance *Instance) GetStorageProvider() (afero.Fs, error) {
	if cached := instance.storage; cached != nil {
		return cached, nil
	}
	parsed, err := url.Parse(instance.StorageURL)
	if err != nil {
		return nil, err
	}
	var provider afero.Fs
	switch parsed.Scheme {
	case "file":
		provider = afero.NewBasePathFs(afero.NewOsFs(), parsed.Path)
	case "mem":
		provider = afero.NewMemMapFs()
	default:
		return nil, fmt.Errorf("Unknown storage provider: %v", parsed.Scheme)
	}
	instance.storage = provider
	return provider, nil
}
// GetDatabasePrefix returns the prefix to use in database naming for the
// current instance: the instance domain followed by a slash.
func (instance *Instance) GetDatabasePrefix() string {
	return instance.Domain + "/"
}
// SetInstance creates a gin middleware to put the instance in the gin
// context for next handlers.
func SetInstance() gin.HandlerFunc {
	return func(c *gin.Context) {
		// The instance domain is derived from the request host; local
		// development hosts all map to the "dev" instance.
		domain := c.Request.Host
		// TODO this is not fail-safe, to be modified before production
		if domain == "" || strings.Contains(c.Request.Host, "127.0.0.1") || strings.Contains(c.Request.Host, "localhost") {
			domain = "dev"
		}
		wd, err := os.Getwd()
		if err != nil {
			c.AbortWithError(http.StatusInternalServerError, err)
			return
		}
		// Binaries are persisted on the local filesystem under <cwd>/<domain>/.
		storageURL := "file://localhost" + wd + "/" + domain + "/"
		instance := &Instance{
			Domain:     domain,
			StorageURL: storageURL,
		}
		// Expose the instance to downstream handlers under the "instance" key.
		c.Set("instance", instance)
	}
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state.
//
// This implementation does not guarantee that only one client is acting as a
// leader (a.k.a. fencing). A client observes timestamps captured locally to
// infer the state of the leader election. Thus the implementation is tolerant
// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
package leaderelection
import (
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"github.com/golang/glog"
)
const (
	// JitterFactor multiplies RetryPeriod when jittering acquire retries
	// (see acquire) and when validating RenewDeadline in NewLeaderElector.
	JitterFactor = 1.2
)
// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig.
// It returns an error when any timeout is zero, when the timeouts are
// mutually inconsistent, or when no resource lock is supplied.
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
	// Reject zero (unset) timeouts outright: a zero RetryPeriod or
	// RenewDeadline would make the acquire/renew loops spin without waiting.
	if lec.LeaseDuration < 1 {
		return nil, fmt.Errorf("leaseDuration must be greater than zero")
	}
	if lec.RenewDeadline < 1 {
		return nil, fmt.Errorf("renewDeadline must be greater than zero")
	}
	if lec.RetryPeriod < 1 {
		return nil, fmt.Errorf("retryPeriod must be greater than zero")
	}
	if lec.LeaseDuration <= lec.RenewDeadline {
		return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
	}
	if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
		return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
	}
	if lec.Lock == nil {
		return nil, fmt.Errorf("lock must not be nil")
	}
	return &LeaderElector{
		config: lec,
	}, nil
}
// LeaderElectionConfig holds the settings for a LeaderElector.
type LeaderElectionConfig struct {
	// Lock is the resource that will be used for locking
	Lock rl.Interface
	// LeaseDuration is the duration that non-leader candidates will
	// wait to force acquire leadership. This is measured against time of
	// last observed ack. It must be greater than RenewDeadline
	// (enforced by NewLeaderElector).
	LeaseDuration time.Duration
	// RenewDeadline is the duration that the acting master will retry
	// refreshing leadership before giving up. It must exceed
	// RetryPeriod*JitterFactor (enforced by NewLeaderElector).
	RenewDeadline time.Duration
	// RetryPeriod is the duration the LeaderElector clients should wait
	// between tries of actions.
	RetryPeriod time.Duration
	// Callbacks are callbacks that are triggered during certain lifecycle
	// events of the LeaderElector
	Callbacks LeaderCallbacks
}
// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
//  * OnChallenge()
type LeaderCallbacks struct {
	// OnStartedLeading is called when a LeaderElector client starts leading;
	// the stop channel is closed when leadership is lost (see Run).
	OnStartedLeading func(stop <-chan struct{})
	// OnStoppedLeading is called when a LeaderElector client stops leading;
	// Run always invokes it on return.
	OnStoppedLeading func()
	// OnNewLeader is called when the client observes a leader that is
	// not the previously observed leader. This includes the first observed
	// leader when the client starts.
	OnNewLeader func(identity string)
}
// LeaderElector is a leader election client. Construct instances with
// NewLeaderElector and drive them with Run.
//
// possible future methods:
//  * (le *LeaderElector) IsLeader()
//  * (le *LeaderElector) GetLeader()
type LeaderElector struct {
	config LeaderElectionConfig
	// internal bookkeeping
	observedRecord rl.LeaderElectionRecord // last record read from or written to the lock
	observedTime   time.Time               // local time the record was last seen to change
	// used to implement OnNewLeader(), may lag slightly from the
	// value observedRecord.HolderIdentity if the transition has
	// not yet been reported.
	reportedLeader string
}
// Run starts the leader election loop. It blocks while acquiring the lease,
// then runs OnStartedLeading on its own goroutine until the lease can no
// longer be renewed; OnStoppedLeading is always invoked on return.
func (le *LeaderElector) Run() {
	defer func() {
		runtime.HandleCrash()
		le.config.Callbacks.OnStoppedLeading()
	}()
	// Blocks until the lease is acquired.
	le.acquire()
	stop := make(chan struct{})
	go le.config.Callbacks.OnStartedLeading(stop)
	// Blocks while the lease keeps renewing; closing stop afterwards signals
	// OnStartedLeading that leadership has been lost.
	le.renew()
	close(stop)
}
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
func RunOrDie(lec LeaderElectionConfig) {
	elector, err := NewLeaderElector(lec)
	if err != nil {
		panic(err)
	}
	elector.Run()
}
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed. The value reflects observedRecord and may
// lag the live state of the lock.
func (le *LeaderElector) GetLeader() string {
	return le.observedRecord.HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
// The check compares the most recently observed record against the lock's own identity.
func (le *LeaderElector) IsLeader() bool {
	return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds.
func (le *LeaderElector) acquire() {
	// stop is closed on success, which terminates the JitterUntil loop.
	stop := make(chan struct{})
	desc := le.config.Lock.Describe()
	glog.Infof("attempting to acquire leader lease %v...", desc)
	// Retry every RetryPeriod (jittered by JitterFactor) until acquired.
	wait.JitterUntil(func() {
		succeeded := le.tryAcquireOrRenew()
		le.maybeReportTransition()
		if !succeeded {
			glog.V(4).Infof("failed to acquire lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("became leader")
		glog.Infof("successfully acquired lease %v", desc)
		close(stop)
	}, le.config.RetryPeriod, JitterFactor, true, stop)
}
// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails.
func (le *LeaderElector) renew() {
	// stop is closed when a renewal round fails, ending the Until loop.
	stop := make(chan struct{})
	wait.Until(func() {
		// Poll until one renew attempt succeeds or RenewDeadline elapses.
		err := wait.Poll(le.config.RetryPeriod, le.config.RenewDeadline, func() (bool, error) {
			return le.tryAcquireOrRenew(), nil
		})
		le.maybeReportTransition()
		desc := le.config.Lock.Describe()
		if err == nil {
			glog.V(4).Infof("successfully renewed lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("stopped leading")
		glog.Infof("failed to renew lease %v: %v", desc, err)
		close(stop)
	}, 0, stop)
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew() bool {
	now := metav1.Now()
	// Candidate record naming this client as holder; AcquireTime is
	// corrected below when we are only renewing an existing lease.
	leaderElectionRecord := rl.LeaderElectionRecord{
		HolderIdentity:       le.config.Lock.Identity(),
		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
		RenewTime:            now,
		AcquireTime:          now,
	}
	// 1. obtain or create the ElectionRecord
	oldLeaderElectionRecord, err := le.config.Lock.Get()
	if err != nil {
		if !errors.IsNotFound(err) {
			glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
			return false
		}
		// No record exists yet: create one naming ourselves leader.
		if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
			glog.Errorf("error initially creating leader election record: %v", err)
			return false
		}
		le.observedRecord = leaderElectionRecord
		le.observedTime = time.Now()
		return true
	}
	// 2. Record obtained, check the Identity & Time
	// Restart the local lease clock only when the remote record changed.
	if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
		le.observedRecord = *oldLeaderElectionRecord
		le.observedTime = time.Now()
	}
	// Another identity holds a lease that (by our local clock) has not expired.
	if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
		oldLeaderElectionRecord.HolderIdentity != le.config.Lock.Identity() {
		glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
		return false
	}
	// 3. We're going to try to update. The leaderElectionRecord is set to it's default
	// here. Let's correct it before updating.
	if oldLeaderElectionRecord.HolderIdentity == le.config.Lock.Identity() {
		// Renewal: preserve the original acquire time and transition count.
		leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
	} else {
		// Takeover: leadership changes hands, bump the transition counter.
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
	}
	// update the lock itself
	if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
		glog.Errorf("Failed to update lock: %v", err)
		return false
	}
	le.observedRecord = leaderElectionRecord
	le.observedTime = time.Now()
	return true
}
// maybeReportTransition invokes the OnNewLeader callback (asynchronously)
// when the observed leader differs from the last reported one.
// The receiver is named le for consistency with every other method of
// LeaderElector (it was previously l).
func (le *LeaderElector) maybeReportTransition() {
	if le.observedRecord.HolderIdentity == le.reportedLeader {
		return
	}
	le.reportedLeader = le.observedRecord.HolderIdentity
	if le.config.Callbacks.OnNewLeader != nil {
		go le.config.Callbacks.OnNewLeader(le.reportedLeader)
	}
}
Prevent zero values for the leader election timeouts.
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state.
//
// This implementation does not guarantee that only one client is acting as a
// leader (a.k.a. fencing). A client observes timestamps captured locally to
// infer the state of the leader election. Thus the implementation is tolerant
// to arbitrary clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
package leaderelection
import (
"fmt"
"reflect"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
"github.com/golang/glog"
)
const (
	// JitterFactor multiplies RetryPeriod when jittering acquire retries
	// (see acquire) and when validating RenewDeadline in NewLeaderElector.
	JitterFactor = 1.2
)
// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig.
// It returns an error when any timeout is zero, when the timeouts are
// mutually inconsistent, or when no resource lock is supplied.
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
	// Check zero values first so a completely unset config reports the root
	// cause instead of a derived inequality failure.
	if lec.LeaseDuration < 1 {
		return nil, fmt.Errorf("leaseDuration must be greater than zero")
	}
	if lec.RenewDeadline < 1 {
		return nil, fmt.Errorf("renewDeadline must be greater than zero")
	}
	if lec.RetryPeriod < 1 {
		return nil, fmt.Errorf("retryPeriod must be greater than zero")
	}
	if lec.LeaseDuration <= lec.RenewDeadline {
		return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
	}
	if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
		return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
	}
	if lec.Lock == nil {
		// Lowercased, unpunctuated error string per Go convention (ST1005).
		return nil, fmt.Errorf("lock must not be nil")
	}
	return &LeaderElector{
		config: lec,
	}, nil
}
// LeaderElectionConfig holds the settings for a LeaderElector.
// All three durations must be positive (enforced by NewLeaderElector).
type LeaderElectionConfig struct {
	// Lock is the resource that will be used for locking
	Lock rl.Interface
	// LeaseDuration is the duration that non-leader candidates will
	// wait to force acquire leadership. This is measured against time of
	// last observed ack. It must be greater than RenewDeadline.
	LeaseDuration time.Duration
	// RenewDeadline is the duration that the acting master will retry
	// refreshing leadership before giving up. It must exceed
	// RetryPeriod*JitterFactor.
	RenewDeadline time.Duration
	// RetryPeriod is the duration the LeaderElector clients should wait
	// between tries of actions.
	RetryPeriod time.Duration
	// Callbacks are callbacks that are triggered during certain lifecycle
	// events of the LeaderElector
	Callbacks LeaderCallbacks
}
// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
//  * OnChallenge()
type LeaderCallbacks struct {
	// OnStartedLeading is called when a LeaderElector client starts leading;
	// the stop channel is closed when leadership is lost (see Run).
	OnStartedLeading func(stop <-chan struct{})
	// OnStoppedLeading is called when a LeaderElector client stops leading;
	// Run always invokes it on return.
	OnStoppedLeading func()
	// OnNewLeader is called when the client observes a leader that is
	// not the previously observed leader. This includes the first observed
	// leader when the client starts.
	OnNewLeader func(identity string)
}
// LeaderElector is a leader election client. Construct instances with
// NewLeaderElector and drive them with Run.
//
// possible future methods:
//  * (le *LeaderElector) IsLeader()
//  * (le *LeaderElector) GetLeader()
type LeaderElector struct {
	config LeaderElectionConfig
	// internal bookkeeping
	observedRecord rl.LeaderElectionRecord // last record read from or written to the lock
	observedTime   time.Time               // local time the record was last seen to change
	// used to implement OnNewLeader(), may lag slightly from the
	// value observedRecord.HolderIdentity if the transition has
	// not yet been reported.
	reportedLeader string
}
// Run starts the leader election loop. It blocks while acquiring the lease,
// then runs OnStartedLeading on its own goroutine until the lease can no
// longer be renewed; OnStoppedLeading is always invoked on return.
func (le *LeaderElector) Run() {
	defer func() {
		runtime.HandleCrash()
		le.config.Callbacks.OnStoppedLeading()
	}()
	// Blocks until the lease is acquired.
	le.acquire()
	stop := make(chan struct{})
	go le.config.Callbacks.OnStartedLeading(stop)
	// Blocks while the lease keeps renewing; closing stop afterwards signals
	// OnStartedLeading that leadership has been lost.
	le.renew()
	close(stop)
}
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate.
func RunOrDie(lec LeaderElectionConfig) {
	candidate, err := NewLeaderElector(lec)
	if err != nil {
		panic(err)
	}
	candidate.Run()
}
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed. The value reflects observedRecord and may
// lag the live state of the lock.
func (le *LeaderElector) GetLeader() string {
	return le.observedRecord.HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
// The check compares the most recently observed record against the lock's own identity.
func (le *LeaderElector) IsLeader() bool {
	return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew succeeds.
func (le *LeaderElector) acquire() {
	// stop is closed on success, which terminates the JitterUntil loop.
	stop := make(chan struct{})
	desc := le.config.Lock.Describe()
	glog.Infof("attempting to acquire leader lease %v...", desc)
	// Retry every RetryPeriod (jittered by JitterFactor) until acquired.
	wait.JitterUntil(func() {
		succeeded := le.tryAcquireOrRenew()
		le.maybeReportTransition()
		if !succeeded {
			glog.V(4).Infof("failed to acquire lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("became leader")
		glog.Infof("successfully acquired lease %v", desc)
		close(stop)
	}, le.config.RetryPeriod, JitterFactor, true, stop)
}
// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails.
func (le *LeaderElector) renew() {
	// stop is closed when a renewal round fails, ending the Until loop.
	stop := make(chan struct{})
	wait.Until(func() {
		// Poll until one renew attempt succeeds or RenewDeadline elapses.
		err := wait.Poll(le.config.RetryPeriod, le.config.RenewDeadline, func() (bool, error) {
			return le.tryAcquireOrRenew(), nil
		})
		le.maybeReportTransition()
		desc := le.config.Lock.Describe()
		if err == nil {
			glog.V(4).Infof("successfully renewed lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("stopped leading")
		glog.Infof("failed to renew lease %v: %v", desc, err)
		close(stop)
	}, 0, stop)
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew() bool {
	now := metav1.Now()
	// Candidate record naming this client as holder; AcquireTime is
	// corrected below when we are only renewing an existing lease.
	leaderElectionRecord := rl.LeaderElectionRecord{
		HolderIdentity:       le.config.Lock.Identity(),
		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
		RenewTime:            now,
		AcquireTime:          now,
	}
	// 1. obtain or create the ElectionRecord
	oldLeaderElectionRecord, err := le.config.Lock.Get()
	if err != nil {
		if !errors.IsNotFound(err) {
			glog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
			return false
		}
		// No record exists yet: create one naming ourselves leader.
		if err = le.config.Lock.Create(leaderElectionRecord); err != nil {
			glog.Errorf("error initially creating leader election record: %v", err)
			return false
		}
		le.observedRecord = leaderElectionRecord
		le.observedTime = time.Now()
		return true
	}
	// 2. Record obtained, check the Identity & Time
	// Restart the local lease clock only when the remote record changed.
	if !reflect.DeepEqual(le.observedRecord, *oldLeaderElectionRecord) {
		le.observedRecord = *oldLeaderElectionRecord
		le.observedTime = time.Now()
	}
	// Another identity holds a lease that (by our local clock) has not expired.
	if le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
		oldLeaderElectionRecord.HolderIdentity != le.config.Lock.Identity() {
		glog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
		return false
	}
	// 3. We're going to try to update. The leaderElectionRecord is set to it's default
	// here. Let's correct it before updating.
	if oldLeaderElectionRecord.HolderIdentity == le.config.Lock.Identity() {
		// Renewal: preserve the original acquire time and transition count.
		leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
	} else {
		// Takeover: leadership changes hands, bump the transition counter.
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
	}
	// update the lock itself
	if err = le.config.Lock.Update(leaderElectionRecord); err != nil {
		glog.Errorf("Failed to update lock: %v", err)
		return false
	}
	le.observedRecord = leaderElectionRecord
	le.observedTime = time.Now()
	return true
}
// maybeReportTransition invokes the OnNewLeader callback (asynchronously)
// when the observed leader differs from the last reported one.
// The receiver is named le for consistency with every other method of
// LeaderElector (it was previously l).
func (le *LeaderElector) maybeReportTransition() {
	if le.observedRecord.HolderIdentity == le.reportedLeader {
		return
	}
	le.reportedLeader = le.observedRecord.HolderIdentity
	if le.config.Callbacks.OnNewLeader != nil {
		go le.config.Callbacks.OnNewLeader(le.reportedLeader)
	}
}
|
package comp
// bitwiseComp returns the bitwise complement of n within its own bit-width:
// every significant bit of n is flipped (e.g. 5 -> 101 -> 010 -> 2).
// The original stub returned -1 unconditionally; this implements the
// intended mask-based complement. Time O(log n), space O(1).
func bitwiseComp(n int) int {
	// Grow a mask of all ones until it covers every bit of n; subtracting n
	// from that mask flips each of n's bits (mask - n == mask ^ n here).
	mask := 1
	for n > mask {
		mask = mask<<1 + 1
	}
	return mask - n
}
Solve problem 1009 using a bitwise mask.
package comp
// bitwiseComp returns the bitwise complement of n within its own bit-width,
// delegating to the mask-based helper useMask.
func bitwiseComp(n int) int {
	return useMask(n)
}
// useMask computes the complement of n by growing an all-ones mask until it
// covers every bit of n, then subtracting n to flip each of those bits.
// Time complexity O(lgN), space complexity O(1).
func useMask(n int) int {
	// Example: n = 5 -> 101; the mask grows to 111, so the result is 010.
	mask := 1
	for mask < n {
		mask = mask<<1 | 1
	}
	return mask - n
}
|
package tracer
import (
"os"
)
// Puts writes x to standard output via the package-level _puts helper.
func Puts(x string) {
	_puts(os.Stdout, x)
}
// Putln writes x to standard output via the package-level _putln helper.
func Putln(x string) {
	_putln(os.Stdout, x)
}
// PutV concatenates s and all vals into a single line and writes it to
// standard output via _putln.
func PutV(s string, vals ...string) {
	// len(s)*2 is a heuristic starting capacity to limit regrowth.
	buf := make([]byte, 0, len(s)*2)
	// Appending strings directly avoids the intermediate []byte(...) copies
	// of the original; formatting is now gofmt-clean.
	buf = append(buf, s...)
	for _, v := range vals {
		buf = append(buf, v...)
	}
	_putln(os.Stdout, string(buf))
}
Extended API -- add "TraceOver"
package tracer
import (
"os"
)
// Puts writes x to standard output, delegating to the package's _puts helper.
func Puts(x string) {
	_puts(os.Stdout, x)
}
// Putln writes x to standard output, delegating to the package's _putln helper.
func Putln(x string) {
	_putln(os.Stdout, x)
}
// PutV concatenates s and all vals into a single line and writes it to
// standard output via _putln.
func PutV(s string, vals ...string) {
	// len(s)*2 is a heuristic starting capacity to limit regrowth.
	buf := make([]byte, 0, len(s)*2)
	// Appending strings directly avoids the intermediate []byte(...) copies
	// of the original; formatting is now gofmt-clean.
	buf = append(buf, s...)
	for _, v := range vals {
		buf = append(buf, v...)
	}
	_putln(os.Stdout, string(buf))
}
// TraceOver reports whether the package-level threshold is at or below the
// given level, i.e. whether tracing at that level is enabled.
func TraceOver(level uint) bool {
	return (threshold <= level)
}
|
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/gorilla/feeds"
)
// Video identifies one IMDB trailer: the name of the media it belongs to,
// the IMDB title id, and the video id used to build download/embed URLs.
type Video struct {
	MediaName string
	MediaID   string
	VideoID   string
}
// main scrapes the IMDB trailers page, downloads any trailer not already
// cached under /var/www/mediafeeds/Trailers/ (via youtube-dl), and writes an
// RSS feed describing them to /var/www/mediafeeds/trailers.xml.
func main() {
	fmt.Println("Parsing trailers")
	// Fix: the FindTrailers error was previously discarded, silently
	// producing an empty feed on scrape failure.
	trailers, err := FindTrailers()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Generating feed")
	feed := &feeds.RssFeed{
		Title:          "Trailers RSS Feed",
		Link:           "http://mediafeeds.malone.me.uk",
		Description:    "An RSS feed of IMDB trailers.",
		ManagingEditor: "CjMalone@mail.com (Cj Malone)",
		Category:       "Trailers",
	}
	feed.Items = make([]*feeds.RssItem, len(trailers))
	for i := 0; i < len(trailers); i++ {
		path := "/var/www/mediafeeds/Trailers/" + trailers[i].VideoID + ".mp4"
		// Download the trailer only when it is not already cached on disk.
		// (The FileInfo of the first Stat was unused; only the error matters.)
		if _, err := os.Stat(path); err != nil {
			cmd := exec.Command("youtube-dl", "-o", path, "http://www.imdb.com/video/imdb/"+trailers[i].VideoID+"/imdb/embed")
			var out bytes.Buffer
			cmd.Stdout = &out
			fmt.Printf("Downloading %s\n", trailers[i].MediaName)
			if err := cmd.Run(); err != nil {
				fmt.Println(err)
				fmt.Println(out.String())
			}
		}
		// Re-stat after the possible download to get the enclosure length;
		// "0" is used when the file is still missing.
		fileLength := "0"
		if fileInfo, err := os.Stat(path); err != nil {
			fmt.Println(err)
		} else {
			fileLength = strconv.FormatInt(fileInfo.Size(), 10)
		}
		feed.Items[i] = &feeds.RssItem{
			Title:       trailers[i].MediaName,
			Link:        "http://www.imdb.com/title/" + trailers[i].MediaID,
			Description: "A trailer for \"" + trailers[i].MediaName + "\" (" + trailers[i].MediaID + ")",
			Author:      "CjMalone@mail.com (Cj Malone)",
			Category:    "Trailer",
			Enclosure: &feeds.RssEnclosure{
				Url:    "http://mediafeeds.malone.me.uk/Trailers/" + trailers[i].VideoID + ".mp4",
				Length: fileLength,
				Type:   "video/mp4",
			},
			Guid: trailers[i].VideoID,
		}
	}
	rss, err := feeds.ToXML(feed)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("Feed created")
	rssFile, err := os.OpenFile("/var/www/mediafeeds/trailers.xml", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(0644))
	if err != nil {
		// Fix: the error was checked only AFTER deferring Close and the
		// file was written even on a failed open.
		fmt.Println(err)
		return
	}
	defer rssFile.Close()
	if _, err := rssFile.WriteString(rss); err != nil {
		fmt.Println(err)
	}
}
// FindTrailers scrapes http://www.imdb.com/trailers and returns the recently
// added trailers: media name, IMDB title id, and video id for each entry.
func FindTrailers() ([]Video, error) {
	doc, err := goquery.NewDocument("http://www.imdb.com/trailers")
	if err != nil {
		return nil, err
	}
	recentTab := doc.Find("#recAddTab")
	// The last grid item's data-index gives the expected number of trailers.
	lengthStr, exists := recentTab.Find(".gridlist-item").Last().Attr("data-index")
	if !exists {
		return nil, nil //TODO report a proper error
	}
	length, err := strconv.Atoi(lengthStr)
	if err != nil {
		// Fix: the parse error was previously ignored, which made
		// make([]Video, length) use length 0 and drop every trailer.
		return nil, err
	}
	trailers := make([]Video, length)
	current := 0
	recentTab.Find(".gridlist-item").Each(func(i int, s *goquery.Selection) {
		rawTitle := s.Find(".trailer-caption").Text()
		mediaLink, mediaLinkExists := s.Find(".trailer-caption").ChildrenFiltered("a").Attr("href")
		videoLink, videoLinkExists := s.Find(".video-link").Attr("href")
		if !mediaLinkExists || !videoLinkExists {
			return //TODO
		}
		splitLink := strings.Split(videoLink, "/")
		// Fix: guard the fixed-offset slicing below so unexpectedly short
		// scraped strings skip the entry instead of panicking.
		if len(rawTitle) < 7 || len(mediaLink) < 16 || len(splitLink) < 4 || current >= len(trailers) {
			return
		}
		var trailer Video
		trailer.MediaName = rawTitle[2 : len(rawTitle)-5]
		trailer.MediaID = mediaLink[7:16] //TODO derive offsets robustly
		trailer.VideoID = splitLink[3]
		trailers[current] = trailer
		current++
	})
	return trailers, nil
}
gofmt
package main
import (
"bytes"
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"github.com/PuerkitoBio/goquery"
"github.com/gorilla/feeds"
)
// Video identifies one IMDB trailer: the name of the media it belongs to,
// the IMDB title id, and the video id used to build download/embed URLs.
type Video struct {
	MediaName string
	MediaID   string
	VideoID   string
}
// main scrapes the IMDB trailers page, downloads any trailer not already
// cached under /var/www/mediafeeds/Trailers/ (via youtube-dl), and writes an
// RSS feed describing them to /var/www/mediafeeds/trailers.xml.
func main() {
	fmt.Println("Parsing trailers")
	// NOTE(review): the FindTrailers error is discarded; a scrape failure
	// silently produces an empty feed.
	trailers, _ := FindTrailers()
	fmt.Println("Generating feed")
	feed := &feeds.RssFeed{
		Title:          "Trailers RSS Feed",
		Link:           "http://mediafeeds.malone.me.uk",
		Description:    "An RSS feed of IMDB trailers.",
		ManagingEditor: "CjMalone@mail.com (Cj Malone)",
		Category:       "Trailers",
	}
	feed.Items = make([]*feeds.RssItem, len(trailers))
	for i := 0; i < len(trailers); i++ {
		// Download the trailer only when it is not already cached on disk.
		fileInfo, err := os.Stat("/var/www/mediafeeds/Trailers/" + trailers[i].VideoID + ".mp4")
		if err != nil {
			cmd := exec.Command("youtube-dl", "-o", "/var/www/mediafeeds/Trailers/"+trailers[i].VideoID+".mp4", "http://www.imdb.com/video/imdb/"+trailers[i].VideoID+"/imdb/embed")
			var out bytes.Buffer
			cmd.Stdout = &out
			fmt.Printf("Downloading %s\n", trailers[i].MediaName)
			err := cmd.Run()
			if err != nil {
				fmt.Println(err)
				fmt.Println(out.String())
			}
		}
		// Re-stat after the possible download to get the enclosure length;
		// "0" is used when the file is still missing.
		fileInfo, err = os.Stat("/var/www/mediafeeds/Trailers/" + trailers[i].VideoID + ".mp4")
		fileLength := "0"
		if err != nil {
			fmt.Println(err)
		} else {
			fileLength = strconv.FormatInt(fileInfo.Size(), 10)
		}
		feed.Items[i] = &feeds.RssItem{
			Title:       trailers[i].MediaName,
			Link:        "http://www.imdb.com/title/" + trailers[i].MediaID,
			Description: "A trailer for \"" + trailers[i].MediaName + "\" (" + trailers[i].MediaID + ")",
			Author:      "CjMalone@mail.com (Cj Malone)",
			Category:    "Trailer",
			Enclosure: &feeds.RssEnclosure{
				Url:    "http://mediafeeds.malone.me.uk/Trailers/" + trailers[i].VideoID + ".mp4",
				Length: fileLength,
				Type:   "video/mp4",
			},
			Guid: trailers[i].VideoID,
		}
	}
	rss, err := feeds.ToXML(feed)
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println("Feed created")
	// NOTE(review): the OpenFile error is checked only after deferring Close,
	// and the file is written even when the open failed — reorder before
	// production use.
	rssFile, err := os.OpenFile("/var/www/mediafeeds/trailers.xml", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(0644))
	defer rssFile.Close()
	if err != nil {
		fmt.Println(err)
	}
	rssFile.WriteString(rss)
}
// FindTrailers scrapes http://www.imdb.com/trailers and returns the recently
// added trailers: media name, IMDB title id, and video id for each entry.
func FindTrailers() ([]Video, error) {
	doc, err := goquery.NewDocument("http://www.imdb.com/trailers")
	if err != nil {
		return nil, err
	}
	recentTab := doc.Find("#recAddTab")
	// The last grid item's data-index gives the expected number of trailers.
	lengthStr, exists := recentTab.Find(".gridlist-item").Last().Attr("data-index")
	if !exists {
		return nil, nil //TODO
	}
	// NOTE(review): the Atoi error is ignored; a bad data-index yields a
	// zero-length slice and every trailer is dropped.
	length, err := strconv.Atoi(lengthStr)
	var trailers = make([]Video, length)
	current := 0
	recentTab.Find(".gridlist-item").Each(func(i int, s *goquery.Selection) {
		rawTitle := s.Find(".trailer-caption").Text()
		mediaLink, mediaLinkExists := s.Find(".trailer-caption").ChildrenFiltered("a").Attr("href")
		videoLink, videoLinkExists := s.Find(".video-link").Attr("href")
		if !mediaLinkExists || !videoLinkExists {
			return //TODO
		}
		// Strip the fixed caption decoration around the title.
		// NOTE(review): these fixed offsets panic on unexpectedly short
		// scraped strings — guard the lengths before slicing.
		title := rawTitle[2 : len(rawTitle)-5]
		var trailer Video
		trailer.MediaName = title
		trailer.MediaID = mediaLink[7:16] //TODO
		splitLink := strings.Split(videoLink, "/")
		trailer.VideoID = splitLink[3]
		trailers[current] = trailer
		current += 1
	})
	return trailers, nil
}
|
package xorm
import (
"sync"
"github.com/xormplus/core"
)
// Transaction propagation behaviors understood by Transaction.Begin.
const (
	PROPAGATION_REQUIRED      = 0 //Support a current transaction; create a new one if none exists.
	PROPAGATION_SUPPORTS      = 1 //Support a current transaction; execute non-transactionally if none exists.
	PROPAGATION_MANDATORY     = 2 //Support a current transaction; return an error if no current transaction exists.
	PROPAGATION_REQUIRES_NEW  = 3 //Create a new transaction, suspending the current transaction if one exists.
	PROPAGATION_NOT_SUPPORTED = 4 //Do not support a current transaction; rather always execute non-transactionally.
	PROPAGATION_NEVER         = 5 //Do not support a current transaction; return an error if a current transaction exists.
	PROPAGATION_NESTED        = 6 //Execute within a nested transaction if a current transaction exists, behave like PROPAGATION_REQUIRED else.
)
// Transaction couples a Session with a propagation definition and tracks
// whether it is nested inside an already-open transaction.
type Transaction struct {
	txSession             *Session // session carrying the underlying DB transaction
	transactionDefinition int      // one of the PROPAGATION_* constants
	isNested              bool     // true when joined to an existing transaction
	savePointID           string   // savepoint name, assigned for nested transactions
}
// TransactionDefinition returns the PROPAGATION_* constant this transaction
// was created with.
func (transaction *Transaction) TransactionDefinition() int {
	return transaction.transactionDefinition
}
// IsExistingTransaction reports whether the underlying session already has
// an open database transaction.
func (transaction *Transaction) IsExistingTransaction() bool {
	// Idiomatic form of: if Tx == nil { return false } else { return true }.
	return transaction.txSession.Tx != nil
}
// GetSavePointID returns this transaction's savepoint identifier (empty
// unless one was assigned, e.g. for a nested transaction).
func (transaction *Transaction) GetSavePointID() string {
	return transaction.savePointID
}
// Session returns the session this transaction runs on.
func (transaction *Transaction) Session() *Session {
	return transaction.txSession
}
// Do runs doFunc with params. For a nested transaction the function runs
// asynchronously on its own goroutine (fire-and-forget); otherwise it runs
// synchronously on the caller's goroutine.
func (transaction *Transaction) Do(doFunc func(params ...interface{}), params ...interface{}) {
	if transaction.isNested {
		go doFunc(params...)
	} else {
		doFunc(params...)
	}
}
// WaitToDo runs doFunc with params and blocks until it completes. For a
// nested transaction the call is made on a separate goroutine and waited
// for; otherwise it is invoked directly.
//
// NOTE(review): spawning a goroutine and immediately waiting on it is
// observably equivalent to a direct call — confirm the indirection is needed.
func (transaction *Transaction) WaitToDo(doFunc func(params ...interface{}), params ...interface{}) {
	if transaction.isNested {
		var w sync.WaitGroup
		w.Add(1)
		go func() {
			doFunc(params...)
			w.Done()
		}()
		w.Wait()
	} else {
		doFunc(params...)
	}
}
// Begin opens a transaction on the session. The optional argument selects
// the propagation behavior; PROPAGATION_REQUIRED is used when none is given.
func (session *Session) Begin(transactionDefinition ...int) (*Transaction, error) {
	definition := PROPAGATION_REQUIRED
	if len(transactionDefinition) > 0 {
		definition = transactionDefinition[0]
	}
	tx := session.transaction(definition)
	if err := tx.Begin(); err != nil {
		return nil, err
	}
	return tx, nil
}
// transaction builds a Transaction for the session, falling back to
// PROPAGATION_REQUIRED when the definition is outside the known range.
func (session *Session) transaction(transactionDefinition int) *Transaction {
	// Use the named range bounds instead of the magic numbers 0 and 6.
	if transactionDefinition < PROPAGATION_REQUIRED || transactionDefinition > PROPAGATION_NESTED {
		return &Transaction{txSession: session, transactionDefinition: PROPAGATION_REQUIRED}
	}
	return &Transaction{txSession: session, transactionDefinition: transactionDefinition}
}
// Begin a transaction
func (transaction *Transaction) Begin() error {
switch transaction.transactionDefinition {
case PROPAGATION_REQUIRED:
if !transaction.IsExistingTransaction() {
if err := transaction.txSession.begin(); err != nil {
return err
}
} else {
if transaction.txSession.currentTransaction != nil {
transaction.savePointID = transaction.txSession.currentTransaction.savePointID
}
transaction.isNested = true
}
transaction.txSession.currentTransaction = transaction
return nil
case PROPAGATION_SUPPORTS:
if transaction.IsExistingTransaction() {
transaction.isNested = true
if transaction.txSession.currentTransaction != nil {
transaction.savePointID = transaction.txSession.currentTransaction.savePointID
}
transaction.txSession.currentTransaction = transaction
}
return nil
case PROPAGATION_MANDATORY:
if !transaction.IsExistingTransaction() {
return ErrNestedTransaction
} else {
if transaction.txSession.currentTransaction != nil {
transaction.savePointID = transaction.txSession.currentTransaction.savePointID
}
transaction.isNested = true
transaction.txSession.currentTransaction = transaction
}
return nil
case PROPAGATION_REQUIRES_NEW:
transaction.txSession = transaction.txSession.Engine.NewSession()
if err := transaction.txSession.begin(); err != nil {
return err
}
transaction.isNested = false
transaction.txSession.currentTransaction = transaction
return nil
case PROPAGATION_NOT_SUPPORTED:
transaction.txSession = transaction.txSession.Engine.NewSession()
if transaction.IsExistingTransaction() {
transaction.isNested = true
}
return nil
case PROPAGATION_NEVER:
if transaction.IsExistingTransaction() {
return ErrNestedTransaction
}
return nil
case PROPAGATION_NESTED:
if !transaction.IsExistingTransaction() {
if err := transaction.txSession.begin(); err != nil {
return err
}
} else {
transaction.isNested = true
dbtype := transaction.txSession.Engine.Dialect().DBType()
if dbtype == core.MSSQL {
transaction.savePointID = "xorm" + NewShortUUID().String()
} else {
transaction.savePointID = "xorm" + NewV1().WithoutDashString()
}
if err := transaction.SavePoint(transaction.savePointID); err != nil {
return err
}
transaction.txSession.IsAutoCommit = false
transaction.txSession.IsCommitedOrRollbacked = false
transaction.txSession.currentTransaction = transaction
}
return nil
default:
return ErrTransactionDefinition
}
}
// Commit When using transaction, Commit will commit all operations.
func (transaction *Transaction) Commit() error {
switch transaction.transactionDefinition {
case PROPAGATION_REQUIRED:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if !transaction.isNested {
err := transaction.txSession.commit()
if err != nil {
return err
}
}
return nil
case PROPAGATION_SUPPORTS:
if transaction.IsExistingTransaction() {
if !transaction.isNested {
err := transaction.txSession.commit()
if err != nil {
return err
}
}
}
return nil
case PROPAGATION_MANDATORY:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if !transaction.isNested {
err := transaction.txSession.commit()
if err != nil {
return err
}
}
return nil
case PROPAGATION_REQUIRES_NEW:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if !transaction.isNested {
err := transaction.txSession.commit()
if err != nil {
return err
}
}
return nil
case PROPAGATION_NOT_SUPPORTED:
if transaction.IsExistingTransaction() {
return ErrNestedTransaction
}
return nil
case PROPAGATION_NEVER:
if transaction.IsExistingTransaction() {
return ErrNestedTransaction
}
return nil
case PROPAGATION_NESTED:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if !transaction.isNested {
err := transaction.txSession.commit()
if err != nil {
return err
}
}
return nil
default:
return ErrTransactionDefinition
}
}
// Rollback When using transaction, you can rollback if any error
func (transaction *Transaction) Rollback() error {
switch transaction.transactionDefinition {
case PROPAGATION_REQUIRED:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
err := transaction.txSession.rollback()
if err != nil {
return err
}
return nil
case PROPAGATION_SUPPORTS:
if transaction.IsExistingTransaction() {
err := transaction.txSession.rollback()
if err != nil {
return err
}
return nil
}
return nil
case PROPAGATION_MANDATORY:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if transaction.savePointID != "" {
if err := transaction.RollbackToSavePoint(transaction.savePointID); err != nil {
return err
}
return nil
} else {
err := transaction.txSession.rollback()
if err != nil {
return err
}
return nil
}
case PROPAGATION_REQUIRES_NEW:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
err := transaction.txSession.rollback()
if err != nil {
return err
}
return nil
case PROPAGATION_NOT_SUPPORTED:
if transaction.IsExistingTransaction() {
return ErrNestedTransaction
}
return nil
case PROPAGATION_NEVER:
if transaction.IsExistingTransaction() {
return ErrNestedTransaction
}
return nil
case PROPAGATION_NESTED:
if !transaction.IsExistingTransaction() {
return ErrNotInTransaction
}
if transaction.isNested {
if err := transaction.RollbackToSavePoint(transaction.savePointID); err != nil {
return err
}
return nil
} else {
err := transaction.txSession.rollback()
if err != nil {
return err
}
return nil
}
default:
return ErrTransactionDefinition
}
}
func (transaction *Transaction) SavePoint(savePointID string) error {
if transaction.txSession.Tx == nil {
return ErrNotInTransaction
}
var lastSQL string
dbtype := transaction.txSession.Engine.Dialect().DBType()
if dbtype == core.MSSQL {
lastSQL = "save tran " + savePointID
} else {
lastSQL = "SAVEPOINT " + savePointID + ";"
}
transaction.txSession.saveLastSQL(lastSQL)
if _, err := transaction.txSession.Tx.Exec(lastSQL); err != nil {
return err
}
return nil
}
// RollbackToSavePoint rolls the open transaction back to the named
// savepoint. Returns ErrNotInTransaction when no transaction is open.
func (transaction *Transaction) RollbackToSavePoint(savePointID string) error {
	if transaction.txSession.Tx == nil {
		return ErrNotInTransaction
	}
	var lastSQL string
	dbtype := transaction.txSession.Engine.Dialect().DBType()
	if dbtype == core.MSSQL {
		lastSQL = "rollback tran " + savePointID
	} else {
		// BUG FIX: this branch previously interpolated
		// transaction.savePointID, silently ignoring the savePointID
		// parameter that every caller passes in.
		lastSQL = "ROLLBACK TO SAVEPOINT " + savePointID + ";"
	}
	transaction.txSession.saveLastSQL(lastSQL)
	if _, err := transaction.txSession.Tx.Exec(lastSQL); err != nil {
		return err
	}
	return nil
}
rename function
package xorm
import (
"sync"
"github.com/xormplus/core"
)
const (
PROPAGATION_REQUIRED = 0 //Support a current transaction; create a new one if none exists.
PROPAGATION_SUPPORTS = 1 //Support a current transaction; execute non-transactionally if none exists.
PROPAGATION_MANDATORY = 2 //Support a current transaction; return an error if no current transaction exists.
PROPAGATION_REQUIRES_NEW = 3 //Create a new transaction, suspending the current transaction if one exists.
PROPAGATION_NOT_SUPPORTED = 4 //Do not support a current transaction; rather always execute non-transactionally.
PROPAGATION_NEVER = 5 //Do not support a current transaction; return an error if a current transaction exists.
PROPAGATION_NESTED = 6 //Execute within a nested transaction if a current transaction exists, behave like PROPAGATION_REQUIRED else.
)
// Transaction wraps a Session together with Spring-style propagation
// metadata so nested Begin/Commit/Rollback calls compose correctly.
type Transaction struct {
	txSession             *Session // session carrying the underlying sql.Tx
	transactionDefinition int      // one of the PROPAGATION_* constants
	isNested              bool     // true when joining an already-open transaction
	savePointID           string   // savepoint name for nested rollbacks (may be empty)
}
// TransactionDefinition reports which PROPAGATION_* behavior this
// transaction was created with.
func (t *Transaction) TransactionDefinition() int {
	def := t.transactionDefinition
	return def
}
// IsExistingTransaction reports whether the underlying session already
// has an open sql.Tx.
func (transaction *Transaction) IsExistingTransaction() bool {
	// Idiomatic Go: return the condition directly instead of an
	// if/else that returns boolean literals.
	return transaction.txSession.Tx != nil
}
// GetSavePointID returns the savepoint name assigned to this
// transaction; empty when no savepoint is in use.
func (t *Transaction) GetSavePointID() string {
	id := t.savePointID
	return id
}
// Session exposes the session this transaction runs on.
func (t *Transaction) Session() *Session {
	s := t.txSession
	return s
}
// Do runs doFunc with params. For a nested transaction doFunc is fired
// on a new goroutine and Do returns immediately without waiting for it;
// otherwise doFunc runs synchronously on the caller's goroutine.
func (transaction *Transaction) Do(doFunc func(params ...interface{}), params ...interface{}) {
	if transaction.isNested {
		// fire-and-forget: the caller has no way to observe completion
		go doFunc(params...)
	} else {
		doFunc(params...)
	}
}
// WaitForDo runs doFunc with params and always blocks until it has
// completed. In the nested case doFunc executes on a separate goroutine
// and is awaited through a WaitGroup.
// NOTE(review): spawning a goroutine and immediately waiting on it is
// functionally equivalent to a direct call — confirm whether the
// goroutine hop is intentional.
func (transaction *Transaction) WaitForDo(doFunc func(params ...interface{}), params ...interface{}) {
	if transaction.isNested {
		var w sync.WaitGroup
		w.Add(1)
		go func() {
			doFunc(params...)
			w.Done()
		}()
		w.Wait()
	} else {
		doFunc(params...)
	}
}
// Begin opens a Transaction on this session. With no argument the
// PROPAGATION_REQUIRED behavior is used; otherwise the first supplied
// definition wins.
func (session *Session) Begin(transactionDefinition ...int) (*Transaction, error) {
	definition := PROPAGATION_REQUIRED
	if len(transactionDefinition) > 0 {
		definition = transactionDefinition[0]
	}
	tx := session.transaction(definition)
	if err := tx.Begin(); err != nil {
		return nil, err
	}
	return tx, nil
}
// transaction builds a Transaction with the given propagation
// definition, falling back to PROPAGATION_REQUIRED for values outside
// the known range.
func (session *Session) transaction(transactionDefinition int) *Transaction {
	// Use the named constants instead of the magic bounds 0 and 6 so
	// the range check stays correct if the constant list grows.
	if transactionDefinition < PROPAGATION_REQUIRED || transactionDefinition > PROPAGATION_NESTED {
		return &Transaction{txSession: session, transactionDefinition: PROPAGATION_REQUIRED}
	}
	return &Transaction{txSession: session, transactionDefinition: transactionDefinition}
}
// Begin starts (or joins) a transaction according to the propagation
// definition chosen at construction time.
func (transaction *Transaction) Begin() error {
	switch transaction.transactionDefinition {
	case PROPAGATION_REQUIRED:
		// Open a real transaction if none exists; otherwise join the
		// current one as a nested participant.
		if !transaction.IsExistingTransaction() {
			if err := transaction.txSession.begin(); err != nil {
				return err
			}
		} else {
			// Inherit the outer transaction's savepoint (if any) so a
			// later Rollback targets the right point.
			if transaction.txSession.currentTransaction != nil {
				transaction.savePointID = transaction.txSession.currentTransaction.savePointID
			}
			transaction.isNested = true
		}
		transaction.txSession.currentTransaction = transaction
		return nil
	case PROPAGATION_SUPPORTS:
		// Join only when a transaction is already open; otherwise run
		// non-transactionally (nothing to do here).
		if transaction.IsExistingTransaction() {
			transaction.isNested = true
			if transaction.txSession.currentTransaction != nil {
				transaction.savePointID = transaction.txSession.currentTransaction.savePointID
			}
			transaction.txSession.currentTransaction = transaction
		}
		return nil
	case PROPAGATION_MANDATORY:
		// An open transaction is required; error out otherwise.
		if !transaction.IsExistingTransaction() {
			return ErrNestedTransaction
		} else {
			if transaction.txSession.currentTransaction != nil {
				transaction.savePointID = transaction.txSession.currentTransaction.savePointID
			}
			transaction.isNested = true
			transaction.txSession.currentTransaction = transaction
		}
		return nil
	case PROPAGATION_REQUIRES_NEW:
		// Always start a fresh session + transaction; any caller
		// transaction stays untouched on the old session.
		transaction.txSession = transaction.txSession.Engine.NewSession()
		if err := transaction.txSession.begin(); err != nil {
			return err
		}
		transaction.isNested = false
		transaction.txSession.currentTransaction = transaction
		return nil
	case PROPAGATION_NOT_SUPPORTED:
		// Switch to a fresh, non-transactional session.
		// NOTE(review): IsExistingTransaction is evaluated on the NEW
		// session, so isNested can never become true here — confirm
		// whether the old session was meant to be inspected.
		transaction.txSession = transaction.txSession.Engine.NewSession()
		if transaction.IsExistingTransaction() {
			transaction.isNested = true
		}
		return nil
	case PROPAGATION_NEVER:
		// Refuse to run inside an existing transaction.
		if transaction.IsExistingTransaction() {
			return ErrNestedTransaction
		}
		return nil
	case PROPAGATION_NESTED:
		// Open a transaction if none exists; otherwise create a
		// savepoint so this nested scope can roll back independently.
		if !transaction.IsExistingTransaction() {
			if err := transaction.txSession.begin(); err != nil {
				return err
			}
		} else {
			transaction.isNested = true
			dbtype := transaction.txSession.Engine.Dialect().DBType()
			if dbtype == core.MSSQL {
				transaction.savePointID = "xorm" + NewShortUUID().String()
			} else {
				transaction.savePointID = "xorm" + NewV1().WithoutDashString()
			}
			if err := transaction.SavePoint(transaction.savePointID); err != nil {
				return err
			}
			transaction.txSession.IsAutoCommit = false
			transaction.txSession.IsCommitedOrRollbacked = false
			transaction.txSession.currentTransaction = transaction
		}
		return nil
	default:
		return ErrTransactionDefinition
	}
}
// Commit commits the transaction according to its propagation behavior.
// Only the outermost transaction actually commits; nested participants
// defer to the outer one. Returns ErrNotInTransaction when no
// transaction is open and ErrNestedTransaction for definitions that
// forbid one.
func (transaction *Transaction) Commit() error {
	switch transaction.transactionDefinition {
	// These four definitions shared byte-identical bodies; group them
	// in a single case instead of repeating the code.
	case PROPAGATION_REQUIRED, PROPAGATION_MANDATORY, PROPAGATION_REQUIRES_NEW, PROPAGATION_NESTED:
		if !transaction.IsExistingTransaction() {
			return ErrNotInTransaction
		}
		if !transaction.isNested {
			return transaction.txSession.commit()
		}
		return nil
	case PROPAGATION_SUPPORTS:
		// Commit only when we actually own an open transaction.
		if transaction.IsExistingTransaction() && !transaction.isNested {
			return transaction.txSession.commit()
		}
		return nil
	case PROPAGATION_NOT_SUPPORTED, PROPAGATION_NEVER:
		if transaction.IsExistingTransaction() {
			return ErrNestedTransaction
		}
		return nil
	default:
		return ErrTransactionDefinition
	}
}
// Rollback undoes the transaction according to its propagation
// behavior. Nested/mandatory participants roll back to their savepoint
// when one exists; outermost transactions roll back the whole sql.Tx.
func (transaction *Transaction) Rollback() error {
	switch transaction.transactionDefinition {
	// Identical bodies grouped into one case.
	case PROPAGATION_REQUIRED, PROPAGATION_REQUIRES_NEW:
		if !transaction.IsExistingTransaction() {
			return ErrNotInTransaction
		}
		return transaction.txSession.rollback()
	case PROPAGATION_SUPPORTS:
		// Nothing to roll back when running non-transactionally.
		if !transaction.IsExistingTransaction() {
			return nil
		}
		return transaction.txSession.rollback()
	case PROPAGATION_MANDATORY:
		if !transaction.IsExistingTransaction() {
			return ErrNotInTransaction
		}
		// A savepoint marks a nested participant; undo only its part.
		if transaction.savePointID != "" {
			return transaction.RollbackToSavePoint(transaction.savePointID)
		}
		return transaction.txSession.rollback()
	case PROPAGATION_NOT_SUPPORTED, PROPAGATION_NEVER:
		if transaction.IsExistingTransaction() {
			return ErrNestedTransaction
		}
		return nil
	case PROPAGATION_NESTED:
		if !transaction.IsExistingTransaction() {
			return ErrNotInTransaction
		}
		if transaction.isNested {
			return transaction.RollbackToSavePoint(transaction.savePointID)
		}
		return transaction.txSession.rollback()
	default:
		return ErrTransactionDefinition
	}
}
// SavePoint creates a savepoint named savePointID inside the open
// transaction (MSSQL dialect uses "save tran", everything else the
// standard "SAVEPOINT" statement). Returns ErrNotInTransaction when no
// transaction is open.
func (transaction *Transaction) SavePoint(savePointID string) error {
	if transaction.txSession.Tx == nil {
		return ErrNotInTransaction
	}
	var lastSQL string
	dbtype := transaction.txSession.Engine.Dialect().DBType()
	if dbtype == core.MSSQL {
		lastSQL = "save tran " + savePointID
	} else {
		lastSQL = "SAVEPOINT " + savePointID + ";"
	}
	// Record the statement for logging/debugging before executing it.
	transaction.txSession.saveLastSQL(lastSQL)
	if _, err := transaction.txSession.Tx.Exec(lastSQL); err != nil {
		return err
	}
	return nil
}
// RollbackToSavePoint rolls the open transaction back to the named
// savepoint. Returns ErrNotInTransaction when no transaction is open.
func (transaction *Transaction) RollbackToSavePoint(savePointID string) error {
	if transaction.txSession.Tx == nil {
		return ErrNotInTransaction
	}
	var lastSQL string
	dbtype := transaction.txSession.Engine.Dialect().DBType()
	if dbtype == core.MSSQL {
		lastSQL = "rollback tran " + savePointID
	} else {
		// BUG FIX: this branch previously interpolated
		// transaction.savePointID, silently ignoring the savePointID
		// parameter that every caller passes in.
		lastSQL = "ROLLBACK TO SAVEPOINT " + savePointID + ";"
	}
	transaction.txSession.saveLastSQL(lastSQL)
	if _, err := transaction.txSession.Tx.Exec(lastSQL); err != nil {
		return err
	}
	return nil
}
|
// +build freebsd
// tun_freebsd.go -- tun interface with cgo for linux / bsd
//
package samtun
/*
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_tun.h>
#include <stdio.h>
#include <stdlib.h>
/* tundev_open allocates a device-name buffer and opens the first free
 * /dev/tunN (N in 0..9). On success *tunfd holds the open descriptor;
 * on failure *tunfd stays -1. The caller owns the returned name. */
char * tundev_open(int * tunfd) {
  char * name = (char *) malloc(IFNAMSIZ);
  int tun = 0;
  *tunfd = -1;
  do {
    memset(name, 0, IFNAMSIZ);
    sprintf(name, "/dev/tun%d", tun);
    int fd = open(name, O_RDWR);
    if (fd > 0) {
      int i = 0;
      if ( ioctl(fd, TUNSIFHEAD, &i) < 0 ) {
        close(fd);
        perror("TUNSIFHEAD");
        /* BUG FIX: returning -1 from a char*-returning function
         * converts an int to a pointer; break instead, leaving
         * *tunfd == -1 so the Go caller reports the failure. */
        break;
      }
      *tunfd = fd;
      break;
    }
    tun ++;
  } while(tun < 10);
  return name;
}
int tundev_up(char * ifname, char * addr, char * netmask, int mtu) {
struct ifreq ifr;
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if ( fd > 0 ) {
ifr.ifr_mtu = mtu;
if ( ioctl(fd, SIOCSIFMTU, (void*) &ifr) < 0) {
close(fd);
perror("SIOCSIFMTU");
return -1;
}
struct sockaddr_in src;
memset(&src, 0, sizeof(struct sockaddr_in));
src.sin_family = AF_INET;
if ( ! inet_aton(addr, &src.sin_addr) ) {
printf("invalid srcaddr %s\n", addr);
close(fd);
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
memcpy(&ifr.ifr_addr, &src, sizeof(struct sockaddr_in));
if ( ioctl(fd, SIOCSIFADDR, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCSIFADDR");
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
if ( ioctl(fd, SIOCGIFFLAGS, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCGIFFLAGS");
return -1;
}
ifr.ifr_flags |= IFF_UP ;
if ( ioctl(fd, SIOCSIFFLAGS, (void*)&ifr) < 0 ) {
perror("SIOCSIFFLAGS");
close(fd);
return -1;
}
close(fd);
return 0;
}
return -1;
}
void tundev_close(int fd) {
close(fd);
}
void tundev_free(const char * name) {
if (name) {
free((void*)name);
}
}
*/
import "C"
import (
"errors"
)
type tunDev struct {
fd C.int
}
func newTun(ifname, addr, dstaddr string, mtu int) (t tunDev, err error) {
name := C.tundev_open(&t.fd)
if t.fd == C.int(-1) {
err = errors.New("cannot open tun interface")
} else {
if C.tundev_up(name, C.CString(addr), C.CString(dstaddr), C.int(mtu)) < C.int(0) {
err = errors.New("cannot put up interface")
}
}
C.tundev_free(name)
return
}
// read from the tun device
func (t *tunDev) Read(d []byte) (n int, err error) {
return fdRead(C.int(t.fd), d)
}
func (t *tunDev) Write(d []byte) (n int, err error) {
return fdWrite(C.int(t.fd), d)
}
func (t *tunDev) Close() {
C.tundev_close(C.int(t.fd))
}
try fixing freebsd
// +build freebsd
// tun_freebsd.go -- tun interface with cgo for linux / bsd
//
package samtun
/*
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_tun.h>
#include <stdio.h>
#include <stdlib.h>
char * tundev_open(int * tunfd) {
char * name = (char *) malloc(IFNAMSIZ);
int tun = 0;
*tunfd = -1;
do {
memset(name, 0, IFNAMSIZ);
sprintf(name, "/dev/tun%d", tun);
int fd = open(name, O_RDWR);
if (fd > 0) {
int i = 0;
if ( ioctl(fd, TUNSIFHEAD, &i) < 0 ) {
close(fd);
perror("TUNSIFHEAD");
break;
}
*tunfd = fd;
break;
}
tun ++;
} while(tun < 10);
return name;
}
int tundev_up(char * ifname, char * addr, char * netmask, int mtu) {
struct ifreq ifr;
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if ( fd > 0 ) {
ifr.ifr_mtu = mtu;
if ( ioctl(fd, SIOCSIFMTU, (void*) &ifr) < 0) {
close(fd);
perror("SIOCSIFMTU");
return -1;
}
struct sockaddr_in src;
memset(&src, 0, sizeof(struct sockaddr_in));
src.sin_family = AF_INET;
if ( ! inet_aton(addr, &src.sin_addr) ) {
printf("invalid srcaddr %s\n", addr);
close(fd);
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
memcpy(&ifr.ifr_addr, &src, sizeof(struct sockaddr_in));
if ( ioctl(fd, SIOCSIFADDR, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCSIFADDR");
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
if ( ioctl(fd, SIOCGIFFLAGS, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCGIFFLAGS");
return -1;
}
ifr.ifr_flags |= IFF_UP ;
if ( ioctl(fd, SIOCSIFFLAGS, (void*)&ifr) < 0 ) {
perror("SIOCSIFFLAGS");
close(fd);
return -1;
}
close(fd);
return 0;
}
return -1;
}
void tundev_close(int fd) {
close(fd);
}
void tundev_free(const char * name) {
if (name) {
free((void*)name);
}
}
*/
import "C"
import (
"errors"
)
type tunDev struct {
fd C.int
}
// newTun opens the first free /dev/tunN device, assigns addr/dstaddr
// with the given mtu, and returns a handle to it.
func newTun(ifname, addr, dstaddr string, mtu int) (t tunDev, err error) {
	name := C.tundev_open(&t.fd)
	if t.fd == C.int(-1) {
		err = errors.New("cannot open tun interface")
	} else {
		caddr := C.CString(addr)
		cmask := C.CString(dstaddr)
		if C.tundev_up(name, caddr, cmask, C.int(mtu)) < C.int(0) {
			err = errors.New("cannot put up interface")
		}
		// FIX: C.CString allocates with malloc; free both strings via
		// the tundev_free helper to plug a per-call memory leak.
		C.tundev_free(caddr)
		C.tundev_free(cmask)
	}
	C.tundev_free(name)
	return
}
// read from the tun device
func (t *tunDev) Read(d []byte) (n int, err error) {
return fdRead(C.int(t.fd), d)
}
func (t *tunDev) Write(d []byte) (n int, err error) {
return fdWrite(C.int(t.fd), d)
}
func (t *tunDev) Close() {
C.tundev_close(C.int(t.fd))
}
|
// +build freebsd
// tun_freebsd.go -- tun interface with cgo for linux / bsd
//
package samtun
/*
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_tun.h>
#include <stdio.h>
#include <stdlib.h> /* BUG FIX: malloc/free require stdlib.h */
/* tundev_open allocates a device-name buffer and opens the first free
 * /dev/tunN (N in 0..9). On success *tunfd holds the open descriptor;
 * on failure *tunfd stays -1. The caller owns the returned name. */
char * tundev_open(int * tunfd) {
  /* BUG FIX: the buffer must be writable — the previous const-qualified
   * pointer made the memset/sprintf below invalid. */
  char * name = (char *) malloc(IFNAMSIZ);
  int tun = 0;
  *tunfd = -1;
  do {
    memset(name, 0, IFNAMSIZ);
    /* BUG FIX: tun is an int, so the conversion must be %d, not %s
     * (which crashes by dereferencing the int as a pointer). */
    sprintf(name, "/dev/tun%d", tun);
    int fd = open(name, O_RDWR);
    if (fd > 0) {
      int i = 0;
      if ( ioctl(fd, TUNSIFHEAD, &i) < 0 ) {
        close(fd);
        perror("TUNSIFHEAD");
        /* BUG FIX: returning -1 from a char*-returning function
         * converts an int to a pointer; break and leave *tunfd == -1. */
        break;
      }
      *tunfd = fd;
      break;
    }
    tun ++;
  } while(tun < 10);
  return name;
}
int tundev_up(char * ifname, char * addr, char * netmask, int mtu) {
struct ifreq ifr;
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
if ( fd > 0 ) {
ifr.ifr_mtu = mtu;
if ( ioctl(fd, SIOCSIFMTU, (void*) &ifr) < 0) {
close(fd);
perror("SIOCSIFMTU");
return -1;
}
struct sockaddr_in src;
memset(&src, 0, sizeof(struct sockaddr_in));
src.sin_family = AF_INET;
if ( ! inet_aton(addr, &src.sin_addr) ) {
printf("invalid srcaddr %s\n", addr);
close(fd);
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
memcpy(&ifr.ifr_addr, &src, sizeof(struct sockaddr_in));
if ( ioctl(fd, SIOCSIFADDR, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCSIFADDR");
return -1;
}
memset(&ifr, 0, sizeof(struct ifreq));
strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
if ( ioctl(fd, SIOCGIFFLAGS, (void*)&ifr) < 0 ) {
close(fd);
perror("SIOCGIFFLAGS");
return -1;
}
ifr.ifr_flags |= IFF_UP ;
if ( ioctl(fd, SIOCSIFFLAGS, (void*)&ifr) < 0 ) {
perror("SIOCSIFFLAGS");
close(fd);
return -1;
}
close(fd);
return 0;
}
return -1;
}
void tundev_close(int fd) {
close(fd);
}
*/
import "C"
import (
	"errors"
	"unsafe"
)
type tunDev struct {
fd C.int
}
// newTun opens the first free /dev/tunN device, assigns addr/dstaddr
// with the given mtu, and returns a handle to it.
func newTun(ifname, addr, dstaddr string, mtu int) (t tunDev, err error) {
	name := C.tundev_open(&t.fd)
	if t.fd == C.int(-1) {
		err = errors.New("cannot open tun interface")
	} else {
		caddr := C.CString(addr)
		cmask := C.CString(dstaddr)
		if C.tundev_up(name, caddr, cmask, C.int(mtu)) < C.int(0) {
			err = errors.New("cannot put up interface")
		}
		// FIX: C.CString allocates with malloc and was never released.
		C.free(unsafe.Pointer(caddr))
		C.free(unsafe.Pointer(cmask))
	}
	// BUG FIX: C.free takes an unsafe.Pointer; passing *C.char
	// directly does not compile.
	C.free(unsafe.Pointer(name))
	return
}
// read from the tun device
func (t *tunDev) Read(d []byte) (n int, err error) {
return fdRead(C.int(t.fd), d)
}
func (t *tunDev) Write(d []byte) (n int, err error) {
return fdWrite(C.int(t.fd), d)
}
func (t *tunDev) Close() {
C.tundev_close(C.int(t.fd))
}
try fixing freebsd
// +build freebsd
// tun_freebsd.go -- tun interface with cgo for linux / bsd
//
package samtun
/*
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <net/if.h>
#include <net/if_tun.h>
#include <stdio.h>
#include <stdlib.h>
/* tundev_open allocates a device-name buffer and opens the first free
 * /dev/tunN (N in 0..9). On success *tunfd holds the open descriptor;
 * on failure *tunfd stays -1. The caller owns the returned name. */
char * tundev_open(int * tunfd) {
  /* BUG FIX: the buffer must be writable — the previous const-qualified
   * pointer made the memset/sprintf below invalid. */
  char * name = (char *) malloc(IFNAMSIZ);
  int tun = 0;
  *tunfd = -1;
  do {
    memset(name, 0, IFNAMSIZ);
    /* BUG FIX: tun is an int, so the conversion must be %d, not %s
     * (which crashes by dereferencing the int as a pointer). */
    sprintf(name, "/dev/tun%d", tun);
    int fd = open(name, O_RDWR);
    if (fd > 0) {
      int i = 0;
      if ( ioctl(fd, TUNSIFHEAD, &i) < 0 ) {
        close(fd);
        perror("TUNSIFHEAD");
        /* BUG FIX: returning -1 from a char*-returning function
         * converts an int to a pointer; break and leave *tunfd == -1. */
        break;
      }
      *tunfd = fd;
      break;
    }
    tun ++;
  } while(tun < 10);
  return name;
}
/* tundev_up assigns addr to ifname, sets the mtu, and marks the
 * interface up. Returns 0 on success, -1 on any failure (errors are
 * reported via perror/printf). The netmask parameter is currently
 * unused. */
int tundev_up(char * ifname, char * addr, char * netmask, int mtu) {
  struct ifreq ifr;
  memset(&ifr, 0, sizeof(struct ifreq));
  strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
  /* control socket used for all the interface ioctls below */
  int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
  if ( fd > 0 ) {
    ifr.ifr_mtu = mtu;
    if ( ioctl(fd, SIOCSIFMTU, (void*) &ifr) < 0) {
      close(fd);
      perror("SIOCSIFMTU");
      return -1;
    }
    struct sockaddr_in src;
    memset(&src, 0, sizeof(struct sockaddr_in));
    src.sin_family = AF_INET;
    if ( ! inet_aton(addr, &src.sin_addr) ) {
      printf("invalid srcaddr %s\n", addr);
      close(fd);
      return -1;
    }
    /* reset ifr for each ioctl; SIOCSIFADDR installs the address */
    memset(&ifr, 0, sizeof(struct ifreq));
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
    memcpy(&ifr.ifr_addr, &src, sizeof(struct sockaddr_in));
    if ( ioctl(fd, SIOCSIFADDR, (void*)&ifr) < 0 ) {
      close(fd);
      perror("SIOCSIFADDR");
      return -1;
    }
    /* read-modify-write the interface flags to add IFF_UP */
    memset(&ifr, 0, sizeof(struct ifreq));
    strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
    if ( ioctl(fd, SIOCGIFFLAGS, (void*)&ifr) < 0 ) {
      close(fd);
      perror("SIOCGIFFLAGS");
      return -1;
    }
    ifr.ifr_flags |= IFF_UP ;
    if ( ioctl(fd, SIOCSIFFLAGS, (void*)&ifr) < 0 ) {
      perror("SIOCSIFFLAGS");
      close(fd);
      return -1;
    }
    close(fd);
    return 0;
  }
  return -1;
}
/* tundev_close closes a descriptor obtained from tundev_open. */
void tundev_close(int fd) {
  close(fd);
}
*/
import "C"
import (
	"errors"
	"unsafe"
)
// tunDev wraps the raw file descriptor of an open tun device.
type tunDev struct {
	fd C.int // descriptor returned by tundev_open
}
// newTun opens the first free /dev/tunN device, assigns addr/dstaddr
// with the given mtu, and returns a handle to it.
func newTun(ifname, addr, dstaddr string, mtu int) (t tunDev, err error) {
	name := C.tundev_open(&t.fd)
	if t.fd == C.int(-1) {
		err = errors.New("cannot open tun interface")
	} else {
		caddr := C.CString(addr)
		cmask := C.CString(dstaddr)
		if C.tundev_up(name, caddr, cmask, C.int(mtu)) < C.int(0) {
			err = errors.New("cannot put up interface")
		}
		// FIX: C.CString allocates with malloc and was never released.
		C.free(unsafe.Pointer(caddr))
		C.free(unsafe.Pointer(cmask))
	}
	// BUG FIX: C.free takes an unsafe.Pointer; passing *C.char
	// directly does not compile.
	C.free(unsafe.Pointer(name))
	return
}
// Read fills d with the next packet read from the tun device.
func (t *tunDev) Read(d []byte) (n int, err error) {
	fd := C.int(t.fd)
	return fdRead(fd, d)
}
// Write sends the packet d out through the tun device.
func (t *tunDev) Write(d []byte) (n int, err error) {
	fd := C.int(t.fd)
	return fdWrite(fd, d)
}
// Close shuts down the tun device's file descriptor.
func (t *tunDev) Close() {
	fd := C.int(t.fd)
	C.tundev_close(fd)
}
|
package escalation
import (
"math"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/eliothedeman/bangarang/event"
"github.com/eliothedeman/smoothie"
)
const (
	DEFAULT_WINDOW_SIZE     = 2  // The default size of the dataframe used in window operations
	STATUS_SIZE             = 10 // The default size of the dataframe used to count statuses
	MIN_STD_DEV_WINDOW_SIZE = 5  // the smallest a window size can be for a standard deviation check
)
// Condition holds conditional information to check events against.
// Threshold knobs (Greater/Less/Exactly) apply directly in simple mode;
// in StdDev mode Greater doubles as the sigma value, and in Derivative
// mode the three knobs select the comparison operator.
type Condition struct {
	Greater     *float64 `json:"greater"`
	Less        *float64 `json:"less"`
	Exactly     *float64 `json:"exactly"`
	StdDev      bool     `json:"std_dev"`
	Derivative  bool     `json:"derivative"`
	HoltWinters bool     `json:"holt_winters"`
	Simple      bool     `json:"simple"`
	Occurences  int      `json:"occurences"`
	WindowSize  int      `json:"window_size"`
	// NOTE: the "agregation" tag is misspelled but is the existing wire
	// contract; do not "fix" it without a config migration.
	Aggregation   *Aggregation `json:"agregation"`
	trackFunc     TrackFunc    // tracking strategy chosen by init()
	checks        []satisfier  // compiled by compileChecks()
	eventTrackers map[string]*eventTracker
	sync.Mutex
	ready bool // set once init() has run
}
// Aggregation configures checks based on the aggregation of data over a
// time window, instead of individual data points.
type Aggregation struct {
	WindowLength int `json:"window_length"` // bucket length in seconds
}
// aggregator tracks when the currently-open aggregation bucket closes.
type aggregator struct {
	nextCloseout time.Time // events before this instant join the open bucket
}
// eventTracker holds per-index state for a condition: the rolling
// metric window, the pass/fail state history, and counters.
type eventTracker struct {
	df     *smoothie.DataFrame // rolling window of observed metrics
	states *smoothie.DataFrame // recent pass/fail states (1 = hit)
	count  int                 // total events seen by this tracker
	// occurences counts consecutive satisfying events
	occurences int
	// optional; non-nil only when the condition uses aggregation
	agg *aggregator
}
// refresh clears the status history and the consecutive-occurrence
// counter while leaving the metric dataframe and total count untouched.
func (e *eventTracker) refresh() {
	blank := make([]float64, STATUS_SIZE)
	e.states = smoothie.NewDataFrameFromSlice(blank)
	e.occurences = 0
}

// satisfier reports whether a single event meets one compiled check.
type satisfier func(e *event.Event) bool
// newTracker builds an eventTracker sized for this condition's window,
// attaching an aggregator when aggregation is configured.
func (c *Condition) newTracker() *eventTracker {
	tracker := &eventTracker{
		df:     smoothie.NewDataFrameFromSlice(make([]float64, c.WindowSize)),
		states: smoothie.NewDataFrameFromSlice(make([]float64, STATUS_SIZE)),
	}
	if c.Aggregation == nil {
		return tracker
	}
	tracker.agg = &aggregator{}
	return tracker
}
// DoOnTracker looks up (or lazily creates) the tracker for e's index
// and runs dot against it.
// NOTE(review): locking is intentionally disabled here, as in the
// original (the commented-out c.Lock()); confirm single-goroutine use.
func (c *Condition) DoOnTracker(e *event.Event, dot func(*eventTracker)) {
	// BUG FIX: writing to a nil map panics. getTracker performs this
	// same lazy initialization; keep both entry points consistent so a
	// call before init() cannot crash.
	if c.eventTrackers == nil {
		c.eventTrackers = make(map[string]*eventTracker)
	}
	et, ok := c.eventTrackers[e.IndexName()]
	if !ok {
		et = c.newTracker()
		c.eventTrackers[e.IndexName()] = et
	}
	dot(et)
}
// getTracker returns the tracker for e's index, creating both the map
// and the tracker on first use.
func (c *Condition) getTracker(e *event.Event) *eventTracker {
	if c.eventTrackers == nil {
		c.eventTrackers = make(map[string]*eventTracker)
	}
	key := e.IndexName()
	if existing, found := c.eventTrackers[key]; found {
		return existing
	}
	fresh := c.newTracker()
	c.eventTrackers[key] = fresh
	return fresh
}
// TrackFunc is the strategy signature for feeding an event into a
// condition's tracking state.
type TrackFunc func(c *Condition, e *event.Event) bool

// AggregationTrack folds e's metric into a time-bucketed dataframe:
// while inside the current closeout window the metric is added to the
// newest bucket; otherwise a new bucket is pushed and the closeout is
// advanced by the configured window length.
// NOTE(review): t.agg is assumed non-nil here — this function is only
// selected by getTrackingFunc when c.Aggregation != nil, and newTracker
// allocates agg in exactly that case.
func AggregationTrack(c *Condition, e *event.Event) bool {
	c.DoOnTracker(e, func(t *eventTracker) {
		// if we are still within the closeout, add to the current value
		if time.Now().Before(t.agg.nextCloseout) {
			t.df.Insert(0, t.df.Index(0)+e.Metric)
			// if we are after the closeout, start a new datapoint and close out the old one
		} else {
			t.df.Push(e.Metric)
			t.agg.nextCloseout = time.Now().Add(time.Second * time.Duration(c.Aggregation.WindowLength))
		}
	})
	return c.OccurencesHit(e)
}
// SimpleTrack records e's metric in the tracker's rolling window and
// reports whether the occurrence threshold has been hit.
func SimpleTrack(c *Condition, e *event.Event) bool {
	tracker := c.getTracker(e)
	tracker.df.Push(e.Metric)
	tracker.count++
	return c.OccurencesHit(e)
}
// isSimple reports whether this condition uses plain threshold checks.
// A condition is simple when explicitly flagged, or when none of the
// statistical modes (std-dev, Holt-Winters, derivative) are enabled.
func (c *Condition) isSimple() bool {
	// Collapse the original if/else ladder into one boolean expression.
	return c.Simple || !(c.StdDev || c.HoltWinters || c.Derivative)
}
// TrackEvent feeds e into this condition's tracking function and
// reports whether the event has hit its occurrence settings.
func (c *Condition) TrackEvent(e *event.Event) bool {
	track := c.trackFunc
	return track(c, e)
}
// StateChanged reports whether the most recent pass/fail state recorded
// for e's index differs from the one before it.
func (c *Condition) StateChanged(e *event.Event) bool {
	t := c.getTracker(e)
	// first-ever event for this index: any non-zero latest state counts
	// as a change
	if t.count == 0 && t.states.Index(t.states.Len()-1) != 0 {
		return true
	}
	// compare the two most recent entries in the state history
	return t.states.Index(t.states.Len()-1) != t.states.Index(t.states.Len()-2)
}
// OccurencesHit updates the consecutive-hit counter for e and records
// the resulting pass/fail state, returning true once the condition has
// been satisfied Occurences times in a row.
func (c *Condition) OccurencesHit(e *event.Event) bool {
	tracker := c.getTracker(e)
	if c.Satisfies(e) {
		tracker.occurences++
	} else {
		tracker.occurences = 0
	}
	hit := tracker.occurences >= c.Occurences
	if hit {
		tracker.states.Push(1)
	} else {
		tracker.states.Push(0)
	}
	return hit
}
// Satisfies reports whether any of the compiled checks passes for e.
func (c *Condition) Satisfies(e *event.Event) bool {
	for _, passes := range c.checks {
		if !passes(e) {
			continue
		}
		return true
	}
	return false
}
// create a list of checks that the condition will use to test events.
// Statistical modes (StdDev, Derivative) are exclusive: the first one
// matched short-circuits with a single check. Simple mode may stack
// greater/less/exactly checks.
// NOTE(review): c.HoltWinters has no branch here, so a Holt-Winters
// condition compiles to zero checks — confirm this is intentional.
func (c *Condition) compileChecks() []satisfier {
	s := []satisfier{}
	// if any of the special checks are included, only one check can be implemented per condition
	if !c.isSimple() {
		if c.StdDev {
			// check to see if the window size is large enough. The minimum 5.
			if c.WindowSize < MIN_STD_DEV_WINDOW_SIZE {
				logrus.Errorf("Window size %d is too small for a standard deviation check", c.WindowSize)
				// stop short
				return s
			}
			sigma := math.NaN()
			// get the sigma value; Greater doubles as the sigma knob in this mode
			if c.Greater != nil {
				sigma = *c.Greater
			} else {
				// default to 5 sigma
				sigma = 5
			}
			logrus.Infof("Adding standard deviation check of %f sigma", sigma)
			s = append(s, func(e *event.Event) bool {
				t := c.getTracker(e)
				// if the count is greater than 1/4 the window size, start checking
				if t.count > t.df.Len()/4 {
					// if the count is greater than the window size, use the whole df
					if t.count >= t.df.Len() {
						return math.Abs(e.Metric-t.df.Avg()) > (sigma * t.df.StdDev())
					}
					// take a sublslice of populated values
					return math.Abs(e.Metric-t.df.Index(t.df.Len()-2)) > (sigma * t.df.StdDev())
				}
				return false
			})
			return s
		}
		if c.Derivative {
			check := math.NaN()
			var kind uint8
			// get the check value; the knob used picks the comparison operator
			if c.Greater != nil {
				kind = 1
				check = *c.Greater
			} else if c.Less != nil {
				kind = 2
				check = *c.Less
			} else if c.Exactly != nil {
				kind = 3
				check = *c.Exactly
			} else {
				logrus.Error("No derivitive type supplied. >, <, == required")
			}
			if kind != 0 {
				logrus.Infof("Adding derivative check of %f", check)
				s = append(s, func(e *event.Event) bool {
					t := c.getTracker(e)
					// we need to have seen at least enough events to fill the window
					if t.count < t.df.Len() {
						return false
					}
					diff := e.Metric - t.df.Index(0)
					switch kind {
					case 1:
						return diff > check
					case 2:
						return diff < check
					case 3:
						return diff == check
					}
					return false
				})
			}
			return s
		}
	} else {
		// simple mode: each configured threshold contributes one check
		if c.Greater != nil {
			logrus.Info("Adding greater than check:", *c.Greater)
			gt := *c.Greater
			s = append(s, func(e *event.Event) bool {
				return e.Metric > gt
			})
		}
		if c.Less != nil {
			logrus.Info("Adding less than check:", *c.Less)
			lt := *c.Less
			s = append(s, func(e *event.Event) bool {
				return e.Metric < lt
			})
		}
		if c.Exactly != nil {
			logrus.Info("Adding exactly check:", *c.Exactly)
			ex := *c.Exactly
			s = append(s, func(e *event.Event) bool {
				return e.Metric == ex
			})
		}
	}
	// if we are using aggregation, replace all with the aggregation form
	if c.Aggregation != nil {
		logrus.Infof("Converting %d checks to using aggregation", len(s))
		for i := range s {
			s[i] = c.wrapAggregation(s[i])
		}
	}
	return s
}
// wrapAggregation adapts a satisfier so it tests the aggregated value
// (the newest dataframe entry) instead of the raw event metric.
func (c *Condition) wrapAggregation(s satisfier) satisfier {
	return func(e *event.Event) bool {
		// Work on a copy of the event; swap in the aggregated metric
		// before delegating to the wrapped check.
		aggregated := *e
		c.DoOnTracker(e, func(t *eventTracker) {
			aggregated.Metric = t.df.Index(0)
		})
		return s(&aggregated)
	}
}
// getTrackingFunc selects the tracking strategy for c: aggregation
// based when an Aggregation config is present, simple otherwise.
func getTrackingFunc(c *Condition) TrackFunc {
	if c.Aggregation == nil {
		return SimpleTrack
	}
	return AggregationTrack
}
// init compiles checks and sanitizes the condition before it is used.
// It clamps Occurences and WindowSize to sane minimums, compiles the
// check functions, and selects the tracking strategy. groupBy is
// currently unused here.
func (c *Condition) init(groupBy *event.TagSet) {
	// Occurences below 1 would fire even when the event never satisfies
	// the condition, so clamp to 1. (Message fixed: 1 is allowed.)
	if c.Occurences < 1 {
		logrus.Warnf("Occurences must be >= 1. %d given. Occurences for this condition will be set to 1.", c.Occurences)
		c.Occurences = 1
	}

	// WindowSize must be at least 2: historical checks need at least one
	// prior data point. Sanitize BEFORE compiling checks, which read it
	// (e.g. the std-dev minimum-window check).
	if c.WindowSize < 2 {
		logrus.Warnf("WindowSize must be >= 2. %d given. Window size for this condition will be set to %d", c.WindowSize, DEFAULT_WINDOW_SIZE)
		c.WindowSize = DEFAULT_WINDOW_SIZE
	}

	c.checks = c.compileChecks()

	// Start with a fresh tracker map. (The previous nil-check right
	// after make() was dead code and has been removed.)
	c.eventTrackers = make(map[string]*eventTracker)

	// decide which tracking method we will use
	c.trackFunc = getTrackingFunc(c)
	c.ready = true
}
updated docs
package escalation
import (
"math"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/eliothedeman/bangarang/event"
"github.com/eliothedeman/smoothie"
)
const (
DEFAULT_WINDOW_SIZE = 2 // The default size of the dataframe used in window operations
STATUS_SIZE = 10 // The default size of the dataframe used to count statuses
MIN_STD_DEV_WINDOW_SIZE = 5 // the smallets a window size can be for a standard deviation check
)
// Condition holds conditional information to check events against.
type Condition struct {
	// Threshold values; nil disables the corresponding check. For
	// StdDev conditions, Greater doubles as the sigma multiplier.
	Greater *float64 `json:"greater"`
	Less    *float64 `json:"less"`
	Exactly *float64 `json:"exactly"`

	// Special check selectors; when any is set the condition is no
	// longer "simple" and only one special check is compiled.
	StdDev      bool `json:"std_dev"`
	Derivative  bool `json:"derivative"`
	HoltWinters bool `json:"holt_winters"`
	Simple      bool `json:"simple"`

	// Occurences is the number of consecutive satisfying events
	// required before the condition reports a hit.
	Occurences int `json:"occurences"`
	// WindowSize is the dataframe length used for windowed checks.
	WindowSize int `json:"window_size"`
	// NOTE(review): the json tag "agregation" is misspelled, but
	// changing it would break existing configs — left as-is.
	Aggregation *Aggregation `json:"agregation"`

	trackFunc     TrackFunc
	checks        []satisfier
	eventTrackers map[string]*eventTracker
	sync.Mutex
	ready bool
}
// Aggregation is the config for checks based on the aggregation of data
// over a time window, instead of individual data points. WindowLength
// is used as a number of seconds in AggregationTrack.
type Aggregation struct {
	WindowLength int `json:"window_length"`
}
// aggregator tracks when the current aggregation bucket closes out.
type aggregator struct {
	nextCloseout time.Time
}
// eventTracker holds the rolling state for a single event index: a
// dataframe of recent metrics, a dataframe of recent hit/miss states,
// counters, and an optional aggregation bucket tracker.
type eventTracker struct {
	df     *smoothie.DataFrame
	states *smoothie.DataFrame
	// count is the number of samples pushed into df so far.
	count int
	// occurences counts consecutive satisfying events.
	occurences int

	// optional; non-nil only when the condition uses aggregation
	agg *aggregator
}
// refresh resets the status history and the consecutive-occurrence
// counter, leaving the metric dataframe untouched.
func (e *eventTracker) refresh() {
	e.occurences = 0
	e.states = smoothie.NewDataFrameFromSlice(make([]float64, STATUS_SIZE))
}
type satisfier func(e *event.Event) bool
// newTracker builds an eventTracker sized for this condition's window,
// attaching an aggregator when aggregation is configured.
func (c *Condition) newTracker() *eventTracker {
	tracker := &eventTracker{
		df:     smoothie.NewDataFrameFromSlice(make([]float64, c.WindowSize)),
		states: smoothie.NewDataFrameFromSlice(make([]float64, STATUS_SIZE)),
	}
	if c.Aggregation == nil {
		return tracker
	}
	tracker.agg = &aggregator{}
	return tracker
}
// DoOnTracker runs dot against the tracker for e's index name, creating
// the tracker on first use.
// NOTE(review): the Lock/Unlock calls guarding eventTrackers are
// commented out, so concurrent callers race on the map — confirm
// whether locking was disabled deliberately before re-enabling it.
func (c *Condition) DoOnTracker(e *event.Event, dot func(*eventTracker)) {
	// c.Lock()
	et, ok := c.eventTrackers[e.IndexName()]
	if !ok {
		et = c.newTracker()
		c.eventTrackers[e.IndexName()] = et
	}
	dot(et)
	// c.Unlock()
}
// getTracker returns the eventTracker associated with e's index name,
// lazily creating both the tracker map and the tracker itself.
func (c *Condition) getTracker(e *event.Event) *eventTracker {
	if c.eventTrackers == nil {
		c.eventTrackers = make(map[string]*eventTracker)
	}
	key := e.IndexName()
	if tracked, ok := c.eventTrackers[key]; ok {
		return tracked
	}
	tracked := c.newTracker()
	c.eventTrackers[key] = tracked
	return tracked
}
type TrackFunc func(c *Condition, e *event.Event) bool
// AggregationTrack folds e's metric into the current aggregation bucket
// for its tracker, starting a new bucket once the closeout deadline has
// passed, then reports whether the occurrence threshold has been hit.
func AggregationTrack(c *Condition, e *event.Event) bool {
	c.DoOnTracker(e, func(t *eventTracker) {
		// if we are still within the closeout, add to the current value
		if time.Now().Before(t.agg.nextCloseout) {
			t.df.Insert(0, t.df.Index(0)+e.Metric)
			// if we are after the closeout, start a new datapoint and close out the old one
		} else {
			t.df.Push(e.Metric)
			// WindowLength is interpreted as seconds here.
			t.agg.nextCloseout = time.Now().Add(time.Second * time.Duration(c.Aggregation.WindowLength))
		}
	})
	return c.OccurencesHit(e)
}
// SimpleTrack records e's metric in the tracker's dataframe, bumps the
// sample count, and reports whether the occurrence threshold is hit.
func SimpleTrack(c *Condition, e *event.Event) bool {
	tracker := c.getTracker(e)
	tracker.count++
	tracker.df.Push(e.Metric)
	return c.OccurencesHit(e)
}
// isSimple reports whether this condition should be treated as a
// "simple" threshold condition (plain >, <, == checks) rather than one
// of the special statistical checks.
func (c *Condition) isSimple() bool {
	// An explicit Simple flag always wins; otherwise default to simple
	// when no special check type is requested.
	return c.Simple || !(c.StdDev || c.HoltWinters || c.Derivative)
}
// TrackEvent starts tracking an event and reports whether the event has
// hit its occurrence settings. The work is delegated to the trackFunc
// chosen in init (SimpleTrack or AggregationTrack).
func (c *Condition) TrackEvent(e *event.Event) bool {
	return c.trackFunc(c, e)
}
// StateChanged returns true if the state of the incoming event is not
// the same as the last event.
func (c *Condition) StateChanged(e *event.Event) bool {
	t := c.getTracker(e)
	// No samples seen yet but a non-zero newest state entry: treat as a
	// state change.
	if t.count == 0 && t.states.Index(t.states.Len()-1) != 0 {
		return true
	}
	// Compare the two most recent entries in the state history.
	// NOTE(review): assumes Push places the newest value at the highest
	// index — confirm against smoothie's DataFrame semantics.
	return t.states.Index(t.states.Len()-1) != t.states.Index(t.states.Len()-2)
}
// OccurencesHit records whether e satisfies the condition and reports
// whether the consecutive-occurrence threshold has been met. The state
// dataframe is updated with 1 (hit) or 0 (miss) so StateChanged can
// detect transitions.
func (c *Condition) OccurencesHit(e *event.Event) bool {
	t := c.getTracker(e)
	if c.Satisfies(e) {
		t.occurences++
	} else {
		// A miss resets the consecutive-occurrence counter.
		t.occurences = 0
	}

	// Evaluate the threshold once and record it in the state history.
	hit := t.occurences >= c.Occurences
	if hit {
		t.states.Push(1)
	} else {
		t.states.Push(0)
	}
	return hit
}
// Satisfies reports whether e passes at least one of the compiled
// checks for this condition.
func (c *Condition) Satisfies(e *event.Event) bool {
	for _, passes := range c.checks {
		if !passes(e) {
			continue
		}
		return true
	}
	return false
}
// compileChecks creates the list of checks that the condition will use
// to test events. When the condition is not simple, exactly one special
// check (std-dev or derivative) is compiled; simple conditions may
// combine >, < and == threshold checks. With aggregation enabled, every
// check is wrapped to test the aggregated value instead.
func (c *Condition) compileChecks() []satisfier {
	s := []satisfier{}
	// if any of the special checks are included, only one check can be implemented per condition
	if !c.isSimple() {
		if c.StdDev {
			// check to see if the window size is large enough. The minimum is 5.
			if c.WindowSize < MIN_STD_DEV_WINDOW_SIZE {
				logrus.Errorf("Window size %d is too small for a standard deviation check", c.WindowSize)
				// stop short
				return s
			}
			sigma := math.NaN()
			// get the sigma value; the Greater threshold doubles as sigma here
			if c.Greater != nil {
				sigma = *c.Greater
			} else {
				// default to 5 sigma
				sigma = 5
			}
			logrus.Infof("Adding standard deviation check of %f sigma", sigma)
			s = append(s, func(e *event.Event) bool {
				t := c.getTracker(e)
				// if the count is greater than 1/4 the window size, start checking
				if t.count > t.df.Len()/4 {
					// if the count is greater than the window size, use the whole df
					if t.count >= t.df.Len() {
						return math.Abs(e.Metric-t.df.Avg()) > (sigma * t.df.StdDev())
					}
					// take a subslice of populated values
					// NOTE(review): this compares against a single recent
					// sample rather than a subslice aggregate — confirm intent.
					return math.Abs(e.Metric-t.df.Index(t.df.Len()-2)) > (sigma * t.df.StdDev())
				}
				return false
			})
			return s
		}
		if c.Derivative {
			check := math.NaN()
			// kind encodes the comparison: 1 = >, 2 = <, 3 = ==; 0 means none supplied
			var kind uint8
			// get the check value
			if c.Greater != nil {
				kind = 1
				check = *c.Greater
			} else if c.Less != nil {
				kind = 2
				check = *c.Less
			} else if c.Exactly != nil {
				kind = 3
				check = *c.Exactly
			} else {
				logrus.Error("No derivitive type supplied. >, <, == required")
			}
			if kind != 0 {
				logrus.Infof("Adding derivative check of %f", check)
				s = append(s, func(e *event.Event) bool {
					t := c.getTracker(e)
					// we need to have seen at least enough events to fill the window
					if t.count < t.df.Len() {
						return false
					}
					// change relative to the oldest value in the window
					diff := e.Metric - t.df.Index(0)
					switch kind {
					case 1:
						return diff > check
					case 2:
						return diff < check
					case 3:
						return diff == check
					}
					return false
				})
			}
			return s
		}
	} else {
		// simple threshold checks; any combination may be present
		if c.Greater != nil {
			logrus.Info("Adding greater than check:", *c.Greater)
			gt := *c.Greater
			s = append(s, func(e *event.Event) bool {
				return e.Metric > gt
			})
		}
		if c.Less != nil {
			logrus.Info("Adding less than check:", *c.Less)
			lt := *c.Less
			s = append(s, func(e *event.Event) bool {
				return e.Metric < lt
			})
		}
		if c.Exactly != nil {
			logrus.Info("Adding exactly check:", *c.Exactly)
			ex := *c.Exactly
			s = append(s, func(e *event.Event) bool {
				return e.Metric == ex
			})
		}
	}
	// if we are using aggregation, replace all with the aggregation form
	if c.Aggregation != nil {
		logrus.Infof("Converting %d checks to using aggregation", len(s))
		for i := range s {
			s[i] = c.wrapAggregation(s[i])
		}
	}
	return s
}
// wrapAggregation converts a per-event satisfier into one that checks
// the aggregated (newest dataframe) value for the event's tracker.
func (c *Condition) wrapAggregation(s satisfier) satisfier {
	return func(e *event.Event) bool {
		clone := *e
		// Substitute the aggregated metric, then run the original check
		// against the copy so the caller's event is never mutated.
		c.DoOnTracker(e, func(t *eventTracker) {
			clone.Metric = t.df.Index(0)
		})
		return s(&clone)
	}
}
// getTrackingFunc picks the TrackFunc for c. Conditions with an
// Aggregation config get AggregationTrack; everything else uses
// SimpleTrack.
func getTrackingFunc(c *Condition) TrackFunc {
	if c.Aggregation != nil {
		return AggregationTrack
	}

	return SimpleTrack
}
// init compiles checks and sanitizes the condition before it is used.
// It clamps Occurences and WindowSize to sane minimums, compiles the
// check functions, and selects the tracking strategy. groupBy is
// currently unused here.
func (c *Condition) init(groupBy *event.TagSet) {
	// Occurences below 1 would fire even when the event never satisfies
	// the condition, so clamp to 1. (Message fixed: 1 is allowed.)
	if c.Occurences < 1 {
		logrus.Warnf("Occurences must be >= 1. %d given. Occurences for this condition will be set to 1.", c.Occurences)
		c.Occurences = 1
	}

	// WindowSize must be at least 2: historical checks need at least one
	// prior data point. Sanitize BEFORE compiling checks, which read it
	// (e.g. the std-dev minimum-window check).
	if c.WindowSize < 2 {
		logrus.Warnf("WindowSize must be >= 2. %d given. Window size for this condition will be set to %d", c.WindowSize, DEFAULT_WINDOW_SIZE)
		c.WindowSize = DEFAULT_WINDOW_SIZE
	}

	c.checks = c.compileChecks()

	// Start with a fresh tracker map. (The previous nil-check right
	// after make() was dead code and has been removed.)
	c.eventTrackers = make(map[string]*eventTracker)

	// decide which tracking method we will use
	c.trackFunc = getTrackingFunc(c)
	c.ready = true
}
|
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Go bindings for GLib 2. Supports version 2.36 and later.
*/
package glib
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include "glib.go.h"
import "C"
import (
"errors"
"fmt"
"reflect"
"runtime"
"sync"
"unsafe"
)
var (
callbackContexts = struct {
sync.RWMutex
s []*CallbackContext
}{}
idleFnContexts = struct {
sync.RWMutex
s []*idleFnContext
}{}
)
/*
* Type conversions
*/
// gbool converts a Go bool to a C gboolean (1 for true, 0 for false).
func gbool(b bool) C.gboolean {
	if !b {
		return C.gboolean(0)
	}
	return C.gboolean(1)
}
// gobool converts a C gboolean to a Go bool: any non-zero value is true.
func gobool(b C.gboolean) bool {
	return b != 0
}
/*
* Unexported vars
*/
var nilPtrErr = errors.New("cgo returned unexpected nil pointer")
/*
* Constants
*/
// Type is a representation of GLib's GType.
type Type uint
const _TYPE_FUNDAMENTAL_SHIFT = 2
const (
TYPE_INVALID Type = iota << _TYPE_FUNDAMENTAL_SHIFT
TYPE_NONE
TYPE_INTERFACE
TYPE_CHAR
TYPE_UCHAR
TYPE_BOOLEAN
TYPE_INT
TYPE_UINT
TYPE_LONG
TYPE_ULONG
TYPE_INT64
TYPE_UINT64
TYPE_ENUM
TYPE_FLAGS
TYPE_FLOAT
TYPE_DOUBLE
TYPE_STRING
TYPE_POINTER
TYPE_BOXED
TYPE_PARAM
TYPE_OBJECT
TYPE_VARIANT
)
// UserDirectory is a representation of GLib's GUserDirectory.
type UserDirectory int
const (
USER_DIRECTORY_DESKTOP UserDirectory = C.G_USER_DIRECTORY_DESKTOP
USER_DIRECTORY_DOCUMENTS UserDirectory = C.G_USER_DIRECTORY_DOCUMENTS
USER_DIRECTORY_DOWNLOAD UserDirectory = C.G_USER_DIRECTORY_DOWNLOAD
USER_DIRECTORY_MUSIC UserDirectory = C.G_USER_DIRECTORY_MUSIC
USER_DIRECTORY_PICTURES UserDirectory = C.G_USER_DIRECTORY_PICTURES
USER_DIRECTORY_PUBLIC_SHARE UserDirectory = C.G_USER_DIRECTORY_PUBLIC_SHARE
USER_DIRECTORY_TEMPLATES UserDirectory = C.G_USER_DIRECTORY_TEMPLATES
USER_DIRECTORY_VIDEOS UserDirectory = C.G_USER_DIRECTORY_VIDEOS
)
const USER_N_DIRECTORIES int = C.G_USER_N_DIRECTORIES
/*
* Events
*/
// CallbackContext is a special type used to represent parameters
// passed to callback functions. It is in most cases unneeded, due to
// Connect() supporting closures.
type CallbackContext struct {
f interface{}
cbi unsafe.Pointer
target reflect.Value
data reflect.Value
}
// CallbackArg is a generic type representing individual parameters
// passed to callback functions.
type CallbackArg uintptr
// Target() returns the target Object connected to a callback
// function. This value should be type asserted as the type of the
// target.
func (c *CallbackContext) Target() interface{} {
return c.target.Interface()
}
// Data() returns the optional user data passed to a callback function
// connected with ConnectWithData(). This value should be type asserted
// as the type of the data.
func (c *CallbackContext) Data() interface{} {
return c.data.Interface()
}
// Arg() returns the nth argument passed to the callback function.
func (c *CallbackContext) Arg(n int) CallbackArg {
return CallbackArg(C.cbinfo_get_arg((*C.cbinfo)(c.cbi), C.int(n)))
}
// String() returns this callback argument as a Go string. Calling
// this function results in undefined behavior if the argument for the
// native C callback function is not a C string.
func (c CallbackArg) String() string {
return C.GoString((*C.char)(unsafe.Pointer(c)))
}
// Int() returns this callback argument as a Go int. Calling this
// function results in undefined behavior if the argument for the native
// C callback function is not an int.
func (c CallbackArg) Int() int {
return int(C.int(C.uintptr_t(c)))
}
// UInt() returns this callback argument as a Go uint. Calling this
// function results in undefined behavior if the argument for the native
// C callback function is not an unsigned int.
func (c CallbackArg) UInt() uint {
return uint(C.uint(C.uintptr_t(c)))
}
// _go_glib_callback dispatches a connected GObject signal back into Go.
// It looks up the CallbackContext by the slot index recorded at connect
// time, calls the stored function (passing the context as the sole
// argument when the function accepts one), and, if the function returned
// a value, propagates it to C as a gboolean.
//
//export _go_glib_callback
func _go_glib_callback(cbi *C.cbinfo) {
	// Hold the read lock only while touching the shared slice; the user
	// callback itself runs outside the lock.
	callbackContexts.RLock()
	ctx := callbackContexts.s[int(cbi.func_n)]
	rf := reflect.ValueOf(ctx.f)
	t := rf.Type()
	fargs := make([]reflect.Value, t.NumIn())
	if len(fargs) > 0 {
		fargs[0] = reflect.ValueOf(ctx)
	}
	callbackContexts.RUnlock()
	ret := rf.Call(fargs)
	if len(ret) > 0 {
		// A non-bool return yields false via the failed type assertion.
		bret, _ := ret[0].Interface().(bool)
		cbi.ret = gbool(bret)
	}
}
/*
* Main event loop
*/
type idleFnContext struct {
f interface{}
args []reflect.Value
idl *C.idleinfo
}
// IdleAdd() is a wrapper around g_idle_add() and adds the function f,
// called with the arguments in datas, to run in the context of the GLib
// event loop. IdleAdd() returns a uint representing the identifier for
// this source function, and an error if f is not a function, len(datas)
// does not match the number of inputs to f, or there is a type mismatch
// between arguments.
func IdleAdd(f interface{}, datas ...interface{}) (uint, error) {
	rf := reflect.ValueOf(f)
	if rf.Kind() != reflect.Func {
		return 0, errors.New("f is not a function")
	}
	t := rf.Type()
	if t.NumIn() != len(datas) {
		return 0, errors.New("Number of arguments do not match")
	}

	// Validate each argument's kind against the function signature.
	var vals []reflect.Value
	for i := range datas {
		ntharg := t.In(i)
		val := reflect.ValueOf(datas[i])
		if ntharg.Kind() != val.Kind() {
			s := fmt.Sprint("Types of arg", i, "do not match")
			return 0, errors.New(s)
		}
		vals = append(vals, val)
	}

	ctx := &idleFnContext{}
	ctx.f = f
	ctx.args = vals

	// Register the context and compute its slot index under a single
	// lock. The original released the write lock before re-acquiring a
	// read lock to take len(), so a concurrent IdleAdd could append in
	// between and make the index refer to the wrong context.
	idleFnContexts.Lock()
	idleFnContexts.s = append(idleFnContexts.s, ctx)
	funcN := len(idleFnContexts.s) - 1
	idleFnContexts.Unlock()

	idl := C._g_idle_add(C.int(funcN))
	ctx.idl = idl
	return uint(idl.id), nil
}
// _go_glib_idle_fn dispatches a g_idle_add callback back into Go: it
// looks up the registered context by slot index and invokes the stored
// function with its pre-validated arguments. If the function returns a
// bool, that value becomes the GSource return (true keeps the source
// installed); otherwise the source is removed.
//
//export _go_glib_idle_fn
func _go_glib_idle_fn(idl *C.idleinfo) {
	// Read the context under the lock; the user call happens outside it.
	idleFnContexts.RLock()
	ctx := idleFnContexts.s[int(idl.func_n)]
	idleFnContexts.RUnlock()
	rf := reflect.ValueOf(ctx.f)
	rv := rf.Call(ctx.args)
	if len(rv) == 1 {
		if rv[0].Kind() == reflect.Bool {
			idl.ret = gbool(rv[0].Bool())
			return
		}
	}
	// No usable bool return: tell GLib to remove the idle source.
	idl.ret = gbool(false)
}
// _go_nil_unused_idle_ctx drops the context at slot n once its idle
// source is finished so the GC can reclaim it. The slot itself is kept
// (set to nil) because the indices of later contexts must stay stable.
//
//export _go_nil_unused_idle_ctx
func _go_nil_unused_idle_ctx(n C.int) {
	idleFnContexts.Lock()
	idleFnContexts.s[int(n)] = nil
	idleFnContexts.Unlock()
}
/*
* Miscellaneous Utility Functions
*/
// GetUserSpecialDir() is a wrapper around g_get_user_special_dir(). A
// non-nil error is returned in the case that g_get_user_special_dir()
// returns NULL to differentiate between NULL and an empty string.
func GetUserSpecialDir(directory UserDirectory) (string, error) {
	dir := C.g_get_user_special_dir(C.GUserDirectory(directory))
	if dir == nil {
		return "", nilPtrErr
	}
	return C.GoString((*C.char)(dir)), nil
}
/*
* GObject
*/
// IObject is an interface type implemented by Object and all types which embed
// an Object. It is meant to be used as a type for function arguments which
// require GObjects or any subclasses thereof.
type IObject interface {
toGObject() *C.GObject
}
// Object is a representation of GLib's GObject.
type Object struct {
GObject *C.GObject
}
// Native() returns a pointer to the underlying GObject.
func (v *Object) Native() *C.GObject {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGObject(p)
}
func (v *Object) toGObject() *C.GObject {
if v == nil {
return nil
}
return v.Native()
}
func (v *Object) typeFromInstance() Type {
c := C._g_type_from_instance(C.gpointer(unsafe.Pointer(v.Native())))
return Type(c)
}
// ToGObject() type converts an unsafe.Pointer as a native C GObject.
// This function is exported for visibility in other gotk3 packages and
// is not meant to be used by applications.
func ToGObject(p unsafe.Pointer) *C.GObject {
return C.toGObject(p)
}
// Ref() is a wrapper around g_object_ref().
func (v *Object) Ref() {
C.g_object_ref(C.gpointer(v.GObject))
}
// Unref() is a wrapper around g_object_unref().
func (v *Object) Unref() {
C.g_object_unref(C.gpointer(v.GObject))
}
// RefSink() is a wrapper around g_object_ref_sink().
func (v *Object) RefSink() {
C.g_object_ref_sink(C.gpointer(v.GObject))
}
// IsFloating() is a wrapper around g_object_is_floating().
func (v *Object) IsFloating() bool {
c := C.g_object_is_floating(C.gpointer(v.GObject))
return gobool(c)
}
// ForceFloating() is a wrapper around g_object_force_floating().
func (v *Object) ForceFloating() {
C.g_object_force_floating(v.GObject)
}
// StopEmission() is a wrapper around g_signal_stop_emission_by_name().
func (v *Object) StopEmission(s string) {
cstr := C.CString(s)
defer C.free(unsafe.Pointer(cstr))
C.g_signal_stop_emission_by_name((C.gpointer)(v.GObject),
(*C.gchar)(cstr))
}
// connectCtx registers ctx in the global callback table and connects
// the signal named by s on v's GObject to the Go callback dispatcher.
// It returns ctx's slot index, which callers use as a handler id.
func (v *Object) connectCtx(ctx *CallbackContext, s string) int {
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))

	// Reserve the slot and append under a single write lock so two
	// concurrent connects cannot claim the same index. Register ctx
	// BEFORE connecting: the original connected first, so a signal
	// firing before the append would index past the end of the slice
	// in _go_glib_callback.
	callbackContexts.Lock()
	nCbCtxs := len(callbackContexts.s)
	callbackContexts.s = append(callbackContexts.s, ctx)
	callbackContexts.Unlock()

	ctx.cbi = unsafe.Pointer(C._g_signal_connect(unsafe.Pointer(v.GObject),
		(*C.gchar)(cstr), C.int(nCbCtxs)))
	return nCbCtxs
}
// Connect() is a wrapper around g_signal_connect(). Connect()
// returns an int representing the handler id, and a non-nil error if f
// is not a function.
func (v *Object) Connect(s string, f interface{}) (int, error) {
	rf := reflect.ValueOf(f)
	if rf.Kind() != reflect.Func {
		return 0, errors.New("f is not a function")
	}
	// No user data: the data field carries reflect.ValueOf(nil).
	// cbi is filled in by connectCtx once the signal is connected.
	ctx := &CallbackContext{f, nil, reflect.ValueOf(v),
		reflect.ValueOf(nil)}
	return v.connectCtx(ctx, s), nil
}
// ConnectWithData() is a wrapper around g_signal_connect(). This
// function differs from Connect() in that it allows passing an
// additional argument for user data. This additional argument is
// usually unneeded since Connect() supports full closures, however, if f
// was not created with the necessary data in scope, it may be passed in
// by connecting with this function.
// Note: unlike Connect(), f is not validated to be a function here.
func (v *Object) ConnectWithData(s string, f interface{}, data interface{}) int {
	ctx := &CallbackContext{f, nil, reflect.ValueOf(v),
		reflect.ValueOf(data)}
	return v.connectCtx(ctx, s)
}
// Set() is a wrapper around g_object_set(). However, unlike
// g_object_set(), this function only sets one name value pair. Make
// multiple calls to this function to set multiple properties.
func (v *Object) Set(name string, value interface{}) error {
cstr := C.CString(name)
defer C.free(unsafe.Pointer(cstr))
if _, ok := value.(Object); ok {
value = value.(Object).GObject
}
var p unsafe.Pointer = nil
switch value.(type) {
case bool:
c := gbool(value.(bool))
p = unsafe.Pointer(&c)
case int8:
c := C.gint8(value.(int8))
p = unsafe.Pointer(&c)
case int16:
c := C.gint16(value.(int16))
p = unsafe.Pointer(&c)
case int32:
c := C.gint32(value.(int32))
p = unsafe.Pointer(&c)
case int64:
c := C.gint64(value.(int64))
p = unsafe.Pointer(&c)
case int:
c := C.gint(value.(int))
p = unsafe.Pointer(&c)
case uint8:
c := C.guchar(value.(uint8))
p = unsafe.Pointer(&c)
case uint16:
c := C.guint16(value.(uint16))
p = unsafe.Pointer(&c)
case uint32:
c := C.guint32(value.(uint32))
p = unsafe.Pointer(&c)
case uint64:
c := C.guint64(value.(uint64))
p = unsafe.Pointer(&c)
case uint:
c := C.guint(value.(uint))
p = unsafe.Pointer(&c)
case uintptr:
p = unsafe.Pointer(C.gpointer(value.(uintptr)))
case float32:
c := C.gfloat(value.(float32))
p = unsafe.Pointer(&c)
case float64:
c := C.gdouble(value.(float64))
p = unsafe.Pointer(&c)
case string:
cstr := C.CString(value.(string))
defer C.free(unsafe.Pointer(cstr))
p = unsafe.Pointer(cstr)
default:
if pv, ok := value.(unsafe.Pointer); ok {
p = pv
} else {
// Constants with separate types are not type asserted
// above, so do a runtime check here instead.
val := reflect.ValueOf(value)
switch val.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16,
reflect.Int32, reflect.Int64:
c := C.int(val.Int())
p = unsafe.Pointer(&c)
case reflect.Uintptr:
p = unsafe.Pointer(C.gpointer(val.Pointer()))
}
}
}
// Can't call g_object_set() as it uses a variable arg list, use a
// wrapper instead
if p != nil {
C._g_object_set_one(C.gpointer(v.GObject), (*C.gchar)(cstr), p)
return nil
} else {
return errors.New("Unable to perform type conversion")
}
}
/*
* GObject Signals
*/
// Emit() is a wrapper around g_signal_emitv() and emits the signal
// specified by the string s to an Object. Arguments to callback
// functions connected to this signal must be specified in args. Emit()
// returns an interface{} which must be type asserted as the Go
// equivalent type to the return value for native C callback.
//
// Note that this code is unsafe in that the types of values in args are
// not checked against whether they are suitable for the callback.
func (v *Object) Emit(s string, args ...interface{}) (interface{}, error) {
cstr := C.CString(s)
defer C.free(unsafe.Pointer(cstr))
// Create array of this instance and arguments
valv := C.alloc_gvalue_list(C.int(len(args)) + 1)
defer C.free(unsafe.Pointer(valv))
// Add args and valv
val, err := GValue(v)
if err != nil {
return nil, errors.New("Error converting Object to GValue: " + err.Error())
}
C.val_list_insert(valv, C.int(0), val.Native())
for i := range args {
val, err := GValue(args[i])
if err != nil {
return nil, fmt.Errorf("Error converting arg %d to GValue: %s", i, err.Error())
}
C.val_list_insert(valv, C.int(i+1), val.Native())
}
t := v.typeFromInstance()
id := C.g_signal_lookup((*C.gchar)(cstr), C.GType(t))
ret, err := ValueAlloc()
if err != nil {
return nil, errors.New("Error creating Value for return value")
}
C.g_signal_emitv(valv, id, C.GQuark(0), ret.Native())
return ret.GoValue()
}
// HandlerBlock() is a wrapper around g_signal_handler_block().
func (v *Object) HandlerBlock(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_block((C.gpointer)(v.GObject), id)
}
// HandlerUnblock() is a wrapper around g_signal_handler_unblock().
func (v *Object) HandlerUnblock(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_unblock((C.gpointer)(v.GObject), id)
}
// HandlerDisconnect() is a wrapper around g_signal_handler_disconnect().
func (v *Object) HandlerDisconnect(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_disconnect((C.gpointer)(v.GObject), id)
}
/*
* GInitiallyUnowned
*/
// InitiallyUnowned is a representation of GLib's GInitiallyUnowned.
type InitiallyUnowned struct {
*Object
}
/*
* GValue
*/
// Value is a representation of GLib's GValue.
//
// Don't allocate Values on the stack or heap manually as they may not
// be properly unset when going out of scope. Instead, use ValueAlloc(),
// which will set the runtime finalizer to unset the Value after it has
// left scope.
type Value struct {
GValue C.GValue
}
// Native() returns a pointer to the underlying GValue.
func (v *Value) Native() *C.GValue {
return &v.GValue
}
// ValueAlloc() allocates a Value and sets a runtime finalizer to call
// g_value_unset() on the underlying GValue after leaving scope.
// ValueAlloc() returns a non-nil error if the allocation failed.
func ValueAlloc() (*Value, error) {
c := C._g_value_alloc()
if c == nil {
return nil, nilPtrErr
}
v := &Value{*c}
runtime.SetFinalizer(v, (*Value).unset)
return v, nil
}
// ValueInit() is a wrapper around g_value_init() and allocates and
// initializes a new Value with the Type t. A runtime finalizer is set
// to call g_value_unset() on the underlying GValue after leaving scope.
// ValueInit() returns a non-nil error if the allocation failed.
func ValueInit(t Type) (*Value, error) {
c := C._g_value_init(C.GType(t))
if c == nil {
return nil, nilPtrErr
}
v := &Value{*c}
runtime.SetFinalizer(v, (*Value).unset)
return v, nil
}
func (v *Value) unset() {
C.g_value_unset(v.Native())
}
// GetType() is a wrappr around the G_VALUE_HOLDS_GTYPE() macro and
// the g_value_get_gtype() function. GetType() returns TYPE_INVALID if v
// does not hold a Type, or otherwise returns the Type of v.
func (v *Value) GetType() Type {
c := C._g_value_holds_gtype(C.gpointer(unsafe.Pointer(v.Native())))
if gobool(c) {
c := C.g_value_get_gtype(v.Native())
return Type(c)
}
return TYPE_INVALID
}
// GValue() converts a Go type to a comparable GValue. GValue()
// returns a non-nil error if the conversion was unsuccessful.
func GValue(v interface{}) (gvalue *Value, err error) {
if v == nil {
val, err := ValueInit(TYPE_POINTER)
if err != nil {
return nil, err
}
val.SetPointer(uintptr(0)) // technically not portable
return val, nil
}
switch v.(type) {
case bool:
val, err := ValueInit(TYPE_BOOLEAN)
if err != nil {
return nil, err
}
val.SetBool(v.(bool))
return val, nil
case int8:
val, err := ValueInit(TYPE_CHAR)
if err != nil {
return nil, err
}
val.SetSChar(v.(int8))
return val, nil
case int64:
val, err := ValueInit(TYPE_INT64)
if err != nil {
return nil, err
}
val.SetInt64(v.(int64))
return val, nil
case int:
val, err := ValueInit(TYPE_INT)
if err != nil {
return nil, err
}
val.SetInt(v.(int))
return val, nil
case uint8:
val, err := ValueInit(TYPE_UCHAR)
if err != nil {
return nil, err
}
val.SetUChar(v.(uint8))
return val, nil
case uint64:
val, err := ValueInit(TYPE_UINT64)
if err != nil {
return nil, err
}
val.SetUInt64(v.(uint64))
return val, nil
case uint:
val, err := ValueInit(TYPE_UINT)
if err != nil {
return nil, err
}
val.SetUInt(v.(uint))
return val, nil
case float32:
val, err := ValueInit(TYPE_FLOAT)
if err != nil {
return nil, err
}
val.SetFloat(v.(float32))
return val, nil
case float64:
val, err := ValueInit(TYPE_DOUBLE)
if err != nil {
return nil, err
}
val.SetDouble(v.(float64))
return val, nil
case string:
val, err := ValueInit(TYPE_STRING)
if err != nil {
return nil, err
}
val.SetString(v.(string))
return val, nil
default:
if obj, ok := v.(*Object); ok {
val, err := ValueInit(TYPE_OBJECT)
if err != nil {
return nil, err
}
val.SetInstance(uintptr(unsafe.Pointer(obj.GObject)))
return val, nil
}
/* Try this since above doesn't catch constants under other types */
rval := reflect.ValueOf(v)
switch rval.Kind() {
case reflect.Int8:
val, err := ValueInit(TYPE_CHAR)
if err != nil {
return nil, err
}
val.SetSChar(int8(rval.Int()))
return val, nil
case reflect.Int16:
return nil, errors.New("Type not implemented")
case reflect.Int32:
return nil, errors.New("Type not implemented")
case reflect.Int64:
val, err := ValueInit(TYPE_INT64)
if err != nil {
return nil, err
}
val.SetInt64(rval.Int())
return val, nil
case reflect.Int:
val, err := ValueInit(TYPE_INT)
if err != nil {
return nil, err
}
val.SetInt(int(rval.Int()))
return val, nil
case reflect.Uintptr:
val, err := ValueInit(TYPE_POINTER)
if err != nil {
return nil, err
}
val.SetPointer(rval.Pointer())
return val, nil
}
}
return nil, errors.New("Type not implemented")
}
// GoValue() converts a Value to comparable Go type. GoValue()
// returns a non-nil error if the conversion was unsuccessful. The
// returned interface{} must be type asserted as the actual Go
// representation of the Value.
//
// This function is a wrapper around the many g_value_get_*()
// functions, depending on the type of the Value.
func (v *Value) GoValue() (interface{}, error) {
switch v.GetType() {
case TYPE_INVALID:
return nil, errors.New("Invalid type")
case TYPE_NONE:
return nil, nil
case TYPE_BOOLEAN:
c := C.g_value_get_boolean(v.Native())
return gobool(c), nil
case TYPE_CHAR:
c := C.g_value_get_schar(v.Native())
return int8(c), nil
case TYPE_UCHAR:
c := C.g_value_get_uchar(v.Native())
return uint8(c), nil
case TYPE_INT64:
c := C.g_value_get_int64(v.Native())
return int64(c), nil
case TYPE_INT:
c := C.g_value_get_int(v.Native())
return int(c), nil
case TYPE_UINT64:
c := C.g_value_get_uint64(v.Native())
return uint64(c), nil
case TYPE_UINT:
c := C.g_value_get_uint(v.Native())
return uint(c), nil
case TYPE_FLOAT:
c := C.g_value_get_float(v.Native())
return float32(c), nil
case TYPE_DOUBLE:
c := C.g_value_get_double(v.Native())
return float64(c), nil
case TYPE_STRING:
c := C.g_value_get_string(v.Native())
return C.GoString((*C.char)(c)), nil
default:
return nil, errors.New("Type conversion not supported")
}
}
// SetBool() is a wrapper around g_value_set_boolean().
func (v *Value) SetBool(val bool) {
C.g_value_set_boolean(v.Native(), gbool(val))
}
// SetSChar() is a wrapper around g_value_set_schar().
func (v *Value) SetSChar(val int8) {
C.g_value_set_schar(v.Native(), C.gint8(val))
}
// SetInt64() is a wrapper around g_value_set_int64().
func (v *Value) SetInt64(val int64) {
C.g_value_set_int64(v.Native(), C.gint64(val))
}
// SetInt() is a wrapper around g_value_set_int().
func (v *Value) SetInt(val int) {
C.g_value_set_int(v.Native(), C.gint(val))
}
// SetUChar() is a wrapper around g_value_set_uchar().
func (v *Value) SetUChar(val uint8) {
C.g_value_set_uchar(v.Native(), C.guchar(val))
}
// SetUInt64() is a wrapper around g_value_set_uint64().
func (v *Value) SetUInt64(val uint64) {
C.g_value_set_uint64(v.Native(), C.guint64(val))
}
// SetUInt() is a wrapper around g_value_set_uint().
func (v *Value) SetUInt(val uint) {
C.g_value_set_uint(v.Native(), C.guint(val))
}
// SetFloat() is a wrapper around g_value_set_float().
func (v *Value) SetFloat(val float32) {
C.g_value_set_float(v.Native(), C.gfloat(val))
}
// SetDouble() is a wrapper around g_value_set_double().
func (v *Value) SetDouble(val float64) {
C.g_value_set_double(v.Native(), C.gdouble(val))
}
// SetString() is a wrapper around g_value_set_string().
func (v *Value) SetString(val string) {
cstr := C.CString(val)
defer C.free(unsafe.Pointer(cstr))
C.g_value_set_string(v.Native(), (*C.gchar)(cstr))
}
// SetInstance() is a wrapper around g_value_set_instance().
func (v *Value) SetInstance(instance uintptr) {
C.g_value_set_instance(v.Native(), C.gpointer(instance))
}
// SetPointer() is a wrapper around g_value_set_pointer().
func (v *Value) SetPointer(p uintptr) {
C.g_value_set_pointer(v.Native(), C.gpointer(p))
}
// GetString is a wrapper around g_value_get_string(). GetString
// returns a non-nil error if g_value_get_string() returned a NULL
// pointer to distinguish between returning a NULL pointer and returning
// an empty string.
func (v *Value) GetString() (string, error) {
c := C.g_value_get_string(v.Native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
go fmt
/*
* Copyright (c) 2013 Conformal Systems <info@conformal.com>
*
* This file originated from: http://opensource.conformal.com/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Go bindings for GLib 2. Supports version 2.36 and later.
*/
package glib
// #cgo pkg-config: glib-2.0 gobject-2.0
// #include <glib.h>
// #include <glib-object.h>
// #include "glib.go.h"
import "C"
import (
"errors"
"fmt"
"reflect"
"runtime"
"sync"
"unsafe"
)
// Package-level registries mapping small integer slots (handed to C at
// registration time) back to their Go contexts.  Both slices are
// append-only and guarded by the embedded RWMutex; exported C
// callbacks look contexts up by index.
var (
// callbackContexts holds one entry per signal connection made via
// Connect/ConnectWithData.
callbackContexts = struct {
sync.RWMutex
s []*CallbackContext
}{}
// idleFnContexts holds one entry per IdleAdd registration; slots are
// nil'd out by _go_nil_unused_idle_ctx when the source is removed.
idleFnContexts = struct {
sync.RWMutex
s []*idleFnContext
}{}
)
/*
* Type conversions
*/
// gbool converts a Go bool to a C gboolean (1 for true, 0 for false).
func gbool(b bool) C.gboolean {
	if !b {
		return C.gboolean(0)
	}
	return C.gboolean(1)
}
// gobool converts a C gboolean to a Go bool.  Any non-zero value is
// treated as true, matching GLib's convention.
func gobool(b C.gboolean) bool {
	// Idiomatic form of `if b != 0 { return true }; return false`.
	return b != 0
}
/*
 * Unexported vars
 */
// nilPtrErr is returned by wrappers whenever a C call unexpectedly
// yields a NULL pointer.
var nilPtrErr = errors.New("cgo returned unexpected nil pointer")
/*
 * Constants
 */
// Type is a representation of GLib's GType.
type Type uint
// _TYPE_FUNDAMENTAL_SHIFT is the left shift applied to each ordinal
// below; each constant's value is its position shifted by 2, mirroring
// GLib's fundamental type numbering.
const _TYPE_FUNDAMENTAL_SHIFT = 2
const (
TYPE_INVALID Type = iota << _TYPE_FUNDAMENTAL_SHIFT
TYPE_NONE
TYPE_INTERFACE
TYPE_CHAR
TYPE_UCHAR
TYPE_BOOLEAN
TYPE_INT
TYPE_UINT
TYPE_LONG
TYPE_ULONG
TYPE_INT64
TYPE_UINT64
TYPE_ENUM
TYPE_FLAGS
TYPE_FLOAT
TYPE_DOUBLE
TYPE_STRING
TYPE_POINTER
TYPE_BOXED
TYPE_PARAM
TYPE_OBJECT
TYPE_VARIANT
)
// UserDirectory is a representation of GLib's GUserDirectory.
type UserDirectory int
const (
USER_DIRECTORY_DESKTOP UserDirectory = C.G_USER_DIRECTORY_DESKTOP
USER_DIRECTORY_DOCUMENTS UserDirectory = C.G_USER_DIRECTORY_DOCUMENTS
USER_DIRECTORY_DOWNLOAD UserDirectory = C.G_USER_DIRECTORY_DOWNLOAD
USER_DIRECTORY_MUSIC UserDirectory = C.G_USER_DIRECTORY_MUSIC
USER_DIRECTORY_PICTURES UserDirectory = C.G_USER_DIRECTORY_PICTURES
USER_DIRECTORY_PUBLIC_SHARE UserDirectory = C.G_USER_DIRECTORY_PUBLIC_SHARE
USER_DIRECTORY_TEMPLATES UserDirectory = C.G_USER_DIRECTORY_TEMPLATES
USER_DIRECTORY_VIDEOS UserDirectory = C.G_USER_DIRECTORY_VIDEOS
)
// USER_N_DIRECTORIES is the number of GUserDirectory values.
const USER_N_DIRECTORIES int = C.G_USER_N_DIRECTORIES
/*
 * Events
 */
// CallbackContext is a special type used to represent parameters
// passed to callback functions. It is in most cases unneeded, due to
// Connect() supporting closures.
type CallbackContext struct {
// f is the Go function invoked when the signal fires.
f interface{}
// cbi points at the C cbinfo struct for this connection.
cbi unsafe.Pointer
// target is the Object the signal was connected on.
target reflect.Value
// data is the optional user data from ConnectWithData.
data reflect.Value
}
// CallbackArg is a generic type representing individual parameters
// passed to callback functions.
type CallbackArg uintptr
// Target returns the target Object connected to a callback
// function. This value should be type asserted as the type of the
// target.
func (c *CallbackContext) Target() interface{} {
return c.target.Interface()
}
// Data returns the optional user data passed to a callback function
// connected with ConnectWithData(). This value should be type asserted
// as the type of the data.
func (c *CallbackContext) Data() interface{} {
return c.data.Interface()
}
// Arg returns the nth argument passed to the callback function.
func (c *CallbackContext) Arg(n int) CallbackArg {
return CallbackArg(C.cbinfo_get_arg((*C.cbinfo)(c.cbi), C.int(n)))
}
// String returns this callback argument as a Go string. Calling
// this function results in undefined behavior if the argument for the
// native C callback function is not a C string.
func (c CallbackArg) String() string {
return C.GoString((*C.char)(unsafe.Pointer(c)))
}
// Int returns this callback argument as a Go int. Calling this
// function results in undefined behavior if the argument for the native
// C callback function is not an int.
func (c CallbackArg) Int() int {
return int(C.int(C.uintptr_t(c)))
}
// UInt returns this callback argument as a Go uint. Calling this
// function results in undefined behavior if the argument for the native
// C callback function is not an unsigned int.
func (c CallbackArg) UInt() uint {
return uint(C.uint(C.uintptr_t(c)))
}
// _go_glib_callback is invoked from C whenever a connected signal
// fires.  It looks up the Go context registered at slot cbi.func_n,
// calls the stored function (passing the *CallbackContext as the first
// argument when the function accepts parameters), and propagates a
// leading bool return value back to C via cbi.ret.
// NOTE(review): only fargs[0] is populated; a connected function that
// declares more than one parameter would be called with zero
// reflect.Values — confirm callers only use 0- or 1-arg functions.
//export _go_glib_callback
func _go_glib_callback(cbi *C.cbinfo) {
callbackContexts.RLock()
ctx := callbackContexts.s[int(cbi.func_n)]
rf := reflect.ValueOf(ctx.f)
t := rf.Type()
fargs := make([]reflect.Value, t.NumIn())
if len(fargs) > 0 {
fargs[0] = reflect.ValueOf(ctx)
}
callbackContexts.RUnlock()
// Call outside the lock so the callback may itself connect signals.
ret := rf.Call(fargs)
if len(ret) > 0 {
// Only a leading bool return value is meaningful to GLib.
bret, _ := ret[0].Interface().(bool)
cbi.ret = gbool(bret)
}
}
/*
 * Main event loop
 */
// idleFnContext records one IdleAdd registration: the function, its
// pre-validated arguments, and the C-side idleinfo struct.
type idleFnContext struct {
f interface{}
args []reflect.Value
idl *C.idleinfo
}
// IdleAdd is a wrapper around g_idle_add() and adds the function f,
// called with the arguments in datas, to run in the context of the GLib
// event loop. IdleAdd returns a uint representing the identifier for
// this source function, and an error if f is not a function, len(datas)
// does not match the number of inputs to f, or there is a type mismatch
// between arguments.
func IdleAdd(f interface{}, datas ...interface{}) (uint, error) {
	rf := reflect.ValueOf(f)
	if rf.Kind() != reflect.Func {
		return 0, errors.New("f is not a function")
	}
	t := rf.Type()
	if t.NumIn() != len(datas) {
		return 0, errors.New("Number of arguments do not match")
	}
	// Validate each argument's kind against the function signature
	// before anything is handed to the event loop.
	var vals []reflect.Value
	for i := range datas {
		ntharg := t.In(i)
		val := reflect.ValueOf(datas[i])
		if ntharg.Kind() != val.Kind() {
			s := fmt.Sprint("Types of arg", i, "do not match")
			return 0, errors.New(s)
		}
		vals = append(vals, val)
	}

	ctx := &idleFnContext{f: f, args: vals}

	// Append the context and compute its slot in one critical section.
	// The original code released the write lock, then re-read len(s)
	// under a read lock; a concurrent IdleAdd in that window could make
	// the index passed to C refer to the wrong context.
	idleFnContexts.Lock()
	idleFnContexts.s = append(idleFnContexts.s, ctx)
	funcN := len(idleFnContexts.s) - 1
	idleFnContexts.Unlock()

	idl := C._g_idle_add(C.int(funcN))
	ctx.idl = idl
	return uint(idl.id), nil
}
// _go_glib_idle_fn is invoked from C by the GLib main loop for an idle
// source added via IdleAdd.  It calls the registered Go function with
// its pre-validated arguments and stores a bool return value (if any)
// into idl.ret; a non-bool or missing return defaults to false, which
// removes the idle source.
//export _go_glib_idle_fn
func _go_glib_idle_fn(idl *C.idleinfo) {
idleFnContexts.RLock()
ctx := idleFnContexts.s[int(idl.func_n)]
idleFnContexts.RUnlock()
rf := reflect.ValueOf(ctx.f)
rv := rf.Call(ctx.args)
if len(rv) == 1 {
if rv[0].Kind() == reflect.Bool {
idl.ret = gbool(rv[0].Bool())
return
}
}
idl.ret = gbool(false)
}
// _go_nil_unused_idle_ctx is invoked from C when an idle source is
// finished; it nils out slot n so the context can be garbage
// collected.  Slots are never reused, so indices stay stable.
//export _go_nil_unused_idle_ctx
func _go_nil_unused_idle_ctx(n C.int) {
idleFnContexts.Lock()
idleFnContexts.s[int(n)] = nil
idleFnContexts.Unlock()
}
/*
 * Miscellaneous Utility Functions
 */
// GetUserSpecialDir is a wrapper around g_get_user_special_dir(). A
// non-nil error is returned in the case that g_get_user_special_dir()
// returns NULL to differentiate between NULL and an empty string.
func GetUserSpecialDir(directory UserDirectory) (string, error) {
c := C.g_get_user_special_dir(C.GUserDirectory(directory))
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
/*
 * GObject
 */
// IObject is an interface type implemented by Object and all types which embed
// an Object. It is meant to be used as a type for function arguments which
// require GObjects or any subclasses thereof.
type IObject interface {
toGObject() *C.GObject
}
// Object is a representation of GLib's GObject.
type Object struct {
GObject *C.GObject
}
// Native returns a pointer to the underlying GObject, or nil if the
// receiver or its GObject field is nil.
func (v *Object) Native() *C.GObject {
if v == nil || v.GObject == nil {
return nil
}
p := unsafe.Pointer(v.GObject)
return C.toGObject(p)
}
// toGObject implements IObject; it is nil-safe like Native.
func (v *Object) toGObject() *C.GObject {
if v == nil {
return nil
}
return v.Native()
}
// typeFromInstance returns the GType of the instance wrapped by v.
func (v *Object) typeFromInstance() Type {
c := C._g_type_from_instance(C.gpointer(unsafe.Pointer(v.Native())))
return Type(c)
}
// ToGObject type converts an unsafe.Pointer as a native C GObject.
// This function is exported for visibility in other gotk3 packages and
// is not meant to be used by applications.
func ToGObject(p unsafe.Pointer) *C.GObject {
return C.toGObject(p)
}
// Ref is a wrapper around g_object_ref().
func (v *Object) Ref() {
C.g_object_ref(C.gpointer(v.GObject))
}
// Unref is a wrapper around g_object_unref().
func (v *Object) Unref() {
C.g_object_unref(C.gpointer(v.GObject))
}
// RefSink is a wrapper around g_object_ref_sink().
func (v *Object) RefSink() {
C.g_object_ref_sink(C.gpointer(v.GObject))
}
// IsFloating is a wrapper around g_object_is_floating(); it reports
// whether the object still holds a floating reference.
func (v *Object) IsFloating() bool {
c := C.g_object_is_floating(C.gpointer(v.GObject))
return gobool(c)
}
// ForceFloating is a wrapper around g_object_force_floating().
func (v *Object) ForceFloating() {
C.g_object_force_floating(v.GObject)
}
// StopEmission is a wrapper around g_signal_stop_emission_by_name().
// It stops the current emission of the signal named s on v.
func (v *Object) StopEmission(s string) {
cstr := C.CString(s)
defer C.free(unsafe.Pointer(cstr))
C.g_signal_stop_emission_by_name((C.gpointer)(v.GObject),
(*C.gchar)(cstr))
}
// connectCtx registers ctx as the handler for the signal named s on
// v's GObject and returns ctx's index in the global callback registry
// (the value used as a handler id by HandlerBlock and friends).
func (v *Object) connectCtx(ctx *CallbackContext, s string) int {
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))

	// Reserve the registry slot before handing its index to C.  The
	// original read len(s) under a separate RLock and appended only
	// after connecting, so (a) two concurrent connects could claim the
	// same index, and (b) a signal firing immediately after
	// _g_signal_connect could look up a slot that was not yet stored.
	callbackContexts.Lock()
	nCbCtxs := len(callbackContexts.s)
	callbackContexts.s = append(callbackContexts.s, ctx)
	callbackContexts.Unlock()

	ctx.cbi = unsafe.Pointer(C._g_signal_connect(unsafe.Pointer(v.GObject),
		(*C.gchar)(cstr), C.int(nCbCtxs)))
	return nCbCtxs
}
// Connect is a wrapper around g_signal_connect(). Connect
// returns an int representing the handler id, and a non-nil error if f
// is not a function.  Note the returned id is an index into this
// package's callback registry (as consumed by HandlerBlock,
// HandlerUnblock, and HandlerDisconnect), not the raw GLib handler id.
func (v *Object) Connect(s string, f interface{}) (int, error) {
rf := reflect.ValueOf(f)
if rf.Kind() != reflect.Func {
return 0, errors.New("f is not a function")
}
ctx := &CallbackContext{f, nil, reflect.ValueOf(v),
reflect.ValueOf(nil)}
return v.connectCtx(ctx, s), nil
}
// ConnectWithData is a wrapper around g_signal_connect(). This
// function differs from Connect() in that it allows passing an
// additional argument for user data. This additional argument is
// usually unneeded since Connect() supports full closures; however, if
// f was not created with the necessary data in scope, the data may be
// supplied by connecting with this function instead.
func (v *Object) ConnectWithData(s string, f interface{}, data interface{}) int {
ctx := &CallbackContext{f, nil, reflect.ValueOf(v),
reflect.ValueOf(data)}
return v.connectCtx(ctx, s)
}
// Set is a wrapper around g_object_set(). However, unlike
// g_object_set(), this function only sets one name value pair. Make
// multiple calls to this function to set multiple properties.
// A non-nil error is returned when value cannot be converted to a C
// representation.
func (v *Object) Set(name string, value interface{}) error {
	cstr := C.CString(name)
	defer C.free(unsafe.Pointer(cstr))

	// An Object passed by value is unwrapped to its underlying GObject.
	if obj, ok := value.(Object); ok {
		value = obj.GObject
	}

	// p points at the C representation of value; it stays nil when no
	// conversion is possible.
	var p unsafe.Pointer
	switch val := value.(type) {
	case bool:
		c := gbool(val)
		p = unsafe.Pointer(&c)
	case int8:
		c := C.gint8(val)
		p = unsafe.Pointer(&c)
	case int16:
		c := C.gint16(val)
		p = unsafe.Pointer(&c)
	case int32:
		c := C.gint32(val)
		p = unsafe.Pointer(&c)
	case int64:
		c := C.gint64(val)
		p = unsafe.Pointer(&c)
	case int:
		c := C.gint(val)
		p = unsafe.Pointer(&c)
	case uint8:
		c := C.guchar(val)
		p = unsafe.Pointer(&c)
	case uint16:
		c := C.guint16(val)
		p = unsafe.Pointer(&c)
	case uint32:
		c := C.guint32(val)
		p = unsafe.Pointer(&c)
	case uint64:
		c := C.guint64(val)
		p = unsafe.Pointer(&c)
	case uint:
		c := C.guint(val)
		p = unsafe.Pointer(&c)
	case uintptr:
		p = unsafe.Pointer(C.gpointer(val))
	case float32:
		c := C.gfloat(val)
		p = unsafe.Pointer(&c)
	case float64:
		c := C.gdouble(val)
		p = unsafe.Pointer(&c)
	case string:
		// Freed when Set returns; GLib copies property values.
		cvstr := C.CString(val)
		defer C.free(unsafe.Pointer(cvstr))
		p = unsafe.Pointer(cvstr)
	case unsafe.Pointer:
		p = val
	default:
		// Constants with separate types are not type asserted
		// above, so do a runtime check here instead.
		rv := reflect.ValueOf(value)
		switch rv.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16,
			reflect.Int32, reflect.Int64:
			c := C.int(rv.Int())
			p = unsafe.Pointer(&c)
		case reflect.Uintptr:
			p = unsafe.Pointer(C.gpointer(rv.Pointer()))
		}
	}

	// Can't call g_object_set() as it uses a variable arg list; use a
	// single-pair wrapper instead.
	if p == nil {
		return errors.New("Unable to perform type conversion")
	}
	C._g_object_set_one(C.gpointer(v.GObject), (*C.gchar)(cstr), p)
	return nil
}
/*
 * GObject Signals
 */

// Emit is a wrapper around g_signal_emitv() and emits the signal
// specified by the string s to an Object. Arguments to callback
// functions connected to this signal must be specified in args. Emit
// returns an interface{} which must be type asserted as the Go
// equivalent type to the return value for native C callback.
//
// Note that this code is unsafe in that the types of values in args are
// not checked against whether they are suitable for the callback.
func (v *Object) Emit(s string, args ...interface{}) (interface{}, error) {
	cstr := C.CString(s)
	defer C.free(unsafe.Pointer(cstr))

	// Create array of this instance and arguments.
	valv := C.alloc_gvalue_list(C.int(len(args)) + 1)
	defer C.free(unsafe.Pointer(valv))

	// Slot 0 is the emitting instance; args follow.
	val, err := GValue(v)
	if err != nil {
		return nil, errors.New("Error converting Object to GValue: " + err.Error())
	}
	C.val_list_insert(valv, C.int(0), val.Native())
	for i := range args {
		val, err := GValue(args[i])
		if err != nil {
			return nil, fmt.Errorf("Error converting arg %d to GValue: %s", i, err.Error())
		}
		C.val_list_insert(valv, C.int(i+1), val.Native())
	}

	t := v.typeFromInstance()
	id := C.g_signal_lookup((*C.gchar)(cstr), C.GType(t))

	ret, err := ValueAlloc()
	if err != nil {
		// Original discarded err here; include it for diagnosis.
		return nil, fmt.Errorf("Error creating Value for return value: %s", err.Error())
	}
	C.g_signal_emitv(valv, id, C.GQuark(0), ret.Native())

	return ret.GoValue()
}
// HandlerBlock is a wrapper around g_signal_handler_block().  callID
// is the registry index returned by Connect/ConnectWithData; the real
// GLib handler id is read back from the stored cbinfo.
func (v *Object) HandlerBlock(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_block((C.gpointer)(v.GObject), id)
}
// HandlerUnblock is a wrapper around g_signal_handler_unblock().
func (v *Object) HandlerUnblock(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_unblock((C.gpointer)(v.GObject), id)
}
// HandlerDisconnect is a wrapper around g_signal_handler_disconnect().
func (v *Object) HandlerDisconnect(callID int) {
callbackContexts.RLock()
id := C.cbinfo_get_id((*C.cbinfo)(callbackContexts.s[callID].cbi))
callbackContexts.RUnlock()
C.g_signal_handler_disconnect((C.gpointer)(v.GObject), id)
}
/*
 * GInitiallyUnowned
 */
// InitiallyUnowned is a representation of GLib's GInitiallyUnowned.
type InitiallyUnowned struct {
*Object
}
/*
 * GValue
 */
// Value is a representation of GLib's GValue.
//
// Don't allocate Values on the stack or heap manually as they may not
// be properly unset when going out of scope. Instead, use ValueAlloc(),
// which will set the runtime finalizer to unset the Value after it has
// left scope.
type Value struct {
GValue C.GValue
}
// Native returns a pointer to the underlying GValue.
func (v *Value) Native() *C.GValue {
return &v.GValue
}
// ValueAlloc allocates a Value and sets a runtime finalizer to call
// g_value_unset() on the underlying GValue after leaving scope.
// ValueAlloc returns a non-nil error if the allocation failed.
func ValueAlloc() (*Value, error) {
c := C._g_value_alloc()
if c == nil {
return nil, nilPtrErr
}
v := &Value{*c}
runtime.SetFinalizer(v, (*Value).unset)
return v, nil
}
// ValueInit is a wrapper around g_value_init() and allocates and
// initializes a new Value with the Type t. A runtime finalizer is set
// to call g_value_unset() on the underlying GValue after leaving scope.
// ValueInit returns a non-nil error if the allocation failed.
func ValueInit(t Type) (*Value, error) {
c := C._g_value_init(C.GType(t))
if c == nil {
return nil, nilPtrErr
}
v := &Value{*c}
runtime.SetFinalizer(v, (*Value).unset)
return v, nil
}
// unset releases the GValue's contents; installed as a finalizer by
// ValueAlloc and ValueInit.
func (v *Value) unset() {
C.g_value_unset(v.Native())
}
// GetType is a wrapper around the G_VALUE_HOLDS_GTYPE() macro and
// the g_value_get_gtype() function. GetType returns TYPE_INVALID if v
// does not hold a Type, or otherwise returns the Type of v.
func (v *Value) GetType() Type {
c := C._g_value_holds_gtype(C.gpointer(unsafe.Pointer(v.Native())))
if gobool(c) {
c := C.g_value_get_gtype(v.Native())
return Type(c)
}
return TYPE_INVALID
}
// GValue converts a Go type to a comparable GValue. GValue
// returns a non-nil error if the conversion was unsuccessful.
func GValue(v interface{}) (gvalue *Value, err error) {
// A Go nil maps to a POINTER GValue holding NULL.
if v == nil {
val, err := ValueInit(TYPE_POINTER)
if err != nil {
return nil, err
}
val.SetPointer(uintptr(0)) // technically not portable
return val, nil
}
switch v.(type) {
case bool:
val, err := ValueInit(TYPE_BOOLEAN)
if err != nil {
return nil, err
}
val.SetBool(v.(bool))
return val, nil
case int8:
val, err := ValueInit(TYPE_CHAR)
if err != nil {
return nil, err
}
val.SetSChar(v.(int8))
return val, nil
case int64:
val, err := ValueInit(TYPE_INT64)
if err != nil {
return nil, err
}
val.SetInt64(v.(int64))
return val, nil
case int:
val, err := ValueInit(TYPE_INT)
if err != nil {
return nil, err
}
val.SetInt(v.(int))
return val, nil
case uint8:
val, err := ValueInit(TYPE_UCHAR)
if err != nil {
return nil, err
}
val.SetUChar(v.(uint8))
return val, nil
case uint64:
val, err := ValueInit(TYPE_UINT64)
if err != nil {
return nil, err
}
val.SetUInt64(v.(uint64))
return val, nil
case uint:
val, err := ValueInit(TYPE_UINT)
if err != nil {
return nil, err
}
val.SetUInt(v.(uint))
return val, nil
case float32:
val, err := ValueInit(TYPE_FLOAT)
if err != nil {
return nil, err
}
val.SetFloat(v.(float32))
return val, nil
case float64:
val, err := ValueInit(TYPE_DOUBLE)
if err != nil {
return nil, err
}
val.SetDouble(v.(float64))
return val, nil
case string:
val, err := ValueInit(TYPE_STRING)
if err != nil {
return nil, err
}
val.SetString(v.(string))
return val, nil
default:
// *Object is stored as an OBJECT GValue via its instance pointer.
if obj, ok := v.(*Object); ok {
val, err := ValueInit(TYPE_OBJECT)
if err != nil {
return nil, err
}
val.SetInstance(uintptr(unsafe.Pointer(obj.GObject)))
return val, nil
}
/* Try this since above doesn't catch constants under other types */
rval := reflect.ValueOf(v)
switch rval.Kind() {
case reflect.Int8:
val, err := ValueInit(TYPE_CHAR)
if err != nil {
return nil, err
}
val.SetSChar(int8(rval.Int()))
return val, nil
case reflect.Int16:
return nil, errors.New("Type not implemented")
case reflect.Int32:
return nil, errors.New("Type not implemented")
case reflect.Int64:
val, err := ValueInit(TYPE_INT64)
if err != nil {
return nil, err
}
val.SetInt64(rval.Int())
return val, nil
case reflect.Int:
val, err := ValueInit(TYPE_INT)
if err != nil {
return nil, err
}
val.SetInt(int(rval.Int()))
return val, nil
case reflect.Uintptr:
val, err := ValueInit(TYPE_POINTER)
if err != nil {
return nil, err
}
val.SetPointer(rval.Pointer())
return val, nil
}
}
return nil, errors.New("Type not implemented")
}
// GoValue converts a Value to a comparable Go type. GoValue
// returns a non-nil error if the conversion was unsuccessful. The
// returned interface{} must be type asserted as the actual Go
// representation of the Value.
//
// This function is a wrapper around the many g_value_get_*()
// functions, depending on the type of the Value.
func (v *Value) GoValue() (interface{}, error) {
switch v.GetType() {
case TYPE_INVALID:
return nil, errors.New("Invalid type")
case TYPE_NONE:
return nil, nil
case TYPE_BOOLEAN:
c := C.g_value_get_boolean(v.Native())
return gobool(c), nil
case TYPE_CHAR:
c := C.g_value_get_schar(v.Native())
return int8(c), nil
case TYPE_UCHAR:
c := C.g_value_get_uchar(v.Native())
return uint8(c), nil
case TYPE_INT64:
c := C.g_value_get_int64(v.Native())
return int64(c), nil
case TYPE_INT:
c := C.g_value_get_int(v.Native())
return int(c), nil
case TYPE_UINT64:
c := C.g_value_get_uint64(v.Native())
return uint64(c), nil
case TYPE_UINT:
c := C.g_value_get_uint(v.Native())
return uint(c), nil
case TYPE_FLOAT:
c := C.g_value_get_float(v.Native())
return float32(c), nil
case TYPE_DOUBLE:
c := C.g_value_get_double(v.Native())
return float64(c), nil
case TYPE_STRING:
c := C.g_value_get_string(v.Native())
return C.GoString((*C.char)(c)), nil
default:
// Boxed, param, object, variant, etc. are not mapped here.
return nil, errors.New("Type conversion not supported")
}
}
// SetBool is a wrapper around g_value_set_boolean().
func (v *Value) SetBool(val bool) {
C.g_value_set_boolean(v.Native(), gbool(val))
}
// SetSChar is a wrapper around g_value_set_schar().
func (v *Value) SetSChar(val int8) {
C.g_value_set_schar(v.Native(), C.gint8(val))
}
// SetInt64 is a wrapper around g_value_set_int64().
func (v *Value) SetInt64(val int64) {
C.g_value_set_int64(v.Native(), C.gint64(val))
}
// SetInt is a wrapper around g_value_set_int().
func (v *Value) SetInt(val int) {
C.g_value_set_int(v.Native(), C.gint(val))
}
// SetUChar is a wrapper around g_value_set_uchar().
func (v *Value) SetUChar(val uint8) {
C.g_value_set_uchar(v.Native(), C.guchar(val))
}
// SetUInt64 is a wrapper around g_value_set_uint64().
func (v *Value) SetUInt64(val uint64) {
C.g_value_set_uint64(v.Native(), C.guint64(val))
}
// SetUInt is a wrapper around g_value_set_uint().
func (v *Value) SetUInt(val uint) {
C.g_value_set_uint(v.Native(), C.guint(val))
}
// SetFloat is a wrapper around g_value_set_float().
func (v *Value) SetFloat(val float32) {
C.g_value_set_float(v.Native(), C.gfloat(val))
}
// SetDouble is a wrapper around g_value_set_double().
func (v *Value) SetDouble(val float64) {
C.g_value_set_double(v.Native(), C.gdouble(val))
}
// SetString is a wrapper around g_value_set_string().  The Go string
// is copied into a C string which is freed after the call; GLib keeps
// its own copy of the value.
func (v *Value) SetString(val string) {
cstr := C.CString(val)
defer C.free(unsafe.Pointer(cstr))
C.g_value_set_string(v.Native(), (*C.gchar)(cstr))
}
// SetInstance is a wrapper around g_value_set_instance().
func (v *Value) SetInstance(instance uintptr) {
C.g_value_set_instance(v.Native(), C.gpointer(instance))
}
// SetPointer is a wrapper around g_value_set_pointer().
func (v *Value) SetPointer(p uintptr) {
C.g_value_set_pointer(v.Native(), C.gpointer(p))
}
// GetString is a wrapper around g_value_get_string(). GetString
// returns a non-nil error if g_value_get_string() returned a NULL
// pointer to distinguish between returning a NULL pointer and returning
// an empty string.
func (v *Value) GetString() (string, error) {
c := C.g_value_get_string(v.Native())
if c == nil {
return "", nilPtrErr
}
return C.GoString((*C.char)(c)), nil
}
|
package dockerfile
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/continuity/fs/fstest"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/testutil/httpserver"
"github.com/moby/buildkit/util/testutil/integration"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// TestIntegration registers every dockerfile integration test with the
// shared integration harness, which runs each of them against the
// configured worker sandboxes.
func TestIntegration(t *testing.T) {
integration.Run(t, []integration.Test{
testDockerfileDirs,
testDockerfileInvalidCommand,
testDockerfileADDFromURL,
testDockerfileAddArchive,
testDockerfileScratchConfig,
testExportedHistory,
testExposeExpansion,
testUser,
testDockerignore,
testDockerfileFromGit,
testCopyChown,
testCopyWildcards,
testCopyOverrideFiles,
testMultiStageImplicitFrom,
testCopyVarSubstitution,
})
}
// testDockerfileDirs builds the same Dockerfile three ways — absolute
// context/dockerfile dirs, relative ("." with cmd.Dir set), and with
// the context and dockerfile in different directories — and checks each
// build succeeds and produces a trace file.
func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
t.Parallel()
dockerfile := []byte(`
FROM busybox
COPY foo /foo2
COPY foo /
RUN echo -n bar > foo3
RUN test -f foo
RUN cmp -s foo foo2
RUN cmp -s foo foo3
`)
dir, err := tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("foo", []byte("bar"), 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace := dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
cmd := sb.Cmd(args)
require.NoError(t, cmd.Run())
_, err = os.Stat(trace)
require.NoError(t, err)
// relative urls
args, trace = dfCmdArgs(".", ".")
defer os.RemoveAll(trace)
cmd = sb.Cmd(args)
cmd.Dir = dir
require.NoError(t, cmd.Run())
_, err = os.Stat(trace)
require.NoError(t, err)
// different context and dockerfile directories
dir1, err := tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir1)
dir2, err := tmpdir(
fstest.CreateFile("foo", []byte("bar"), 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir2)
args, trace = dfCmdArgs(dir2, dir1)
defer os.RemoveAll(trace)
cmd = sb.Cmd(args)
cmd.Dir = dir
require.NoError(t, cmd.Run())
_, err = os.Stat(trace)
require.NoError(t, err)
// TODO: test trace file output, cache hits, logs etc.
// TODO: output metadata about original dockerfile command in trace
}
// testDockerfileInvalidCommand checks that a failing RUN step makes
// the build fail and that the executed command plus an executor error
// appear on the build's stderr.
func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox
RUN invalidcmd
`)

	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)

	cmd := sb.Cmd(args)
	// This buffer captures cmd.Stderr; the original misleadingly named
	// it "stdout".
	stderr := new(bytes.Buffer)
	cmd.Stderr = stderr

	err = cmd.Run()
	require.Error(t, err)
	require.Contains(t, stderr.String(), "/bin/sh -c invalidcmd")
	require.Contains(t, stderr.String(), "executor failed running")
}
// testDockerfileADDFromURL verifies ADD from an HTTP URL: first that
// the response body lands under the destination path using the URL's
// basename, then that a URL without a usable basename falls back to
// "__unnamed__" and that the server's Last-Modified is applied to the
// created file.
func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) {
t.Parallel()
modTime := time.Now().Add(-24 * time.Hour) // avoid false positive with current time
resp := httpserver.Response{
Etag: identity.NewID(),
Content: []byte("content1"),
}
resp2 := httpserver.Response{
Etag: identity.NewID(),
LastModified: &modTime,
Content: []byte("content2"),
}
server := httpserver.NewTestServer(map[string]httpserver.Response{
"/foo": resp,
"/": resp2,
})
defer server.Close()
dockerfile := []byte(fmt.Sprintf(`
FROM scratch
ADD %s /dest/
`, server.URL+"/foo"))
dir, err := tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace := dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err := tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
err = cmd.Run()
require.NoError(t, err)
dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo"))
require.NoError(t, err)
require.Equal(t, []byte("content1"), dt)
// test the default properties
dockerfile = []byte(fmt.Sprintf(`
FROM scratch
ADD %s /dest/
`, server.URL+"/"))
dir, err = tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace = dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err = tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
err = cmd.Run()
require.NoError(t, err)
destFile := filepath.Join(destDir, "dest/__unnamed__")
dt, err = ioutil.ReadFile(destFile)
require.NoError(t, err)
require.Equal(t, []byte("content2"), dt)
fi, err := os.Stat(destFile)
require.NoError(t, err)
require.Equal(t, fi.ModTime().Format(http.TimeFormat), modTime.Format(http.TimeFormat))
}
// testDockerfileAddArchive covers archive handling: ADD of a local tar
// extracts it, ADD of a local .tar.gz decompresses and extracts, while
// COPY of the same .tar.gz and ADD from a URL both keep the file
// as-is.
func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) {
t.Parallel()
buf := bytes.NewBuffer(nil)
tw := tar.NewWriter(buf)
expectedContent := []byte("content0")
err := tw.WriteHeader(&tar.Header{
Name: "foo",
Typeflag: tar.TypeReg,
Size: int64(len(expectedContent)),
Mode: 0644,
})
require.NoError(t, err)
_, err = tw.Write(expectedContent)
require.NoError(t, err)
err = tw.Close()
require.NoError(t, err)
dockerfile := []byte(`
FROM scratch
ADD t.tar /
`)
dir, err := tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar", buf.Bytes(), 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace := dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err := tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
require.NoError(t, cmd.Run())
dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, expectedContent, dt)
// add gzip tar
buf2 := bytes.NewBuffer(nil)
gz := gzip.NewWriter(buf2)
_, err = gz.Write(buf.Bytes())
require.NoError(t, err)
err = gz.Close()
require.NoError(t, err)
dockerfile = []byte(`
FROM scratch
ADD t.tar.gz /
`)
dir, err = tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace = dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err = tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
require.NoError(t, cmd.Run())
dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
require.NoError(t, err)
require.Equal(t, expectedContent, dt)
// COPY doesn't extract
dockerfile = []byte(`
FROM scratch
COPY t.tar.gz /
`)
dir, err = tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace = dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err = tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
require.NoError(t, cmd.Run())
dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
require.NoError(t, err)
require.Equal(t, buf2.Bytes(), dt)
// ADD from URL doesn't extract
resp := httpserver.Response{
Etag: identity.NewID(),
Content: buf2.Bytes(),
}
server := httpserver.NewTestServer(map[string]httpserver.Response{
"/t.tar.gz": resp,
})
defer server.Close()
dockerfile = []byte(fmt.Sprintf(`
FROM scratch
ADD %s /
`, server.URL+"/t.tar.gz"))
dir, err = tmpdir(
fstest.CreateFile("Dockerfile", dockerfile, 0600),
)
require.NoError(t, err)
defer os.RemoveAll(dir)
args, trace = dfCmdArgs(dir, dir)
defer os.RemoveAll(trace)
destDir, err = tmpdir()
require.NoError(t, err)
defer os.RemoveAll(destDir)
cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
require.NoError(t, cmd.Run())
dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
require.NoError(t, err)
require.Equal(t, buf2.Bytes(), dt)
}
// testDockerfileScratchConfig builds a scratch image containing only
// an ENV instruction and inspects the resulting OCI config through
// containerd: platform fields are populated, there are no layers, the
// single history entry is an empty layer, and the ENV (plus a default
// PATH) is present.  Runs only on the containerd worker.
func testDockerfileScratchConfig(t *testing.T, sb integration.Sandbox) {
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		t.Skip("only for containerd worker")
	} else {
		cdAddress = cd.ContainerdAddress()
	}

	t.Parallel()
	dockerfile := []byte(`
FROM scratch
ENV foo=bar
`)

	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)

	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)

	target := "example.com/moby/dockerfilescratch:test"
	cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
	err = cmd.Run()
	require.NoError(t, err)

	// Named ctd so the local does not shadow the imported buildkit
	// "client" package, as the original did.
	ctd, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer ctd.Close()

	ctx := namespaces.WithNamespace(context.Background(), "buildkit")

	img, err := ctd.ImageService().Get(ctx, target)
	require.NoError(t, err)

	desc, err := img.Config(ctx, ctd.ContentStore(), platforms.Default())
	require.NoError(t, err)

	dt, err := content.ReadBlob(ctx, ctd.ContentStore(), desc.Digest)
	require.NoError(t, err)

	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)

	require.NotEqual(t, "", ociimg.OS)
	require.NotEqual(t, "", ociimg.Architecture)
	require.NotEqual(t, "", ociimg.Config.WorkingDir)
	require.Equal(t, "layers", ociimg.RootFS.Type)
	require.Equal(t, 0, len(ociimg.RootFS.DiffIDs))

	require.Equal(t, 1, len(ociimg.History))
	require.Contains(t, ociimg.History[0].CreatedBy, "ENV foo=bar")
	require.Equal(t, true, ociimg.History[0].EmptyLayer)

	require.Contains(t, ociimg.Config.Env, "foo=bar")
	require.Condition(t, func() bool {
		for _, env := range ociimg.Config.Env {
			if strings.HasPrefix(env, "PATH=") {
				return true
			}
		}
		return false
	})
}
// testExposeExpansion verifies that EXPOSE expands build args ($PORTS) and
// normalizes port entries: values without a protocol get /tcp appended while
// an explicit /udp suffix is preserved. The image-config inspection only runs
// on the containerd worker; other workers build the image and return early.
func testExposeExpansion(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
ARG PORTS="3000 4000/udp"
EXPOSE $PORTS
EXPOSE 5000
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	target := "example.com/moby/dockerfileexpansion:test"
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		Exporter: client.ExporterImage,
		ExporterAttrs: map[string]string{
			"name": target,
		},
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		return
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	// Named "ctd" (not "client") so it does not shadow the imported buildkit
	// client package that is used above in this function.
	ctd, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer ctd.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := ctd.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, ctd.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err := content.ReadBlob(ctx, ctd.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.Equal(t, 3, len(ociimg.Config.ExposedPorts))
	// ExposedPorts is a map; collect and sort the keys for stable assertions.
	var ports []string
	for p := range ociimg.Config.ExposedPorts {
		ports = append(ports, p)
	}
	sort.Strings(ports)
	require.Equal(t, "3000/tcp", ports[0])
	require.Equal(t, "4000/udp", ports[1])
	require.Equal(t, "5000/tcp", ports[2])
}
// testDockerignore verifies .dockerignore handling for COPY: files matched by
// the ignore patterns (ba*, Dockerfile, .dockerignore itself) must not reach
// the build context, while the negated pattern !bay re-includes a previously
// excluded file.
func testDockerignore(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
COPY . .
`)
	dockerignore := []byte(`
ba*
Dockerfile
!bay
.dockerignore
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
		fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
		fstest.CreateFile("baz", []byte(`baz-contents`), 0600),
		fstest.CreateFile("bay", []byte(`bay-contents`), 0600),
		fstest.CreateFile(".dockerignore", dockerignore, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// foo is not matched by any ignore pattern and must be copied through.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
	require.NoError(t, err)
	require.Equal(t, "foo-contents", string(dt))
	// Everything matched by the ignore patterns must be absent from output.
	_, err = os.Stat(filepath.Join(destDir, ".dockerignore"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "Dockerfile"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "bar"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "baz"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	// bay matched ba* but is re-included by the !bay negation.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bay"))
	require.NoError(t, err)
	require.Equal(t, "bay-contents", string(dt))
}
// testExportedHistory builds a multi-stage Dockerfile and verifies the
// exported image's OCI history: entries are scoped to the final stage only,
// and each entry's CreatedBy text and EmptyLayer flag match the instruction
// that produced it. Only runs on the containerd worker.
func testExportedHistory(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	// using multi-stage to test that history is scoped to one stage
	dockerfile := []byte(`
FROM busybox AS base
ENV foo=bar
COPY foo /foo2
FROM busybox
COPY --from=base foo2 foo3
WORKDIR /
RUN echo bar > foo4
RUN ["ls"]
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte("contents0"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	target := "example.com/moby/dockerfilescratch:test"
	cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
	require.NoError(t, cmd.Run())
	// TODO: expose this test to OCI worker
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		t.Skip("only for containerd worker")
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	// Read the image config back out of containerd to inspect its history.
	client, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := client.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, client.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err := content.ReadBlob(ctx, client.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.Equal(t, "layers", ociimg.RootFS.Type)
	// this depends on busybox. should be ok after freezing images
	require.Equal(t, 3, len(ociimg.RootFS.DiffIDs))
	// 6 entries: busybox base history plus the final stage's instructions.
	require.Equal(t, 6, len(ociimg.History))
	require.Contains(t, ociimg.History[2].CreatedBy, "COPY foo2 foo3")
	require.Equal(t, false, ociimg.History[2].EmptyLayer)
	require.Contains(t, ociimg.History[3].CreatedBy, "WORKDIR /")
	require.Equal(t, true, ociimg.History[3].EmptyLayer)
	require.Contains(t, ociimg.History[4].CreatedBy, "echo bar > foo4")
	require.Equal(t, false, ociimg.History[4].EmptyLayer)
	require.Contains(t, ociimg.History[5].CreatedBy, "RUN ls")
	require.Equal(t, true, ociimg.History[5].EmptyLayer)
}
// testUser verifies USER handling: RUN commands execute as the user selected
// by the most recent USER instruction (checked via id -un output exported to
// the local filesystem), and the final USER is recorded in the exported image
// config. The config check only runs on the containerd worker.
func testUser(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox AS base
RUN mkdir -m 0777 /out
RUN id -un > /out/rootuser
USER daemon
RUN id -un > /out/daemonuser
FROM scratch
COPY --from=base /out /
USER nobody
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// require.Equal takes (t, expected, actual) — expected value first.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "rootuser"))
	require.NoError(t, err)
	require.Equal(t, "root\n", string(dt))
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "daemonuser"))
	require.NoError(t, err)
	require.Equal(t, "daemon\n", string(dt))
	// test user in exported
	target := "example.com/moby/dockerfileuser:test"
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		Exporter: client.ExporterImage,
		ExporterAttrs: map[string]string{
			"name": target,
		},
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		return
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	// Named "ctd" (not "client") so it does not shadow the imported buildkit
	// client package that is used above in this function.
	ctd, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer ctd.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := ctd.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, ctd.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err = content.ReadBlob(ctx, ctd.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.Equal(t, "nobody", ociimg.Config.User)
}
// testCopyChown verifies COPY --chown with both a symbolic user (daemon) and
// a numeric uid plus named group (1000:nogroup). Ownership is checked inside
// the build via stat and the results are exported to the local filesystem.
func testCopyChown(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox AS base
RUN mkdir -m 0777 /out
COPY --chown=daemon foo /
COPY --chown=1000:nogroup bar /baz
RUN stat -c "%U %G" /foo > /out/fooowner
RUN stat -c "%u %G" /baz/sub > /out/subowner
FROM scratch
COPY --from=base /out /
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
		fstest.CreateDir("bar", 0700),
		fstest.CreateFile("bar/sub", nil, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// --chown=daemon applies both user and group "daemon".
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooowner"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "daemon daemon\n")
	// --chown=1000:nogroup applies recursively to files inside copied dirs.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subowner"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "1000 nogroup\n")
}
// testCopyOverrideFiles verifies that repeating a COPY of the same source
// (a directory tree and a single file), and then copying a directory over an
// already-populated destination, overwrites cleanly without error and leaves
// the expected contents in place.
func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
COPY sub sub
COPY sub sub
COPY files/foo.go dest/foo.go
COPY files/foo.go dest/foo.go
COPY files dest
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateDir("sub", 0700),
		fstest.CreateDir("sub/dir1", 0700),
		fstest.CreateDir("sub/dir1/dir2", 0700),
		fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600),
		fstest.CreateDir("files", 0700),
		fstest.CreateFile("files/foo.go", []byte(`foo.go-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// The nested tree survives being copied twice to the same destination.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// The single file survives repeated copies and the final directory copy.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo.go-contents")
}
// testCopyVarSubstitution verifies that environment variables declared with
// ENV are substituted in COPY source arguments ($FOO resolves to "bar").
func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
ENV FOO bar
COPY $FOO baz
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// COPY $FOO baz must have copied the file named "bar" to "baz".
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "bar-contents")
}
// testCopyWildcards exercises COPY source patterns: glob matches into a
// directory destination, glob to a file destination, wildcards in
// intermediate path components, copying the whole context, and the
// trailing-slash / "/." / bare-directory source forms which all copy a
// directory's contents rather than the directory itself.
func testCopyWildcards(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
COPY *.go /gofiles/
COPY f*.go foo2.go
COPY sub/* /subdest/
COPY sub/*/dir2/foo /subdest2/
COPY sub/*/dir2/foo /subdest3/bar
COPY . all/
COPY sub/dir1/ subdest4
COPY sub/dir1/. subdest5
COPY sub/dir1 subdest6
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo.go", []byte(`foo-contents`), 0600),
		fstest.CreateFile("bar.go", []byte(`bar-contents`), 0600),
		fstest.CreateDir("sub", 0700),
		fstest.CreateDir("sub/dir1", 0700),
		fstest.CreateDir("sub/dir1/dir2", 0700),
		fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// *.go into a directory destination copies every match.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "gofiles/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "gofiles/bar.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "bar-contents")
	// f*.go matches only foo.go, so a file destination is allowed.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo2.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// sub/* copies the matched directory's tree under the destination.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir1/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// Wildcard in an intermediate path component.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest3/bar"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// COPY . copies the full context.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "all/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// All three directory-source spellings copy the directory's contents.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
}
// testDockerfileFromGit verifies building with a git URL as the context.
// A repo is served over HTTP with two commits: the "first" branch has one
// COPY output (bar), master adds a second (bar2). Building with a #first
// fragment must check out the branch; building without a fragment uses the
// default branch and produces both files.
func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	gitDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(gitDir)
	dockerfile := `
FROM busybox AS build
RUN echo -n fromgit > foo
FROM scratch
COPY --from=build foo bar
`
	err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
	require.NoError(t, err)
	// First commit, tagged with a "first" branch so it can be addressed.
	err = runShell(gitDir,
		"git init",
		"git config --local user.email test",
		"git config --local user.name test",
		"git add Dockerfile",
		"git commit -m initial",
		"git branch first",
	)
	require.NoError(t, err)
	// Second commit on the default branch adds bar2.
	dockerfile += `
COPY --from=build foo bar2
`
	err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
	require.NoError(t, err)
	// update-server-info makes the repo servable via the "dumb" HTTP protocol.
	err = runShell(gitDir,
		"git add Dockerfile",
		"git commit -m second",
		"git update-server-info",
	)
	require.NoError(t, err)
	server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir, ".git"))))
	defer server.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	// Build from the "first" branch: only bar must exist.
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendAttrs: map[string]string{
			"context": "git://" + server.URL + "/#first",
		},
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
	}, nil)
	require.NoError(t, err)
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
	_, err = os.Stat(filepath.Join(destDir, "bar2"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	// second request from master branch contains both files
	destDir, err = ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendAttrs: map[string]string{
			"context": "git://" + server.URL + "/",
		},
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
	}, nil)
	require.NoError(t, err)
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
}
// testMultiStageImplicitFrom verifies COPY --from with an image reference
// that is not a declared stage (busybox resolves to the external image), and
// that a stage name which masks a well-known image name (AS golang) takes
// precedence over the external image.
func testMultiStageImplicitFrom(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
COPY --from=busybox /etc/passwd test
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// /etc/passwd came from the busybox image, so it should mention root.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "test"))
	require.NoError(t, err)
	require.Contains(t, string(dt), "root")
	// testing masked image will load actual stage
	dockerfile = []byte(`
FROM busybox AS golang
RUN mkdir /usr/bin && echo -n foo > /usr/bin/go
FROM scratch
COPY --from=golang /usr/bin/go go
`)
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	destDir, err = ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// "go" must come from the local "golang" stage, not the golang image.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "go"))
	require.NoError(t, err)
	require.Contains(t, string(dt), "foo")
}
// tmpdir creates a temporary directory and populates it with the given
// fstest appliers. On success the caller is responsible for removing the
// returned directory; on failure no directory is left behind.
func tmpdir(appliers ...fstest.Applier) (string, error) {
	tmpdir, err := ioutil.TempDir("", "buildkit-dockerfile")
	if err != nil {
		return "", err
	}
	if err := fstest.Apply(appliers...).Apply(tmpdir); err != nil {
		// Don't leak the directory when population fails.
		os.RemoveAll(tmpdir)
		return "", err
	}
	return tmpdir, nil
}
// dfCmdArgs builds the buildctl argument string for building the given
// context and dockerfile directories, and returns it together with the path
// of the trace file that the build will write.
func dfCmdArgs(ctx, dockerfile string) (string, string) {
	traceFile := filepath.Join(os.TempDir(), "trace"+identity.NewID())
	args := fmt.Sprintf(
		"build --no-progress --frontend dockerfile.v0 --local context=%s --local dockerfile=%s --trace=%s",
		ctx, dockerfile, traceFile,
	)
	return args, traceFile
}
// runShell executes each command string via `sh -c` inside dir, stopping at
// the first failure and wrapping its error with the offending command.
func runShell(dir string, cmds ...string) error {
	run := func(script string) error {
		cmd := exec.Command("sh", "-c", script)
		cmd.Dir = dir
		return cmd.Run()
	}
	for _, script := range cmds {
		if err := run(script); err != nil {
			return errors.Wrapf(err, "error running %v", script)
		}
	}
	return nil
}
dockerfile: add tests for case insensitive targets
Signed-off-by: Tonis Tiigi <c2470c48b2d3312d61f94f18d3a1cd113d1915ad@gmail.com>
package dockerfile
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/containerd/containerd"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/platforms"
"github.com/containerd/continuity/fs/fstest"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/testutil/httpserver"
"github.com/moby/buildkit/util/testutil/integration"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
// TestIntegration registers every dockerfile frontend test with the
// integration harness, which runs them against each configured worker
// sandbox (OCI, containerd, ...).
func TestIntegration(t *testing.T) {
	integration.Run(t, []integration.Test{
		testDockerfileDirs,
		testDockerfileInvalidCommand,
		testDockerfileADDFromURL,
		testDockerfileAddArchive,
		testDockerfileScratchConfig,
		testExportedHistory,
		testExposeExpansion,
		testUser,
		testDockerignore,
		testDockerfileFromGit,
		testCopyChown,
		testCopyWildcards,
		testCopyOverrideFiles,
		testMultiStageImplicitFrom,
		testCopyVarSubstitution,
		testMultiStageCaseInsensitive,
	})
}
// testDockerfileDirs runs the same build with three directory layouts:
// absolute context/dockerfile paths, relative paths (cwd set to the context),
// and separate context and dockerfile directories. Each run must succeed and
// produce a trace file.
func testDockerfileDirs(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox
COPY foo /foo2
COPY foo /
RUN echo -n bar > foo3
RUN test -f foo
RUN cmp -s foo foo2
RUN cmp -s foo foo3
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte("bar"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	cmd := sb.Cmd(args)
	require.NoError(t, cmd.Run())
	_, err = os.Stat(trace)
	require.NoError(t, err)
	// relative urls
	args, trace = dfCmdArgs(".", ".")
	defer os.RemoveAll(trace)
	cmd = sb.Cmd(args)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
	_, err = os.Stat(trace)
	require.NoError(t, err)
	// different context and dockerfile directories
	dir1, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir1)
	dir2, err := tmpdir(
		fstest.CreateFile("foo", []byte("bar"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir2)
	args, trace = dfCmdArgs(dir2, dir1)
	defer os.RemoveAll(trace)
	cmd = sb.Cmd(args)
	cmd.Dir = dir
	require.NoError(t, cmd.Run())
	_, err = os.Stat(trace)
	require.NoError(t, err)
	// TODO: test trace file output, cache hits, logs etc.
	// TODO: output metadata about original dockerfile command in trace
}
// testDockerfileInvalidCommand checks that a RUN of a nonexistent command
// fails the build and that the failure output contains both the attempted
// shell command line and the executor error.
func testDockerfileInvalidCommand(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox
RUN invalidcmd
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	cmd := sb.Cmd(args)
	// The buffer captures stderr; named accordingly (it was previously named
	// "stdout", which was misleading).
	stderr := new(bytes.Buffer)
	cmd.Stderr = stderr
	err = cmd.Run()
	require.Error(t, err)
	require.Contains(t, stderr.String(), "/bin/sh -c invalidcmd")
	require.Contains(t, stderr.String(), "executor failed running")
}
// testDockerfileADDFromURL verifies ADD with an HTTP URL source: the response
// body is written to the destination, a URL whose path yields no filename
// falls back to "__unnamed__", and the server's Last-Modified header is
// applied to the created file's modification time.
func testDockerfileADDFromURL(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	modTime := time.Now().Add(-24 * time.Hour) // avoid false positive with current time
	resp := httpserver.Response{
		Etag:    identity.NewID(),
		Content: []byte("content1"),
	}
	// Served at "/" so the URL has no usable filename component.
	resp2 := httpserver.Response{
		Etag:         identity.NewID(),
		LastModified: &modTime,
		Content:      []byte("content2"),
	}
	server := httpserver.NewTestServer(map[string]httpserver.Response{
		"/foo": resp,
		"/":    resp2,
	})
	defer server.Close()
	dockerfile := []byte(fmt.Sprintf(`
FROM scratch
ADD %s /dest/
`, server.URL+"/foo"))
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err := tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	err = cmd.Run()
	require.NoError(t, err)
	// The URL's basename "foo" becomes the filename under /dest/.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo"))
	require.NoError(t, err)
	require.Equal(t, []byte("content1"), dt)
	// test the default properties
	dockerfile = []byte(fmt.Sprintf(`
FROM scratch
ADD %s /dest/
`, server.URL+"/"))
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace = dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err = tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	err = cmd.Run()
	require.NoError(t, err)
	// No filename in the URL path: the fallback name "__unnamed__" is used.
	destFile := filepath.Join(destDir, "dest/__unnamed__")
	dt, err = ioutil.ReadFile(destFile)
	require.NoError(t, err)
	require.Equal(t, []byte("content2"), dt)
	// The Last-Modified header must be reflected in the file's mtime.
	fi, err := os.Stat(destFile)
	require.NoError(t, err)
	require.Equal(t, fi.ModTime().Format(http.TimeFormat), modTime.Format(http.TimeFormat))
}
// testDockerfileAddArchive verifies archive handling: ADD of a local tar (and
// gzipped tar) extracts it into the image, while COPY of the same archive and
// ADD from a URL keep the archive as an opaque file.
func testDockerfileAddArchive(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	// Build an in-memory tar with a single file "foo".
	buf := bytes.NewBuffer(nil)
	tw := tar.NewWriter(buf)
	expectedContent := []byte("content0")
	err := tw.WriteHeader(&tar.Header{
		Name:     "foo",
		Typeflag: tar.TypeReg,
		Size:     int64(len(expectedContent)),
		Mode:     0644,
	})
	require.NoError(t, err)
	_, err = tw.Write(expectedContent)
	require.NoError(t, err)
	err = tw.Close()
	require.NoError(t, err)
	dockerfile := []byte(`
FROM scratch
ADD t.tar /
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("t.tar", buf.Bytes(), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err := tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd := sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	require.NoError(t, cmd.Run())
	// ADD of a local tar extracts: "foo" exists with the original content.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
	require.NoError(t, err)
	require.Equal(t, expectedContent, dt)
	// add gzip tar
	buf2 := bytes.NewBuffer(nil)
	gz := gzip.NewWriter(buf2)
	_, err = gz.Write(buf.Bytes())
	require.NoError(t, err)
	err = gz.Close()
	require.NoError(t, err)
	dockerfile = []byte(`
FROM scratch
ADD t.tar.gz /
`)
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace = dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err = tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	require.NoError(t, cmd.Run())
	// Gzipped tar is decompressed and extracted too.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo"))
	require.NoError(t, err)
	require.Equal(t, expectedContent, dt)
	// COPY doesn't extract
	dockerfile = []byte(`
FROM scratch
COPY t.tar.gz /
`)
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("t.tar.gz", buf2.Bytes(), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace = dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err = tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	require.NoError(t, cmd.Run())
	// The archive file itself must be present, byte-identical.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
	require.NoError(t, err)
	require.Equal(t, buf2.Bytes(), dt)
	// ADD from URL doesn't extract
	resp := httpserver.Response{
		Etag:    identity.NewID(),
		Content: buf2.Bytes(),
	}
	server := httpserver.NewTestServer(map[string]httpserver.Response{
		"/t.tar.gz": resp,
	})
	defer server.Close()
	dockerfile = []byte(fmt.Sprintf(`
FROM scratch
ADD %s /
`, server.URL+"/t.tar.gz"))
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace = dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	destDir, err = tmpdir()
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	cmd = sb.Cmd(args + fmt.Sprintf(" --exporter=local --exporter-opt output=%s", destDir))
	require.NoError(t, cmd.Run())
	// URL-sourced archives are stored as-is, never unpacked.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "t.tar.gz"))
	require.NoError(t, err)
	require.Equal(t, buf2.Bytes(), dt)
}
// testDockerfileScratchConfig builds a scratch-based image containing only an
// ENV instruction and verifies the exported OCI image config: platform fields
// are populated, no layers are produced, the ENV appears as an empty-layer
// history entry, and Config.Env carries both the declared value and a PATH.
func testDockerfileScratchConfig(t *testing.T, sb integration.Sandbox) {
	// The result is inspected through containerd's image store, so this test
	// only runs on sandboxes that expose a containerd address.
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		t.Skip("only for containerd worker")
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
ENV foo=bar
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	target := "example.com/moby/dockerfilescratch:test"
	cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
	err = cmd.Run()
	require.NoError(t, err)
	// Read the image config blob back out of containerd's content store.
	client, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := client.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, client.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err := content.ReadBlob(ctx, client.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.NotEqual(t, "", ociimg.OS)
	require.NotEqual(t, "", ociimg.Architecture)
	require.NotEqual(t, "", ociimg.Config.WorkingDir)
	require.Equal(t, "layers", ociimg.RootFS.Type)
	// FROM scratch + ENV only: no filesystem layers, one history entry.
	require.Equal(t, 0, len(ociimg.RootFS.DiffIDs))
	require.Equal(t, 1, len(ociimg.History))
	require.Contains(t, ociimg.History[0].CreatedBy, "ENV foo=bar")
	require.Equal(t, true, ociimg.History[0].EmptyLayer)
	require.Contains(t, ociimg.Config.Env, "foo=bar")
	// A default PATH is expected to be injected even for scratch images.
	require.Condition(t, func() bool {
		for _, env := range ociimg.Config.Env {
			if strings.HasPrefix(env, "PATH=") {
				return true
			}
		}
		return false
	})
}
// testExposeExpansion verifies that EXPOSE expands build args ($PORTS) and
// normalizes port entries: values without a protocol get /tcp appended while
// an explicit /udp suffix is preserved. The image-config inspection only runs
// on the containerd worker; other workers build the image and return early.
func testExposeExpansion(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
ARG PORTS="3000 4000/udp"
EXPOSE $PORTS
EXPOSE 5000
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	target := "example.com/moby/dockerfileexpansion:test"
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		Exporter: client.ExporterImage,
		ExporterAttrs: map[string]string{
			"name": target,
		},
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		return
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	// Named "ctd" (not "client") so it does not shadow the imported buildkit
	// client package that is used above in this function.
	ctd, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer ctd.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := ctd.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, ctd.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err := content.ReadBlob(ctx, ctd.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.Equal(t, 3, len(ociimg.Config.ExposedPorts))
	// ExposedPorts is a map; collect and sort the keys for stable assertions.
	var ports []string
	for p := range ociimg.Config.ExposedPorts {
		ports = append(ports, p)
	}
	sort.Strings(ports)
	require.Equal(t, "3000/tcp", ports[0])
	require.Equal(t, "4000/udp", ports[1])
	require.Equal(t, "5000/tcp", ports[2])
}
// testDockerignore verifies .dockerignore handling for COPY: files matched by
// the ignore patterns (ba*, Dockerfile, .dockerignore itself) must not reach
// the build context, while the negated pattern !bay re-includes a previously
// excluded file.
func testDockerignore(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
COPY . .
`)
	dockerignore := []byte(`
ba*
Dockerfile
!bay
.dockerignore
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
		fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
		fstest.CreateFile("baz", []byte(`baz-contents`), 0600),
		fstest.CreateFile("bay", []byte(`bay-contents`), 0600),
		fstest.CreateFile(".dockerignore", dockerignore, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// foo is not matched by any ignore pattern and must be copied through.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
	require.NoError(t, err)
	require.Equal(t, "foo-contents", string(dt))
	// Everything matched by the ignore patterns must be absent from output.
	_, err = os.Stat(filepath.Join(destDir, ".dockerignore"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "Dockerfile"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "bar"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	_, err = os.Stat(filepath.Join(destDir, "baz"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	// bay matched ba* but is re-included by the !bay negation.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bay"))
	require.NoError(t, err)
	require.Equal(t, "bay-contents", string(dt))
}
// testExportedHistory builds a multi-stage Dockerfile, exports the image, and
// asserts the OCI image history via the containerd API: history entries are
// scoped to the final stage only, and EmptyLayer is set for metadata-only
// steps (WORKDIR, exec-form RUN that changes nothing). Skipped on workers
// that do not expose a containerd address.
func testExportedHistory(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	// using multi-stage to test that history is scoped to one stage
	dockerfile := []byte(`
FROM busybox AS base
ENV foo=bar
COPY foo /foo2
FROM busybox
COPY --from=base foo2 foo3
WORKDIR /
RUN echo bar > foo4
RUN ["ls"]
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte("contents0"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	args, trace := dfCmdArgs(dir, dir)
	defer os.RemoveAll(trace)
	target := "example.com/moby/dockerfilescratch:test"
	cmd := sb.Cmd(args + " --exporter=image --exporter-opt=name=" + target)
	require.NoError(t, cmd.Run())
	// TODO: expose this test to OCI worker
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		t.Skip("only for containerd worker")
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	// Read the exported image config back out of containerd's content store.
	client, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := client.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, client.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err := content.ReadBlob(ctx, client.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	require.Equal(t, "layers", ociimg.RootFS.Type)
	// this depends on busybox. should be ok after freezing images
	require.Equal(t, 3, len(ociimg.RootFS.DiffIDs))
	// Only the final stage contributes history entries; the first stage's
	// ENV/COPY must not appear here.
	require.Equal(t, 6, len(ociimg.History))
	require.Contains(t, ociimg.History[2].CreatedBy, "COPY foo2 foo3")
	require.Equal(t, false, ociimg.History[2].EmptyLayer)
	require.Contains(t, ociimg.History[3].CreatedBy, "WORKDIR /")
	require.Equal(t, true, ociimg.History[3].EmptyLayer)
	require.Contains(t, ociimg.History[4].CreatedBy, "echo bar > foo4")
	require.Equal(t, false, ociimg.History[4].EmptyLayer)
	require.Contains(t, ociimg.History[5].CreatedBy, "RUN ls")
	require.Equal(t, true, ociimg.History[5].EmptyLayer)
}
// testUser verifies USER handling: RUN steps execute as the currently active
// user (root before USER daemon, daemon after), and the final stage's USER is
// recorded in the exported image config. The image-config half runs only on
// containerd workers; other workers exercise just the local-export half.
func testUser(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox AS base
RUN mkdir -m 0777 /out
RUN id -un > /out/rootuser
USER daemon
RUN id -un > /out/daemonuser
FROM scratch
COPY --from=base /out /
USER nobody
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// The files written by `id -un` capture the user each RUN executed as.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "rootuser"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "root\n")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "daemonuser"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "daemon\n")
	// test user in exported
	target := "example.com/moby/dockerfileuser:test"
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		Exporter: client.ExporterImage,
		ExporterAttrs: map[string]string{
			"name": target,
		},
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	var cdAddress string
	if cd, ok := sb.(interface {
		ContainerdAddress() string
	}); !ok {
		// No containerd access on this worker; image-config check is skipped.
		return
	} else {
		cdAddress = cd.ContainerdAddress()
	}
	client, err := containerd.New(cdAddress)
	require.NoError(t, err)
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "buildkit")
	img, err := client.ImageService().Get(ctx, target)
	require.NoError(t, err)
	desc, err := img.Config(ctx, client.ContentStore(), platforms.Default())
	require.NoError(t, err)
	dt, err = content.ReadBlob(ctx, client.ContentStore(), desc.Digest)
	require.NoError(t, err)
	var ociimg ocispec.Image
	err = json.Unmarshal(dt, &ociimg)
	require.NoError(t, err)
	// The last USER directive of the final stage must end up in the config.
	require.Equal(t, "nobody", ociimg.Config.User)
}
// testCopyChown verifies COPY --chown with both a user name ("daemon") and a
// numeric-uid:group form ("1000:nogroup"), checking ownership via `stat`
// output captured into files and exported with the local exporter.
func testCopyChown(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM busybox AS base
RUN mkdir -m 0777 /out
COPY --chown=daemon foo /
COPY --chown=1000:nogroup bar /baz
RUN stat -c "%U %G" /foo > /out/fooowner
RUN stat -c "%u %G" /baz/sub > /out/subowner
FROM scratch
COPY --from=base /out /
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte(`foo-contents`), 0600),
		fstest.CreateDir("bar", 0700),
		fstest.CreateFile("bar/sub", nil, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// --chown=daemon resolves both user and group to "daemon".
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "fooowner"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "daemon daemon\n")
	// --chown applies recursively: bar/sub gets uid 1000, group nogroup.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subowner"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "1000 nogroup\n")
}
// testCopyOverrideFiles checks that repeating the same COPY (for both a
// directory tree and a single file) and then copying a directory over an
// already-populated destination succeeds, with the final contents winning.
func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
COPY sub sub
COPY sub sub
COPY files/foo.go dest/foo.go
COPY files/foo.go dest/foo.go
COPY files dest
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateDir("sub", 0700),
		fstest.CreateDir("sub/dir1", 0700),
		fstest.CreateDir("sub/dir1/dir2", 0700),
		fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600),
		fstest.CreateDir("files", 0700),
		fstest.CreateFile("files/foo.go", []byte(`foo.go-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// Nested tree survives being copied twice onto itself.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "sub/dir1/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// Single file survives repeat copy and directory-over-file copy.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo.go-contents")
}
// testCopyVarSubstitution verifies that environment variables declared with
// ENV are substituted in COPY source arguments ($FOO -> "bar").
func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
ENV FOO bar
COPY $FOO baz
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// COPY $FOO must have resolved to the file "bar" from the context.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "bar-contents")
}
// testCopyWildcards exercises COPY source patterns: glob into a directory,
// glob onto a file name, wildcard directory components, copying the whole
// context, and the trailing-slash / "/." / bare-directory source variants
// (which all copy directory contents, not the directory itself).
func testCopyWildcards(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS base
COPY *.go /gofiles/
COPY f*.go foo2.go
COPY sub/* /subdest/
COPY sub/*/dir2/foo /subdest2/
COPY sub/*/dir2/foo /subdest3/bar
COPY . all/
COPY sub/dir1/ subdest4
COPY sub/dir1/. subdest5
COPY sub/dir1 subdest6
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo.go", []byte(`foo-contents`), 0600),
		fstest.CreateFile("bar.go", []byte(`bar-contents`), 0600),
		fstest.CreateDir("sub", 0700),
		fstest.CreateDir("sub/dir1", 0700),
		fstest.CreateDir("sub/dir1/dir2", 0700),
		fstest.CreateFile("sub/dir1/dir2/foo", []byte(`foo-contents`), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// *.go into a directory picks up both .go files.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "gofiles/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "gofiles/bar.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "bar-contents")
	// Single-match glob onto an explicit file name.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "foo2.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// sub/* copies dir1's tree under the destination.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir1/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// Wildcard directory component; trailing slash keeps the file name.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// Wildcard source onto an explicit destination file name.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest3/bar"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// COPY . copies the full context.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "all/foo.go"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	// The three directory-source forms behave identically: contents only.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest4/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest5/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest6/dir2/foo"))
	require.NoError(t, err)
	require.Equal(t, string(dt), "foo-contents")
}
// testDockerfileFromGit builds from a git context served over HTTP (git
// dumb-http protocol via `git update-server-info` + http.FileServer). Two
// commits are created; building the "first" branch must produce only the
// first commit's output, while the default branch produces both.
func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	gitDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(gitDir)
	dockerfile := `
FROM busybox AS build
RUN echo -n fromgit > foo
FROM scratch
COPY --from=build foo bar
`
	err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
	require.NoError(t, err)
	// First commit, tagged by the "first" branch.
	err = runShell(gitDir,
		"git init",
		"git config --local user.email test",
		"git config --local user.name test",
		"git add Dockerfile",
		"git commit -m initial",
		"git branch first",
	)
	require.NoError(t, err)
	// Second commit on the default branch adds a second COPY.
	dockerfile += `
COPY --from=build foo bar2
`
	err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte(dockerfile), 0600)
	require.NoError(t, err)
	err = runShell(gitDir,
		"git add Dockerfile",
		"git commit -m second",
		"git update-server-info",
	)
	require.NoError(t, err)
	// Serve the .git directory so the daemon can fetch it over HTTP.
	server := httptest.NewServer(http.FileServer(http.Dir(filepath.Join(gitDir, ".git"))))
	defer server.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendAttrs: map[string]string{
			"context": "git://" + server.URL + "/#first",
		},
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
	}, nil)
	require.NoError(t, err)
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
	// The "first" branch predates bar2; it must be absent.
	_, err = os.Stat(filepath.Join(destDir, "bar2"))
	require.Error(t, err)
	require.True(t, os.IsNotExist(err))
	// second request from master branch contains both files
	destDir, err = ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend: "dockerfile.v0",
		FrontendAttrs: map[string]string{
			"context": "git://" + server.URL + "/",
		},
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
	}, nil)
	require.NoError(t, err)
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "bar2"))
	require.NoError(t, err)
	require.Equal(t, "fromgit", string(dt))
}
// testMultiStageImplicitFrom checks COPY --from with an image reference that
// is not a declared stage (implicitly pulls "busybox"), and then the inverse:
// a stage named like a well-known image ("golang") must shadow the image and
// be used as the copy source.
func testMultiStageImplicitFrom(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch
COPY --from=busybox /etc/passwd test
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// /etc/passwd from the implicit busybox image contains the root entry.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "test"))
	require.NoError(t, err)
	require.Contains(t, string(dt), "root")
	// testing masked image will load actual stage
	dockerfile = []byte(`
FROM busybox AS golang
RUN mkdir /usr/bin && echo -n foo > /usr/bin/go
FROM scratch
COPY --from=golang /usr/bin/go go
`)
	dir, err = tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	destDir, err = ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
	}, nil)
	require.NoError(t, err)
	// Content written by the local "golang" stage, not the golang image.
	dt, err = ioutil.ReadFile(filepath.Join(destDir, "go"))
	require.NoError(t, err)
	require.Contains(t, string(dt), "foo")
}
// testMultiStageCaseInsensitive verifies that stage names are matched
// case-insensitively, both in COPY --from references and in the "target"
// frontend attribute (building stage "Stage1" declared as "staGE1").
func testMultiStageCaseInsensitive(t *testing.T, sb integration.Sandbox) {
	t.Parallel()
	dockerfile := []byte(`
FROM scratch AS STAge0
COPY foo bar
FROM scratch AS staGE1
COPY --from=staGE0 bar baz
FROM scratch
COPY --from=stage1 baz bax
`)
	dir, err := tmpdir(
		fstest.CreateFile("Dockerfile", dockerfile, 0600),
		fstest.CreateFile("foo", []byte("foo-contents"), 0600),
	)
	require.NoError(t, err)
	defer os.RemoveAll(dir)
	c, err := client.New(sb.Address())
	require.NoError(t, err)
	defer c.Close()
	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)
	err = c.Solve(context.TODO(), nil, client.SolveOpt{
		Frontend:          "dockerfile.v0",
		Exporter:          client.ExporterLocal,
		ExporterOutputDir: destDir,
		LocalDirs: map[string]string{
			builder.LocalNameDockerfile: dir,
			builder.LocalNameContext:    dir,
		},
		FrontendAttrs: map[string]string{
			// Deliberately different casing from the declared "staGE1".
			"target": "Stage1",
		},
	}, nil)
	require.NoError(t, err)
	// Stage1's output contains baz (copied from STAge0 via staGE0).
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "baz"))
	require.NoError(t, err)
	require.Contains(t, string(dt), "foo-contents")
}
// tmpdir creates a fresh temporary directory and populates it by running the
// given fstest appliers inside it. It returns the directory path; the caller
// is responsible for removing it.
func tmpdir(appliers ...fstest.Applier) (string, error) {
	dir, err := ioutil.TempDir("", "buildkit-dockerfile")
	if err != nil {
		return "", err
	}
	err = fstest.Apply(appliers...).Apply(dir)
	if err != nil {
		return "", err
	}
	return dir, nil
}
// dfCmdArgs assembles the buildctl argument string for a dockerfile.v0 build
// with the given context and dockerfile directories. It returns the argument
// string and the (unique, not-yet-created) trace file path it references.
func dfCmdArgs(ctx, dockerfile string) (string, string) {
	trace := filepath.Join(os.TempDir(), "trace"+identity.NewID())
	args := fmt.Sprintf("build --no-progress --frontend dockerfile.v0 --local context=%s --local dockerfile=%s --trace=%s", ctx, dockerfile, trace)
	return args, trace
}
// runShell executes each command string via `sh -c` in dir, stopping at the
// first failure. The failing command line is included in the wrapped error.
func runShell(dir string, cmds ...string) error {
	for _, script := range cmds {
		c := exec.Command("sh", "-c", script)
		c.Dir = dir
		err := c.Run()
		if err != nil {
			return errors.Wrapf(err, "error running %v", script)
		}
	}
	return nil
}
|
package tree
import (
"fmt"
"strings"
"github.com/rgeorgiev583/gonflator/translation"
)
// Configuration maps a slash-separated setting path to its Setting.
type Configuration map[string]Setting

// Setting is a single configuration entry: a key and its raw value bytes.
type Setting struct {
	Key   string
	Value []byte
}

// ConfigurationServer is implemented by anything that can expose and mutate a
// configuration tree addressed by slash-separated paths.
type ConfigurationServer interface {
	GetConfiguration() Configuration
	GetSetting(path string) (*Setting, error)
	SetSetting(path string, value *Setting) error
}

// ConfigurationTree dispatches configuration operations to per-prefix
// subtree handlers.
type ConfigurationTree struct {
	Prefix          string
	SubtreeHandlers map[string]ConfigurationServer
}

// NonexistentSubtreeHandlerError reports a prefix with no registered handler.
type NonexistentSubtreeHandlerError struct {
	Prefix string
}

// InvalidPathError reports a path resolving to neither a tree nor a setting.
type InvalidPathError struct {
	Path string
}

// TreeAssignmentError reports an attempt to assign a value to a tree node.
type TreeAssignmentError struct {
	Path string
}

// NonexistentNodeError reports a path that matched no subtree handler.
type NonexistentNodeError struct {
	Path string
}
// Error implements the error interface for NonexistentSubtreeHandlerError.
func (nshe *NonexistentSubtreeHandlerError) Error() string {
	return "prefix " + nshe.Prefix + " does not refer to an existing subtree handler for the current tree"
}

// Error implements the error interface for InvalidPathError.
func (ipe *InvalidPathError) Error() string {
	return "configuration tree path " + ipe.Path + " does not refer to an existing tree or setting"
}

// Error implements the error interface for TreeAssignmentError.
func (tae *TreeAssignmentError) Error() string {
	return "configuration tree path " + tae.Path + " refers to a tree and so it cannot be assigned a value"
}

// Error implements the error interface for NonexistentNodeError.
func (nne *NonexistentNodeError) Error() string {
	return "configuration tree path " + nne.Path + " does not refer to a valid tree or setting"
}
// GetConfiguration merges the configurations of all subtree handlers into a
// single map, namespacing each setting path as "<prefix>/<path>".
func (ct *ConfigurationTree) GetConfiguration() Configuration {
	merged := make(Configuration)
	for prefix, handler := range ct.SubtreeHandlers {
		sub := handler.GetConfiguration()
		for path, value := range sub {
			merged[prefix+"/"+path] = value
		}
	}
	return merged
}
// GetSetting resolves path by consulting every handler whose prefix matches,
// returning the first successful lookup (with the prefix stripped from the
// path). If no handler resolves it, a NonexistentNodeError is returned.
func (ct *ConfigurationTree) GetSetting(path string) (*Setting, error) {
	for prefix, handler := range ct.SubtreeHandlers {
		if strings.HasPrefix(path, prefix) {
			if value, err := handler.GetSetting(strings.TrimPrefix(path, prefix)); err == nil {
				return value, nil
			}
		}
	}
	return nil, &NonexistentNodeError{path}
}
// SetSetting stores value at path via the first prefix-matching handler that
// accepts it (the prefix is stripped before delegating). If every matching
// handler fails, a NonexistentNodeError is returned.
func (ct *ConfigurationTree) SetSetting(path string, value *Setting) error {
	for prefix, handler := range ct.SubtreeHandlers {
		if !strings.HasPrefix(path, prefix) {
			continue
		}
		if err := handler.SetSetting(strings.TrimPrefix(path, prefix), value); err == nil {
			return nil
		}
	}
	return &NonexistentNodeError{path}
}
// TranslateToRdiff is apparently intended to fan incoming deltas out to the
// subtree handler owning each delta's path prefix and return a merged
// translated stream.
//
// NOTE(review): this body does not compile and looks like work-in-progress:
//   - `go for ...` is not valid Go; the loop must be wrapped in
//     `go func() { ... }()`.
//   - `diff` is declared send-only (chan<-) yet is ranged over; receiving
//     requires a receive-only (<-chan) parameter.
//   - `handler.TranslateToRdiff()` is called with no arguments and the
//     method is not declared on the ConfigurationServer interface.
//   - `deltaHandlers[prefix] <-` is a send with no value expression.
//   - `translatedDiff` is never assigned before the naked return, and the
//     function can fall off the end without returning.
// The intended dispatch/merge semantics need to be decided before this can
// be fixed; leaving the code untouched rather than guessing.
func (ct *ConfigurationTree) TranslateToRdiff(diff chan<- translation.Delta) (translatedDiff <-chan translation.Delta, err error) {
	deltaHandlers := make(map[string]chan translation.Delta)
	go for delta := range diff {
		for prefix, handler := range ct.SubtreeHandlers {
			if !strings.HasPrefix(delta.OldPath, prefix) || !strings.HasPrefix(delta.NewPath, prefix) {
				continue
			}
			translatedDelta := handler.TranslateToRdiff()
			if handler, ok := deltaHandlers[prefix]; ok {
				deltaHandlers[prefix] <-
			}
			if err == nil {
				return
			}
		}
	}
}
Added (to ConfigurationServer) and implemented (in ConfigurationTree) methods for appending to and assigning from a configuration map.
package tree
import (
"fmt"
"strings"
"github.com/rgeorgiev583/gonflator/translation"
)
// Configuration maps a slash-separated setting path to its Setting.
type Configuration map[string]Setting

// Setting is a single configuration entry: a key and its raw value bytes.
type Setting struct {
	Key   string
	Value []byte
}

// ConfigurationServer is implemented by anything that can expose and mutate a
// configuration tree addressed by slash-separated paths.
//
// NOTE(review): ConfigurationTree.SetConfiguration below returns an error
// while this interface declares SetConfiguration with no return value, so
// *ConfigurationTree does not satisfy ConfigurationServer as written —
// confirm whether the interface should declare `SetConfiguration(conf
// Configuration) error`.
type ConfigurationServer interface {
	GetConfiguration() Configuration
	AppendToConfiguration(conf Configuration)
	SetConfiguration(conf Configuration)
	GetSetting(path string) (*Setting, error)
	SetSetting(path string, value *Setting) error
}

// ConfigurationTree dispatches configuration operations to per-prefix
// subtree handlers.
type ConfigurationTree struct {
	Prefix          string
	SubtreeHandlers map[string]ConfigurationServer
}

// NonexistentSubtreeHandlerError reports a prefix with no registered handler.
type NonexistentSubtreeHandlerError struct {
	Prefix string
}

// InvalidPathError reports a path resolving to neither a tree nor a setting.
type InvalidPathError struct {
	Path string
}

// TreeAssignmentError reports an attempt to assign a value to a tree node.
type TreeAssignmentError struct {
	Path string
}

// NonexistentNodeError reports a path that matched no subtree handler.
type NonexistentNodeError struct {
	Path string
}
// Error implements the error interface for NonexistentSubtreeHandlerError.
func (nshe *NonexistentSubtreeHandlerError) Error() string {
	return fmt.Sprintf("prefix %s does not refer to an existing subtree handler for the current tree", nshe.Prefix)
}

// Error implements the error interface for InvalidPathError.
func (ipe *InvalidPathError) Error() string {
	return fmt.Sprintf("configuration tree path %s does not refer to an existing tree or setting", ipe.Path)
}

// Error implements the error interface for TreeAssignmentError.
func (tae *TreeAssignmentError) Error() string {
	return fmt.Sprintf("configuration tree path %s refers to a tree and so it cannot be assigned a value", tae.Path)
}

// Error implements the error interface for NonexistentNodeError.
func (nne *NonexistentNodeError) Error() string {
	return fmt.Sprintf("configuration tree path %s does not refer to a valid tree or setting", nne.Path)
}
// GetConfiguration returns a fresh Configuration holding every setting from
// all subtree handlers, namespaced by handler prefix.
func (ct *ConfigurationTree) GetConfiguration() Configuration {
	result := Configuration{}
	ct.AppendToConfiguration(result)
	return result
}
// AppendToConfiguration copies every handler's settings into conf, keying
// each entry as "<prefix>/<path>". Colliding keys already in conf are
// overwritten.
func (ct *ConfigurationTree) AppendToConfiguration(conf Configuration) {
	for prefix, handler := range ct.SubtreeHandlers {
		sub := handler.GetConfiguration()
		for path, value := range sub {
			key := fmt.Sprintf("%s/%s", prefix, path)
			conf[key] = value
		}
	}
}
// SetConfiguration applies every setting in conf via SetSetting, stopping at
// and returning the first error encountered. Map iteration order is
// unspecified, so on error an arbitrary subset of conf may already have been
// applied.
func (ct *ConfigurationTree) SetConfiguration(conf Configuration) (err error) {
	for path, value := range conf {
		// Fix: SetSetting takes *Setting but the range yields a Setting
		// value; take the address of a per-iteration copy so each call gets
		// its own stable pointer (the range variable is reused).
		value := value
		if err = ct.SetSetting(path, &value); err != nil {
			return
		}
	}
	return
}
// GetSetting resolves path against the subtree handlers: for each handler
// whose prefix is a prefix of path, the remainder is looked up, and the first
// successful lookup wins. Returns NonexistentNodeError when no handler
// resolves the path.
//
// NOTE(review): map iteration order is random, so if several registered
// prefixes match the same path the handler consulted first is
// nondeterministic — confirm prefixes are intended to be disjoint.
func (ct *ConfigurationTree) GetSetting(path string) (value *Setting, err error) {
	for prefix, handler := range ct.SubtreeHandlers {
		if !strings.HasPrefix(path, prefix) {
			continue
		}
		value, err = handler.GetSetting(strings.TrimPrefix(path, prefix))
		if err == nil {
			return
		}
	}
	return nil, &NonexistentNodeError{path}
}
// SetSetting stores value at path via the first prefix-matching handler that
// accepts it (the matched prefix is stripped before delegating). If every
// matching handler fails — or none matches — a NonexistentNodeError is
// returned.
func (ct *ConfigurationTree) SetSetting(path string, value *Setting) (err error) {
	for prefix, handler := range ct.SubtreeHandlers {
		if !strings.HasPrefix(path, prefix) {
			continue
		}
		err = handler.SetSetting(strings.TrimPrefix(path, prefix), value)
		if err == nil {
			return
		}
	}
	return &NonexistentNodeError{path}
}
// TranslateToRdiff is apparently intended to fan incoming deltas out to the
// subtree handler owning each delta's path prefix and return a merged
// translated stream.
//
// NOTE(review): this body does not compile and looks like work-in-progress:
//   - `go for ...` is not valid Go; the loop must be wrapped in
//     `go func() { ... }()`.
//   - `diff` is declared send-only (chan<-) yet is ranged over; receiving
//     requires a receive-only (<-chan) parameter.
//   - `handler.TranslateToRdiff()` is called with no arguments and the
//     method is not declared on the ConfigurationServer interface.
//   - `deltaHandlers[prefix] <-` is a send with no value expression.
//   - `translatedDiff` is never assigned before the naked return, and the
//     function can fall off the end without returning.
// The intended dispatch/merge semantics need to be decided before this can
// be fixed; leaving the code untouched rather than guessing.
func (ct *ConfigurationTree) TranslateToRdiff(diff chan<- translation.Delta) (translatedDiff <-chan translation.Delta, err error) {
	deltaHandlers := make(map[string]chan translation.Delta)
	go for delta := range diff {
		for prefix, handler := range ct.SubtreeHandlers {
			if !strings.HasPrefix(delta.OldPath, prefix) || !strings.HasPrefix(delta.NewPath, prefix) {
				continue
			}
			translatedDelta := handler.TranslateToRdiff()
			if handler, ok := deltaHandlers[prefix]; ok {
				deltaHandlers[prefix] <-
			}
			if err == nil {
				return
			}
		}
	}
}
|
package gps
import (
"bytes"
"crypto/sha256"
"fmt"
"strings"
"testing"
"text/tabwriter"
)
// TestHashInputs verifies Solver.HashInputs against a hand-computed SHA-256
// digest: the solve inputs must be hashed in the fixed section order
// (constraints, imports/requires, ignores, overrides, analyzer name+version),
// with each section introduced by its hh* marker string.
func TestHashInputs(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]
	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        fix.rootmanifest(),
	}
	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}
	dig := s.HashInputs()
	// Recompute the expected digest by writing the same element sequence.
	h := sha256.New()
	elems := []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		hhIgnores,
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct := h.Sum(nil)
	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
}
// TestHashInputsReqsIgs checks how the ignores (ig) and requires (req) sets
// of the root manifest feed into the input hash, across three scenarios:
// ignores only, ignores plus requires, and requires only. In each case the
// expected digest is recomputed by hand; ignores and requires must appear
// sorted within their sections, and requires are folded into the
// imports/requires section.
func TestHashInputsReqsIgs(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]
	rm := fix.rootmanifest().(simpleRootManifest).dup()
	rm.ig = map[string]bool{
		"foo": true,
		"bar": true,
	}
	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        rm,
	}
	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}
	dig := s.HashInputs()
	// Scenario 1: ignores only — "bar","foo" appear sorted under hhIgnores.
	h := sha256.New()
	elems := []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		hhIgnores,
		"bar",
		"foo",
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct := h.Sum(nil)
	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
	// Add requires
	rm.req = map[string]bool{
		"baz": true,
		"qux": true,
	}
	params.Manifest = rm
	s, err = Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}
	dig = s.HashInputs()
	// Scenario 2: requires join the imports section after the import list.
	h = sha256.New()
	elems = []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		"baz",
		"qux",
		hhIgnores,
		"bar",
		"foo",
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct = h.Sum(nil)
	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
	// remove ignores, just test requires alone
	rm.ig = nil
	params.Manifest = rm
	s, err = Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}
	dig = s.HashInputs()
	// Scenario 3: requires only — hhIgnores section is present but empty.
	h = sha256.New()
	elems = []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		"baz",
		"qux",
		hhIgnores,
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct = h.Sum(nil)
	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
}
// TestHashInputsOverrides walks a table of override scenarios and checks each
// one's effect on the input hash. The table is CUMULATIVE: each case's mut()
// further mutates the shared rm/params state left by the previous cases, so
// the cases must run in order and their elems lists describe the accumulated
// state, not an isolated fixture.
func TestHashInputsOverrides(t *testing.T) {
	basefix := basicFixtures["shared dependency with overlapping constraints"]
	// Set up base state that we'll mutate over the course of each test
	rm := basefix.rootmanifest().(simpleRootManifest).dup()
	params := SolveParameters{
		RootDir:         string(basefix.ds[0].n),
		RootPackageTree: basefix.rootTree(),
		Manifest:        rm,
	}
	table := []struct {
		name  string
		mut   func()
		elems []string
	}{
		{
			name: "override source; not imported, no deps pp",
			mut: func() {
				// First case - override just source, on something without
				// corresponding project properties in the dependencies from
				// root
				rm.ovr = map[ProjectRoot]ProjectProperties{
					"c": ProjectProperties{
						Source: "car",
					},
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; required, no deps pp",
			mut: func() {
				// Put c into the requires list, which should make it show up under
				// constraints
				rm.req = map[string]bool{
					"c": true,
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*", // Any isn't included under the override, but IS for the constraint b/c it's equivalent
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; imported, no deps pp",
			mut: func() {
				// Take c out of requires list and put it directly in root's imports
				rm.req = nil
				poe := params.RootPackageTree.Packages["root"]
				poe.P.Imports = []string{"a", "b", "c"}
				params.RootPackageTree.Packages["root"] = poe
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*",
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "other override constraint; not imported, no deps pp",
			mut: func() {
				// Override not in root, just with constraint
				rm.ovr["d"] = ProjectProperties{
					Constraint: NewBranch("foobranch"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*",
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override constraint; not imported, no deps pp",
			mut: func() {
				// Remove the "c" pkg from imports for remainder of tests
				poe := params.RootPackageTree.Packages["root"]
				poe.P.Imports = []string{"a", "b"}
				params.RootPackageTree.Packages["root"] = poe
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override both; not imported, no deps pp",
			mut: func() {
				// Override not in root, both constraint and network name
				rm.ovr["c"] = ProjectProperties{
					Source:     "groucho",
					Constraint: NewBranch("plexiglass"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override constraint; imported, with constraint",
			mut: func() {
				// Override dep present in root, just constraint
				rm.ovr["a"] = ProjectProperties{
					Constraint: NewVersion("fluglehorn"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"fluglehorn",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"fluglehorn",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; imported, with constraint",
			mut: func() {
				// Override in root, only network name
				rm.ovr["a"] = ProjectProperties{
					Source: "nota",
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"nota",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"nota",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override both; imported, with constraint",
			mut: func() {
				// Override in root, network name and constraint
				rm.ovr["a"] = ProjectProperties{
					Source:     "nota",
					Constraint: NewVersion("fluglehorn"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"nota",
				"fluglehorn",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"nota",
				"fluglehorn",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
	}
	// Apply each mutation in sequence and compare the solver's digest to the
	// one recomputed from the case's expected element list.
	for _, fix := range table {
		fix.mut()
		params.Manifest = rm
		s, err := Prepare(params, newdepspecSM(basefix.ds, nil))
		if err != nil {
			t.Errorf("(fix: %s) Unexpected error while prepping solver: %s", fix.name, err)
			t.FailNow()
		}
		h := sha256.New()
		for _, v := range fix.elems {
			h.Write([]byte(v))
		}
		if !bytes.Equal(s.HashInputs(), h.Sum(nil)) {
			t.Errorf("(fix: %s) Hashes are not equal. Inputs:\n%s", fix.name, diffHashingInputs(s, fix.elems))
		}
	}
}
// diffHashingInputs renders the solver's actual hashing inputs next to the
// wanted list in two tabwriter columns (GOT | WANT), for test failure output.
// When the two lists differ in length it heuristically realigns them: a line
// with no counterpart is printed alone in its column and subsequent rows are
// shifted by an offset so matching lines line up again.
func diffHashingInputs(s Solver, wnt []string) string {
	actual := HashingInputsAsString(s)
	got := strings.Split(actual, "\n")
	lg, lw := len(got), len(wnt)
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 4, 4, 2, ' ', 0)
	fmt.Fprintln(tw, "  (GOT)  \t  (WANT)  \t")
	if lg == lw {
		// same length makes the loop pretty straightforward
		for i := 0; i < lg; i++ {
			fmt.Fprintf(tw, "%s\t%s\t\n", got[i], wnt[i])
		}
	} else if lg > lw {
		// More GOT lines than WANT: print unmatched GOT lines alone and
		// shift WANT down by the number skipped so far.
		offset := 0
		for i := 0; i < lg; i++ {
			if lw <= i-offset {
				fmt.Fprintf(tw, "%s\t\t\n", got[i])
			} else if got[i] != wnt[i-offset] && i+1 < lg && got[i+1] == wnt[i-offset] {
				// if the next slot is a match, realign by skipping this one and
				// bumping the offset
				fmt.Fprintf(tw, "%s\t\t\n", got[i])
				offset++
			} else {
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i], wnt[i-offset])
			}
		}
	} else {
		// More WANT lines than GOT: the mirror image of the branch above.
		offset := 0
		for i := 0; i < lw; i++ {
			if lg <= i-offset {
				fmt.Fprintf(tw, "\t%s\t\n", wnt[i])
			} else if got[i-offset] != wnt[i] && i+1 < lw && got[i-offset] == wnt[i+1] {
				// if the next slot is a match, realign by skipping this one and
				// bumping the offset
				fmt.Fprintf(tw, "\t%s\t\n", wnt[i])
				offset++
			} else {
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i-offset], wnt[i])
			}
		}
	}
	tw.Flush()
	return buf.String()
}
Add diff-ish indicators to hash diff output
Makes it easier to see problem spots on a quick scan.
package gps
import (
"bytes"
"crypto/sha256"
"fmt"
"strings"
"testing"
"text/tabwriter"
)
// TestHashInputs verifies Solver.HashInputs against a hand-computed SHA-256
// digest: the solve inputs must be hashed in the fixed section order
// (constraints, imports/requires, ignores, overrides, analyzer name+version),
// with each section introduced by its hh* marker string.
func TestHashInputs(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]
	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        fix.rootmanifest(),
	}
	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}
	dig := s.HashInputs()
	// Recompute the expected digest by writing the same element sequence.
	h := sha256.New()
	elems := []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		hhIgnores,
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct := h.Sum(nil)
	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
}
// TestHashInputsReqsIgs verifies that required and ignored packages feed into
// the input digest: ignores hash under hhIgnores, requires under
// hhImportsReqs. The expected lists appear sorted ("bar" before "foo")
// regardless of map literal order.
func TestHashInputsReqsIgs(t *testing.T) {
	fix := basicFixtures["shared dependency with overlapping constraints"]

	rm := fix.rootmanifest().(simpleRootManifest).dup()
	rm.ig = map[string]bool{
		"foo": true,
		"bar": true,
	}

	params := SolveParameters{
		RootDir:         string(fix.ds[0].n),
		RootPackageTree: fix.rootTree(),
		Manifest:        rm,
	}

	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}

	dig := s.HashInputs()
	h := sha256.New()

	// Case 1: ignores only.
	elems := []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		hhIgnores,
		"bar",
		"foo",
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct := h.Sum(nil)

	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}

	// Add requires
	rm.req = map[string]bool{
		"baz": true,
		"qux": true,
	}

	params.Manifest = rm

	s, err = Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}

	dig = s.HashInputs()
	h = sha256.New()

	// Case 2: both requires and ignores; requires join the import list.
	elems = []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		"baz",
		"qux",
		hhIgnores,
		"bar",
		"foo",
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct = h.Sum(nil)

	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}

	// remove ignores, just test requires alone
	rm.ig = nil
	params.Manifest = rm

	s, err = Prepare(params, newdepspecSM(fix.ds, nil))
	if err != nil {
		t.Errorf("Unexpected error while prepping solver: %s", err)
		t.FailNow()
	}

	dig = s.HashInputs()
	h = sha256.New()

	// Case 3: requires only; hhIgnores section is present but empty.
	elems = []string{
		hhConstraints,
		"a",
		"1.0.0",
		"b",
		"1.0.0",
		hhImportsReqs,
		"a",
		"b",
		"baz",
		"qux",
		hhIgnores,
		hhOverrides,
		hhAnalyzer,
		"depspec-sm-builtin",
		"1.0.0",
	}
	for _, v := range elems {
		h.Write([]byte(v))
	}
	correct = h.Sum(nil)

	if !bytes.Equal(dig, correct) {
		t.Errorf("Hashes are not equal. Inputs:\n%s", diffHashingInputs(s, elems))
	}
}
// TestHashInputsOverrides runs a table of scenarios that mutate the root
// manifest's overrides (and related imports/requires) and checks the input
// digest after each mutation. NOTE: each table entry's mut() builds on the
// state left by the previous entries — the cases are cumulative, not
// independent, so their order must not change.
func TestHashInputsOverrides(t *testing.T) {
	basefix := basicFixtures["shared dependency with overlapping constraints"]

	// Set up base state that we'll mutate over the course of each test
	rm := basefix.rootmanifest().(simpleRootManifest).dup()
	params := SolveParameters{
		RootDir:         string(basefix.ds[0].n),
		RootPackageTree: basefix.rootTree(),
		Manifest:        rm,
	}

	table := []struct {
		name  string
		mut   func()
		elems []string
	}{
		{
			name: "override source; not imported, no deps pp",
			mut: func() {
				// First case - override just source, on something without
				// corresponding project properties in the dependencies from
				// root
				rm.ovr = map[ProjectRoot]ProjectProperties{
					"c": ProjectProperties{
						Source: "car",
					},
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; required, no deps pp",
			mut: func() {
				// Put c into the requires list, which should make it show up under
				// constraints
				rm.req = map[string]bool{
					"c": true,
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*", // Any isn't included under the override, but IS for the constraint b/c it's equivalent
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; imported, no deps pp",
			mut: func() {
				// Take c out of requires list and put it directly in root's imports
				rm.req = nil
				poe := params.RootPackageTree.Packages["root"]
				poe.P.Imports = []string{"a", "b", "c"}
				params.RootPackageTree.Packages["root"] = poe
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*",
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "other override constraint; not imported, no deps pp",
			mut: func() {
				// Override not in root, just with constraint
				rm.ovr["d"] = ProjectProperties{
					Constraint: NewBranch("foobranch"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				"c",
				"car",
				"*",
				hhImportsReqs,
				"a",
				"b",
				"c",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override constraint; not imported, no deps pp",
			mut: func() {
				// Remove the "c" pkg from imports for remainder of tests
				poe := params.RootPackageTree.Packages["root"]
				poe.P.Imports = []string{"a", "b"}
				params.RootPackageTree.Packages["root"] = poe
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"car",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override both; not imported, no deps pp",
			mut: func() {
				// Override not in root, both constraint and network name
				rm.ovr["c"] = ProjectProperties{
					Source:     "groucho",
					Constraint: NewBranch("plexiglass"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override constraint; imported, with constraint",
			mut: func() {
				// Override dep present in root, just constraint
				rm.ovr["a"] = ProjectProperties{
					Constraint: NewVersion("fluglehorn"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"fluglehorn",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"fluglehorn",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override source; imported, with constraint",
			mut: func() {
				// Override in root, only network name
				rm.ovr["a"] = ProjectProperties{
					Source: "nota",
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"nota",
				"1.0.0",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"nota",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
		{
			name: "override both; imported, with constraint",
			mut: func() {
				// Override in root, network name and constraint
				rm.ovr["a"] = ProjectProperties{
					Source:     "nota",
					Constraint: NewVersion("fluglehorn"),
				}
			},
			elems: []string{
				hhConstraints,
				"a",
				"nota",
				"fluglehorn",
				"b",
				"1.0.0",
				hhImportsReqs,
				"a",
				"b",
				hhIgnores,
				hhOverrides,
				"a",
				"nota",
				"fluglehorn",
				"c",
				"groucho",
				"plexiglass",
				"d",
				"foobranch",
				hhAnalyzer,
				"depspec-sm-builtin",
				"1.0.0",
			},
		},
	}

	// Apply each mutation in sequence and verify the digest after each one.
	for _, fix := range table {
		fix.mut()
		params.Manifest = rm

		s, err := Prepare(params, newdepspecSM(basefix.ds, nil))
		if err != nil {
			t.Errorf("(fix: %s) Unexpected error while prepping solver: %s", fix.name, err)
			t.FailNow()
		}

		h := sha256.New()
		for _, v := range fix.elems {
			h.Write([]byte(v))
		}

		if !bytes.Equal(s.HashInputs(), h.Sum(nil)) {
			t.Errorf("(fix: %s) Hashes are not equal. Inputs:\n%s", fix.name, diffHashingInputs(s, fix.elems))
		}
	}
}
// diffHashingInputs renders a two-column (GOT vs WANT) table comparing the
// solver's actual hashing inputs against the expected element list. When the
// lists differ in length, a greedy one-line-lookahead realignment marks
// extra lines with ">>>>>>>>>>" (missing on the left / WANT side) or
// "<<<<<<<<<<" (missing on the right / GOT side) so problem spots stand out.
func diffHashingInputs(s Solver, wnt []string) string {
	actual := HashingInputsAsString(s)
	got := strings.Split(actual, "\n")
	// got has a trailing empty, add that to wnt
	wnt = append(wnt, "")

	lg, lw := len(got), len(wnt)

	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 4, 4, 2, ' ', 0)
	fmt.Fprintln(tw, " (GOT) \t (WANT) \t")

	lmiss, rmiss := ">>>>>>>>>>", "<<<<<<<<<<"
	if lg == lw {
		// same length makes the loop pretty straightforward
		for i := 0; i < lg; i++ {
			fmt.Fprintf(tw, "%s\t%s\t\n", got[i], wnt[i])
		}
	} else if lg > lw {
		// got is longer: offset counts how many got-only lines were skipped
		// so the remaining wnt entries stay aligned with their matches.
		offset := 0
		for i := 0; i < lg; i++ {
			if lw <= i-offset {
				// wnt is exhausted; every remaining got line is unmatched.
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i], rmiss)
			} else if got[i] != wnt[i-offset] && i+1 < lg && got[i+1] == wnt[i-offset] {
				// if the next slot is a match, realign by skipping this one and
				// bumping the offset
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i], rmiss)
				offset++
			} else {
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i], wnt[i-offset])
			}
		}
	} else {
		// wnt is longer: mirror image of the branch above.
		offset := 0
		for i := 0; i < lw; i++ {
			if lg <= i-offset {
				fmt.Fprintf(tw, "%s\t%s\t\n", lmiss, wnt[i])
			} else if got[i-offset] != wnt[i] && i+1 < lw && got[i-offset] == wnt[i+1] {
				// if the next slot is a match, realign by skipping this one and
				// bumping the offset
				fmt.Fprintf(tw, "%s\t%s\t\n", lmiss, wnt[i])
				offset++
			} else {
				fmt.Fprintf(tw, "%s\t%s\t\n", got[i-offset], wnt[i])
			}
		}
	}

	tw.Flush()
	return buf.String()
}
|
package aws
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/autoscaling"
)
// resource_aws_autoscaling_group_create creates an AutoScaling Group from the
// merged state/diff attributes, then retrieves it and returns the refreshed
// resource state. The group's ID is its name.
func resource_aws_autoscaling_group_create(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn

	// Merge the diff into the state so that we have all the attributes
	// properly.
	rs := s.MergeDiff(d)

	var err error
	autoScalingGroupOpts := autoscaling.CreateAutoScalingGroup{}

	// Each numeric attribute is checked immediately after parsing. The
	// previous code shared one err across all five strconv.Atoi calls and
	// checked it once at the end, so an early parse failure was silently
	// overwritten by any later successful parse.
	if v := rs.Attributes["min_size"]; v != "" {
		autoScalingGroupOpts.MinSize, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetMinSize = true
	}

	if v := rs.Attributes["max_size"]; v != "" {
		autoScalingGroupOpts.MaxSize, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetMaxSize = true
	}

	if v := rs.Attributes["default_cooldown"]; v != "" {
		autoScalingGroupOpts.DefaultCooldown, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetDefaultCooldown = true
	}

	if v := rs.Attributes["desired_capacity"]; v != "" {
		autoScalingGroupOpts.DesiredCapacity, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetDesiredCapacity = true
	}

	if v := rs.Attributes["health_check_grace_period"]; v != "" {
		autoScalingGroupOpts.HealthCheckGracePeriod, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetHealthCheckGracePeriod = true
	}

	if _, ok := rs.Attributes["availability_zones.#"]; ok {
		autoScalingGroupOpts.AvailZone = expandStringList(flatmap.Expand(
			rs.Attributes, "availability_zones").([]interface{}))
	}

	if _, ok := rs.Attributes["load_balancers.#"]; ok {
		autoScalingGroupOpts.LoadBalancerNames = expandStringList(flatmap.Expand(
			rs.Attributes, "load_balancers").([]interface{}))
	}

	// Fixed attribute key: state is written back under "vpc_zone_identifier"
	// (see resource_aws_autoscaling_group_update_state and the diff builder),
	// so the old "vpc_identifier" key here could never match.
	if _, ok := rs.Attributes["vpc_zone_identifier.#"]; ok {
		autoScalingGroupOpts.VPCZoneIdentifier = expandStringList(flatmap.Expand(
			rs.Attributes, "vpc_zone_identifier").([]interface{}))
	}

	autoScalingGroupOpts.Name = rs.Attributes["name"]
	autoScalingGroupOpts.HealthCheckType = rs.Attributes["health_check_type"]
	autoScalingGroupOpts.LaunchConfigurationName = rs.Attributes["launch_configuration"]

	log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts)
	_, err = autoscalingconn.CreateAutoScalingGroup(&autoScalingGroupOpts)
	if err != nil {
		return nil, fmt.Errorf("Error creating AutoScaling Group: %s", err)
	}

	rs.ID = rs.Attributes["name"]
	rs.Dependencies = []terraform.ResourceDependency{
		terraform.ResourceDependency{ID: rs.Attributes["launch_configuration"]},
	}

	log.Printf("[INFO] AutoScaling Group ID: %s", rs.ID)

	g, err := resource_aws_autoscaling_group_retrieve(rs.ID, autoscalingconn)
	if err != nil {
		return rs, err
	}
	return resource_aws_autoscaling_group_update_state(rs, g)
}
// resource_aws_autoscaling_group_update applies in-place min_size/max_size
// changes to an existing AutoScaling Group and returns the refreshed state.
func resource_aws_autoscaling_group_update(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn
	rs := s.MergeDiff(d)

	opts := autoscaling.UpdateAutoScalingGroup{
		Name: rs.ID,
	}

	var err error

	// Check each parse result immediately: the previous code shared one err
	// across both strconv.Atoi calls, so a min_size parse failure was lost
	// whenever max_size parsed successfully.
	if _, ok := d.Attributes["min_size"]; ok {
		opts.MinSize, err = strconv.Atoi(rs.Attributes["min_size"])
		if err != nil {
			return s, fmt.Errorf("Error parsing configuration: %s", err)
		}
		opts.SetMinSize = true
	}

	if _, ok := d.Attributes["max_size"]; ok {
		opts.MaxSize, err = strconv.Atoi(rs.Attributes["max_size"])
		if err != nil {
			return s, fmt.Errorf("Error parsing configuration: %s", err)
		}
		opts.SetMaxSize = true
	}

	log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)

	_, err = autoscalingconn.UpdateAutoScalingGroup(&opts)
	if err != nil {
		return rs, fmt.Errorf("Error updating AutoScaling group: %s", err)
	}

	g, err := resource_aws_autoscaling_group_retrieve(rs.ID, autoscalingconn)
	if err != nil {
		return rs, err
	}
	return resource_aws_autoscaling_group_update_state(rs, g)
}
// resource_aws_autoscaling_group_destroy deletes the AutoScaling Group named
// by the resource ID. A non-empty "force_delete" attribute requests deletion
// even while the group is actively scaling — normally you would shrink
// min/max to 0,0 first; forcing bypasses that and may leave resources
// dangling. A NotFound error from AWS is treated as success.
func resource_aws_autoscaling_group_destroy(
	s *terraform.ResourceState,
	meta interface{}) error {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn

	log.Printf("[DEBUG] AutoScaling Group destroy: %v", s.ID)

	deleteopts := autoscaling.DeleteAutoScalingGroup{
		Name:        s.ID,
		ForceDelete: s.Attributes["force_delete"] != "",
	}

	if _, err := autoscalingconn.DeleteAutoScalingGroup(&deleteopts); err != nil {
		if asErr, ok := err.(*autoscaling.Error); ok && asErr.Code == "InvalidGroup.NotFound" {
			// Already gone — nothing left to destroy.
			return nil
		}
		return err
	}

	return nil
}
// resource_aws_autoscaling_group_refresh re-reads the group from AWS and
// syncs its settings into the resource state.
func resource_aws_autoscaling_group_refresh(
	s *terraform.ResourceState,
	meta interface{}) (*terraform.ResourceState, error) {
	conn := meta.(*ResourceProvider).autoscalingconn

	group, err := resource_aws_autoscaling_group_retrieve(s.ID, conn)
	if err != nil {
		return s, err
	}

	return resource_aws_autoscaling_group_update_state(s, group)
}
// resource_aws_autoscaling_group_diff builds the resource diff, declaring
// which attribute changes force replacement of the group (AttrTypeCreate)
// and which can be applied in place (AttrTypeUpdate).
func resource_aws_autoscaling_group_diff(
	s *terraform.ResourceState,
	c *terraform.ResourceConfig,
	meta interface{}) (*terraform.ResourceDiff, error) {
	b := &diff.ResourceBuilder{
		// Only max_size and min_size are updatable in place; every other
		// change replaces the group.
		Attrs: map[string]diff.AttrType{
			// NOTE(review): singular "availability_zone" here, but the config
			// attribute everywhere else is "availability_zones" — confirm
			// this key is intentional.
			"availability_zone":         diff.AttrTypeCreate,
			"default_cooldown":          diff.AttrTypeCreate,
			"desired_capacity":          diff.AttrTypeCreate,
			"force_delete":              diff.AttrTypeCreate,
			"health_check_grace_period": diff.AttrTypeCreate,
			"health_check_type":         diff.AttrTypeCreate,
			"launch_configuration":      diff.AttrTypeCreate,
			"load_balancers":            diff.AttrTypeCreate,
			"name":                      diff.AttrTypeCreate,
			"vpc_zone_identifier":       diff.AttrTypeCreate,
			"max_size":                  diff.AttrTypeUpdate,
			"min_size":                  diff.AttrTypeUpdate,
		},

		// Attributes AWS fills in (or defaults) when not set in config.
		ComputedAttrs: []string{
			"health_check_grace_period",
			"health_check_type",
			"default_cooldown",
			"vpc_zone_identifier",
			"desired_capacity",
			"force_delete",
		},
	}

	return b.Diff(s, c)
}
// resource_aws_autoscaling_group_update_state copies the retrieved group's
// settings back into the resource state and returns it.
func resource_aws_autoscaling_group_update_state(
	s *terraform.ResourceState,
	g *autoscaling.AutoScalingGroup) (*terraform.ResourceState, error) {
	attrs := s.Attributes
	attrs["name"] = g.Name
	attrs["min_size"] = strconv.Itoa(g.MinSize)
	attrs["max_size"] = strconv.Itoa(g.MaxSize)
	attrs["default_cooldown"] = strconv.Itoa(g.DefaultCooldown)
	attrs["desired_capacity"] = strconv.Itoa(g.DesiredCapacity)
	attrs["health_check_grace_period"] = strconv.Itoa(g.HealthCheckGracePeriod)
	attrs["health_check_type"] = g.HealthCheckType
	attrs["launch_configuration"] = g.LaunchConfigurationName
	attrs["vpc_zone_identifier"] = g.VPCZoneIdentifier

	// Collect the multi-valued settings, then flatten them into the state.
	flat := make(map[string]interface{})

	// AWS sometimes returns a single blank load-balancer entry in the XML;
	// skip the list entirely in that case.
	if len(g.LoadBalancerNames) > 0 && g.LoadBalancerNames[0].LoadBalancerName != "" {
		flat["load_balancers"] = flattenLoadBalancers(g.LoadBalancerNames)
	}
	flat["availability_zones"] = flattenAvailabilityZones(g.AvailabilityZones)

	for key, val := range flatmap.Flatten(flat) {
		attrs[key] = val
	}

	return s, nil
}
// resource_aws_autoscaling_group_retrieve returns a single AutoScaling Group
// by its ID (name), or an error if it cannot be found.
func resource_aws_autoscaling_group_retrieve(id string, autoscalingconn *autoscaling.AutoScaling) (*autoscaling.AutoScalingGroup, error) {
	describeOpts := autoscaling.DescribeAutoScalingGroups{
		Names: []string{id},
	}

	log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts)

	describeGroups, err := autoscalingconn.DescribeAutoScalingGroups(&describeOpts)
	if err != nil {
		return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err)
	}

	// Verify AWS returned our group. Bug fix: the previous code guarded this
	// return with "if err != nil", but err is always nil at this point, so a
	// not-found result fell through and indexed a possibly empty slice
	// (panic). Return the error unconditionally.
	if len(describeGroups.AutoScalingGroups) != 1 ||
		describeGroups.AutoScalingGroups[0].Name != id {
		return nil, fmt.Errorf("Unable to find AutoScaling group: %#v", describeGroups.AutoScalingGroups)
	}

	g := describeGroups.AutoScalingGroups[0]
	return &g, nil
}
// resource_aws_autoscaling_group_validation declares the required and
// optional configuration attributes for aws_autoscaling_group.
func resource_aws_autoscaling_group_validation() *config.Validator {
	return &config.Validator{
		Required: []string{
			"name",
			"max_size",
			"min_size",
			"availability_zones.*",
			"launch_configuration",
		},
		Optional: []string{
			"health_check_grace_period",
			"health_check_type",
			"desired_capacity",
			"force_delete",
			"load_balancers.*",
			// Added: vpc_zone_identifier is read by create and written by
			// update_state, but was missing here, so configurations using it
			// failed validation.
			"vpc_zone_identifier.*",
		},
	}
}
allow vpc_zone_identifier in aws_autoscaling_group
package aws
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/flatmap"
"github.com/hashicorp/terraform/helper/config"
"github.com/hashicorp/terraform/helper/diff"
"github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/goamz/autoscaling"
)
// resource_aws_autoscaling_group_create creates an AutoScaling Group from the
// merged state/diff attributes, then retrieves it and returns the refreshed
// resource state. The group's ID is its name.
func resource_aws_autoscaling_group_create(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn

	// Merge the diff into the state so that we have all the attributes
	// properly.
	rs := s.MergeDiff(d)

	var err error
	autoScalingGroupOpts := autoscaling.CreateAutoScalingGroup{}

	// Each numeric attribute is checked immediately after parsing. The
	// previous code shared one err across all five strconv.Atoi calls and
	// checked it once at the end, so an early parse failure was silently
	// overwritten by any later successful parse.
	if v := rs.Attributes["min_size"]; v != "" {
		autoScalingGroupOpts.MinSize, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetMinSize = true
	}

	if v := rs.Attributes["max_size"]; v != "" {
		autoScalingGroupOpts.MaxSize, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetMaxSize = true
	}

	if v := rs.Attributes["default_cooldown"]; v != "" {
		autoScalingGroupOpts.DefaultCooldown, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetDefaultCooldown = true
	}

	if v := rs.Attributes["desired_capacity"]; v != "" {
		autoScalingGroupOpts.DesiredCapacity, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetDesiredCapacity = true
	}

	if v := rs.Attributes["health_check_grace_period"]; v != "" {
		autoScalingGroupOpts.HealthCheckGracePeriod, err = strconv.Atoi(v)
		if err != nil {
			return nil, fmt.Errorf("Error parsing configuration: %s", err)
		}
		autoScalingGroupOpts.SetHealthCheckGracePeriod = true
	}

	if _, ok := rs.Attributes["availability_zones.#"]; ok {
		autoScalingGroupOpts.AvailZone = expandStringList(flatmap.Expand(
			rs.Attributes, "availability_zones").([]interface{}))
	}

	if _, ok := rs.Attributes["load_balancers.#"]; ok {
		autoScalingGroupOpts.LoadBalancerNames = expandStringList(flatmap.Expand(
			rs.Attributes, "load_balancers").([]interface{}))
	}

	if _, ok := rs.Attributes["vpc_zone_identifier.#"]; ok {
		autoScalingGroupOpts.VPCZoneIdentifier = expandStringList(flatmap.Expand(
			rs.Attributes, "vpc_zone_identifier").([]interface{}))
	}

	autoScalingGroupOpts.Name = rs.Attributes["name"]
	autoScalingGroupOpts.HealthCheckType = rs.Attributes["health_check_type"]
	autoScalingGroupOpts.LaunchConfigurationName = rs.Attributes["launch_configuration"]

	log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts)
	_, err = autoscalingconn.CreateAutoScalingGroup(&autoScalingGroupOpts)
	if err != nil {
		return nil, fmt.Errorf("Error creating AutoScaling Group: %s", err)
	}

	rs.ID = rs.Attributes["name"]
	rs.Dependencies = []terraform.ResourceDependency{
		terraform.ResourceDependency{ID: rs.Attributes["launch_configuration"]},
	}

	log.Printf("[INFO] AutoScaling Group ID: %s", rs.ID)

	g, err := resource_aws_autoscaling_group_retrieve(rs.ID, autoscalingconn)
	if err != nil {
		return rs, err
	}
	return resource_aws_autoscaling_group_update_state(rs, g)
}
// resource_aws_autoscaling_group_update applies in-place min_size/max_size
// changes to an existing AutoScaling Group and returns the refreshed state.
func resource_aws_autoscaling_group_update(
	s *terraform.ResourceState,
	d *terraform.ResourceDiff,
	meta interface{}) (*terraform.ResourceState, error) {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn
	rs := s.MergeDiff(d)

	opts := autoscaling.UpdateAutoScalingGroup{
		Name: rs.ID,
	}

	var err error

	// Check each parse result immediately: the previous code shared one err
	// across both strconv.Atoi calls, so a min_size parse failure was lost
	// whenever max_size parsed successfully.
	if _, ok := d.Attributes["min_size"]; ok {
		opts.MinSize, err = strconv.Atoi(rs.Attributes["min_size"])
		if err != nil {
			return s, fmt.Errorf("Error parsing configuration: %s", err)
		}
		opts.SetMinSize = true
	}

	if _, ok := d.Attributes["max_size"]; ok {
		opts.MaxSize, err = strconv.Atoi(rs.Attributes["max_size"])
		if err != nil {
			return s, fmt.Errorf("Error parsing configuration: %s", err)
		}
		opts.SetMaxSize = true
	}

	log.Printf("[DEBUG] AutoScaling Group update configuration: %#v", opts)

	_, err = autoscalingconn.UpdateAutoScalingGroup(&opts)
	if err != nil {
		return rs, fmt.Errorf("Error updating AutoScaling group: %s", err)
	}

	g, err := resource_aws_autoscaling_group_retrieve(rs.ID, autoscalingconn)
	if err != nil {
		return rs, err
	}
	return resource_aws_autoscaling_group_update_state(rs, g)
}
// resource_aws_autoscaling_group_destroy deletes the AutoScaling Group named
// by the resource ID. A non-empty "force_delete" attribute requests deletion
// even while the group is actively scaling — normally you would shrink
// min/max to 0,0 first; forcing bypasses that and may leave resources
// dangling. A NotFound error from AWS is treated as success.
func resource_aws_autoscaling_group_destroy(
	s *terraform.ResourceState,
	meta interface{}) error {
	p := meta.(*ResourceProvider)
	autoscalingconn := p.autoscalingconn

	log.Printf("[DEBUG] AutoScaling Group destroy: %v", s.ID)

	deleteopts := autoscaling.DeleteAutoScalingGroup{
		Name:        s.ID,
		ForceDelete: s.Attributes["force_delete"] != "",
	}

	if _, err := autoscalingconn.DeleteAutoScalingGroup(&deleteopts); err != nil {
		if asErr, ok := err.(*autoscaling.Error); ok && asErr.Code == "InvalidGroup.NotFound" {
			// Already gone — nothing left to destroy.
			return nil
		}
		return err
	}

	return nil
}
// resource_aws_autoscaling_group_refresh re-reads the group from AWS and
// syncs its settings into the resource state.
func resource_aws_autoscaling_group_refresh(
	s *terraform.ResourceState,
	meta interface{}) (*terraform.ResourceState, error) {
	conn := meta.(*ResourceProvider).autoscalingconn

	group, err := resource_aws_autoscaling_group_retrieve(s.ID, conn)
	if err != nil {
		return s, err
	}

	return resource_aws_autoscaling_group_update_state(s, group)
}
// resource_aws_autoscaling_group_diff builds the resource diff, declaring
// which attribute changes force replacement of the group (AttrTypeCreate)
// and which can be applied in place (AttrTypeUpdate).
func resource_aws_autoscaling_group_diff(
	s *terraform.ResourceState,
	c *terraform.ResourceConfig,
	meta interface{}) (*terraform.ResourceDiff, error) {
	b := &diff.ResourceBuilder{
		// Only max_size and min_size are updatable in place; every other
		// change replaces the group.
		Attrs: map[string]diff.AttrType{
			// NOTE(review): singular "availability_zone" here, but the config
			// attribute everywhere else is "availability_zones" — confirm
			// this key is intentional.
			"availability_zone":         diff.AttrTypeCreate,
			"default_cooldown":          diff.AttrTypeCreate,
			"desired_capacity":          diff.AttrTypeCreate,
			"force_delete":              diff.AttrTypeCreate,
			"health_check_grace_period": diff.AttrTypeCreate,
			"health_check_type":         diff.AttrTypeCreate,
			"launch_configuration":      diff.AttrTypeCreate,
			"load_balancers":            diff.AttrTypeCreate,
			"name":                      diff.AttrTypeCreate,
			"vpc_zone_identifier":       diff.AttrTypeCreate,
			"max_size":                  diff.AttrTypeUpdate,
			"min_size":                  diff.AttrTypeUpdate,
		},

		// Attributes AWS fills in (or defaults) when not set in config.
		ComputedAttrs: []string{
			"health_check_grace_period",
			"health_check_type",
			"default_cooldown",
			"vpc_zone_identifier",
			"desired_capacity",
			"force_delete",
		},
	}

	return b.Diff(s, c)
}
// resource_aws_autoscaling_group_update_state copies the retrieved group's
// settings back into the resource state and returns it.
func resource_aws_autoscaling_group_update_state(
	s *terraform.ResourceState,
	g *autoscaling.AutoScalingGroup) (*terraform.ResourceState, error) {
	attrs := s.Attributes
	attrs["name"] = g.Name
	attrs["min_size"] = strconv.Itoa(g.MinSize)
	attrs["max_size"] = strconv.Itoa(g.MaxSize)
	attrs["default_cooldown"] = strconv.Itoa(g.DefaultCooldown)
	attrs["desired_capacity"] = strconv.Itoa(g.DesiredCapacity)
	attrs["health_check_grace_period"] = strconv.Itoa(g.HealthCheckGracePeriod)
	attrs["health_check_type"] = g.HealthCheckType
	attrs["launch_configuration"] = g.LaunchConfigurationName
	attrs["vpc_zone_identifier"] = g.VPCZoneIdentifier

	// Collect the multi-valued settings, then flatten them into the state.
	flat := make(map[string]interface{})

	// AWS sometimes returns a single blank load-balancer entry in the XML;
	// skip the list entirely in that case.
	if len(g.LoadBalancerNames) > 0 && g.LoadBalancerNames[0].LoadBalancerName != "" {
		flat["load_balancers"] = flattenLoadBalancers(g.LoadBalancerNames)
	}
	flat["availability_zones"] = flattenAvailabilityZones(g.AvailabilityZones)

	for key, val := range flatmap.Flatten(flat) {
		attrs[key] = val
	}

	return s, nil
}
// resource_aws_autoscaling_group_retrieve returns a single AutoScaling Group
// by its ID (name), or an error if it cannot be found.
func resource_aws_autoscaling_group_retrieve(id string, autoscalingconn *autoscaling.AutoScaling) (*autoscaling.AutoScalingGroup, error) {
	describeOpts := autoscaling.DescribeAutoScalingGroups{
		Names: []string{id},
	}

	log.Printf("[DEBUG] AutoScaling Group describe configuration: %#v", describeOpts)

	describeGroups, err := autoscalingconn.DescribeAutoScalingGroups(&describeOpts)
	if err != nil {
		return nil, fmt.Errorf("Error retrieving AutoScaling groups: %s", err)
	}

	// Verify AWS returned our group. Bug fix: the previous code guarded this
	// return with "if err != nil", but err is always nil at this point, so a
	// not-found result fell through and indexed a possibly empty slice
	// (panic). Return the error unconditionally.
	if len(describeGroups.AutoScalingGroups) != 1 ||
		describeGroups.AutoScalingGroups[0].Name != id {
		return nil, fmt.Errorf("Unable to find AutoScaling group: %#v", describeGroups.AutoScalingGroups)
	}

	g := describeGroups.AutoScalingGroups[0]
	return &g, nil
}
// resource_aws_autoscaling_group_validation declares the required and
// optional configuration attributes for aws_autoscaling_group.
func resource_aws_autoscaling_group_validation() *config.Validator {
	return &config.Validator{
		Required: []string{
			"name",
			"max_size",
			"min_size",
			"availability_zones.*",
			"launch_configuration",
		},
		Optional: []string{
			"health_check_grace_period",
			"health_check_type",
			"desired_capacity",
			"force_delete",
			"load_balancers.*",
			"vpc_zone_identifier.*",
		},
	}
}
|
package main
import (
"testing"
"os/exec"
"strings"
"regexp"
)
// Exit statuses returned by tsinkfExec.
var CMD_SUCCESS = 0 // the command ran and exited cleanly
var CMD_FAILURE = 1 // the command failed to start or exited non-zero
// matches reports whether the regular expression needle matches anywhere in
// heystack. It panics if needle is not a valid regular expression.
func matches(needle string, heystack string) bool {
	re, err := regexp.Compile(needle)
	if err != nil {
		panic(err)
	}
	return re.MatchString(heystack)
}
// resetState wipes the .tsinkf state directory so each test starts clean.
// It panics if the removal command cannot be run.
func resetState() {
	if err := exec.Command("rm", "-rf", ".tsinkf").Run(); err != nil {
		panic(err)
	}
}
// tsinkfExec runs the tsinkf CLI via "go run" with args appended to the
// command line, returning combined stdout+stderr and CMD_SUCCESS or
// CMD_FAILURE depending on the exit status.
func tsinkfExec(args string) (string, int) {
	cmdLine := []string{"go",
		"run",
		"utils.go",
		"jobs.go",
		"journal.go",
		"store.go",
		"hashing.go",
		"fs.go",
		"tsinkf.go",
		"subcommand.go"}
	cmdLine = append(cmdLine, args)

	cmd := exec.Command("bash", "-c", strings.Join(cmdLine, " "))
	output, err := cmd.CombinedOutput()
	// Idiom fix: return directly instead of an else branch after a return.
	if err != nil {
		return string(output), CMD_FAILURE
	}
	return string(output), CMD_SUCCESS
}
// TestRun exercises the run/show workflow end to end:
//  1. a fresh "run" executes the command and echoes its output,
//  2. re-running the identical command is a no-op (no output),
//  3. "show -v" reports the SUCCEEDED state and the captured output.
func TestRun(t *testing.T) {
	resetState()

	cmd := "run echo OKOKOK"
	output, status := tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute: ", cmd, "\n", output)
	}
	if !matches("OKOKOK", output) {
		t.Fatal("Running did not produce the expected output \"OKOKOK\"")
	}

	// A second run of the same command must be deduplicated.
	output, status = tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute a second time: ", cmd)
	}
	if !matches("^$", output) {
		t.Fatal("Re-run should not re-exute but somehow produced output!\n", output)
	}

	// Verbose show must include both the final state and the captured output.
	cmd = "show -v"
	output, status = tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute a show: ", cmd)
	}
	if !matches("SUCCEEDED", output) {
		t.Fatal("Show log successful statae!\n", output)
	}
	if !matches("OKOKOK\n", output) {
		t.Fatal("Should include command output\n", output)
	}
}
Simplify tsinkfExec: return directly instead of using an else branch after the early return.
package main
import (
"testing"
"os/exec"
"strings"
"regexp"
)
// Exit statuses returned by tsinkfExec.
var CMD_SUCCESS = 0 // the command ran and exited cleanly
var CMD_FAILURE = 1 // the command failed to start or exited non-zero
// matches reports whether the regular expression needle matches anywhere in
// heystack. It panics if needle is not a valid regular expression.
func matches(needle string, heystack string) bool {
	re, err := regexp.Compile(needle)
	if err != nil {
		panic(err)
	}
	return re.MatchString(heystack)
}
// resetState wipes the .tsinkf state directory so each test starts clean.
// It panics if the removal command cannot be run.
func resetState() {
	if err := exec.Command("rm", "-rf", ".tsinkf").Run(); err != nil {
		panic(err)
	}
}
// tsinkfExec runs the tsinkf CLI via "go run" with args appended to the
// command line, returning combined stdout+stderr and CMD_SUCCESS or
// CMD_FAILURE depending on the exit status.
func tsinkfExec(args string) (string, int) {
	sources := []string{
		"utils.go",
		"jobs.go",
		"journal.go",
		"store.go",
		"hashing.go",
		"fs.go",
		"tsinkf.go",
		"subcommand.go",
	}
	cmdLine := append([]string{"go", "run"}, sources...)
	cmdLine = append(cmdLine, args)

	out, err := exec.Command("bash", "-c", strings.Join(cmdLine, " ")).CombinedOutput()
	if err != nil {
		return string(out), CMD_FAILURE
	}
	return string(out), CMD_SUCCESS
}
// TestRun exercises the run/show workflow end to end:
//  1. a fresh "run" executes the command and echoes its output,
//  2. re-running the identical command is a no-op (no output),
//  3. "show -v" reports the SUCCEEDED state and the captured output.
func TestRun(t *testing.T) {
	resetState()

	cmd := "run echo OKOKOK"
	output, status := tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute: ", cmd, "\n", output)
	}
	if !matches("OKOKOK", output) {
		t.Fatal("Running did not produce the expected output \"OKOKOK\"")
	}

	// A second run of the same command must be deduplicated.
	output, status = tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute a second time: ", cmd)
	}
	if !matches("^$", output) {
		t.Fatal("Re-run should not re-exute but somehow produced output!\n", output)
	}

	// Verbose show must include both the final state and the captured output.
	cmd = "show -v"
	output, status = tsinkfExec(cmd)
	if status != CMD_SUCCESS {
		t.Fatal("Failed to execute a show: ", cmd)
	}
	if !matches("SUCCEEDED", output) {
		t.Fatal("Show log successful statae!\n", output)
	}
	if !matches("OKOKOK\n", output) {
		t.Fatal("Should include command output\n", output)
	}
}
|
// +build functional uvmmem
package functional
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/Microsoft/hcsshim/functional/utilities"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// memoryBackingTypeToString returns the human-readable name for a memory
// backing type, panicking on an unrecognized value.
func memoryBackingTypeToString(bt uvm.MemoryBackingType) string {
	var name string
	switch bt {
	case uvm.MemoryBackingTypeVirtual:
		name = "Virtual"
	case uvm.MemoryBackingTypeVirtualDeferred:
		name = "VirtualDeferred"
	case uvm.MemoryBackingTypePhysical:
		name = "Physical"
	default:
		panic(fmt.Sprintf("unknown memory type: %v", bt))
	}
	return name
}
// runMemStartTest creates a utility VM from opts and verifies it starts,
// failing the test on any error.
func runMemStartTest(t *testing.T, opts *uvm.UVMOptions) {
	vm, err := uvm.Create(opts)
	if err != nil {
		t.Fatal(err)
	}
	defer vm.Terminate()

	if err = vm.Start(); err != nil {
		t.Fatal(err)
	}
}
// runMemStartWCOWTest boots a WCOW utility VM: the nanoserver image layers
// plus a temporary scratch directory form the layer folders.
func runMemStartWCOWTest(t *testing.T, opts *uvm.UVMOptions) {
	imageName := "microsoft/nanoserver"
	layers := testutilities.LayerFolders(t, imageName)

	scratchDir := testutilities.CreateTempDir(t)
	defer os.RemoveAll(scratchDir)

	// Bug fix: the scratch directory was created but never added to the
	// layer folders, leaving the UVM without a writable scratch layer.
	opts.LayerFolders = append(layers, scratchDir)
	runMemStartTest(t, opts)
}
// runMemTests starts one utility VM per memory backing type (Virtual,
// VirtualDeferred, Physical) for the given guest OS ("windows" or "linux").
func runMemTests(t *testing.T, os string) {
	types := [3]uvm.MemoryBackingType{
		uvm.MemoryBackingTypeVirtual,
		uvm.MemoryBackingTypeVirtualDeferred,
		uvm.MemoryBackingTypePhysical,
	}

	for _, bt := range types {
		// Shadow the loop variable before taking its address: opts retains
		// &bt, and before Go 1.22 the loop variable is reused across
		// iterations, so a retained pointer could observe a later value.
		bt := bt
		opts := &uvm.UVMOptions{
			ID:                fmt.Sprintf("%s-%s", t.Name(), memoryBackingTypeToString(bt)),
			OperatingSystem:   os,
			MemoryBackingType: &bt,
		}
		if os == "windows" {
			runMemStartWCOWTest(t, opts)
		} else {
			runMemStartTest(t, opts)
		}
	}
}
// TestMemBackingTypeWCOW starts a WCOW utility VM under each memory backing
// type; requires Windows build RS5 or later.
func TestMemBackingTypeWCOW(t *testing.T) {
	testutilities.RequiresBuild(t, osversion.RS5)
	runMemTests(t, "windows")
}
// TestMemBackingTypeLCOW starts an LCOW utility VM under each memory backing
// type; requires Windows build RS5 or later.
func TestMemBackingTypeLCOW(t *testing.T) {
	testutilities.RequiresBuild(t, osversion.RS5)
	runMemTests(t, "linux")
}
// runBenchMemStartTest creates a utility VM from opts and verifies it starts,
// failing the benchmark on any error.
func runBenchMemStartTest(b *testing.B, opts *uvm.UVMOptions) {
	vm, err := uvm.Create(opts)
	if err != nil {
		b.Fatal(err)
	}
	defer vm.Terminate()

	if err = vm.Start(); err != nil {
		b.Fatal(err)
	}
}
// runBenchMemStartLcowTest times LCOW utility-VM creation and start for the
// given memory backing type, once per benchmark iteration.
func runBenchMemStartLcowTest(b *testing.B, bt uvm.MemoryBackingType) {
	for i := 0; i < b.N; i++ {
		runBenchMemStartTest(b, &uvm.UVMOptions{
			ID:                fmt.Sprintf("%s-%s-%d", b.Name(), memoryBackingTypeToString(bt), i),
			OperatingSystem:   "linux",
			MemoryBackingType: &bt,
		})
	}
}
// BenchmarkMemBackingTypeVirtualLCOW measures LCOW UVM start with virtually
// backed memory; logrus output is discarded to keep timing clean.
func BenchmarkMemBackingTypeVirtualLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard)

	runBenchMemStartLcowTest(b, uvm.MemoryBackingTypeVirtual)
}
// BenchmarkMemBackingTypeVirtualDeferredLCOW measures LCOW UVM start with
// deferred virtual memory backing; logrus output is discarded.
func BenchmarkMemBackingTypeVirtualDeferredLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard)

	runBenchMemStartLcowTest(b, uvm.MemoryBackingTypeVirtualDeferred)
}
// BenchmarkMemBackingTypePhyscialLCOW measures LCOW UVM start with physically
// backed memory; logrus output is discarded. (Name keeps the existing
// "Physcial" spelling so benchmark result histories stay comparable.)
func BenchmarkMemBackingTypePhyscialLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard)

	runBenchMemStartLcowTest(b, uvm.MemoryBackingTypePhysical)
}
Use Scratch in TestMemBackingTypeWCOW
Signed-off-by: John Howard <50ae662f5fcde39b7d1ed786133b7c61a25411dd@microsoft.com>
// +build functional uvmmem
package functional
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/Microsoft/hcsshim/functional/utilities"
"github.com/Microsoft/hcsshim/internal/uvm"
"github.com/Microsoft/hcsshim/osversion"
"github.com/sirupsen/logrus"
)
// memoryBackingTypeToString returns the human-readable name for a memory
// backing type, panicking on an unrecognized value.
func memoryBackingTypeToString(bt uvm.MemoryBackingType) string {
	var name string
	switch bt {
	case uvm.MemoryBackingTypeVirtual:
		name = "Virtual"
	case uvm.MemoryBackingTypeVirtualDeferred:
		name = "VirtualDeferred"
	case uvm.MemoryBackingTypePhysical:
		name = "Physical"
	default:
		panic(fmt.Sprintf("unknown memory type: %v", bt))
	}
	return name
}
// runMemStartTest creates a utility VM from opts and verifies it starts,
// failing the test on any error.
func runMemStartTest(t *testing.T, opts *uvm.UVMOptions) {
	vm, err := uvm.Create(opts)
	if err != nil {
		t.Fatal(err)
	}
	defer vm.Terminate()

	if err = vm.Start(); err != nil {
		t.Fatal(err)
	}
}
// runMemStartWCOWTest boots a WCOW utility VM backed by the nanoserver
// image layers plus a temporary scratch directory.
func runMemStartWCOWTest(t *testing.T, opts *uvm.UVMOptions) {
	layers := testutilities.LayerFolders(t, "microsoft/nanoserver")
	scratch := testutilities.CreateTempDir(t)
	defer os.RemoveAll(scratch)
	opts.LayerFolders = append(layers, scratch)
	runMemStartTest(t, opts)
}
// runMemTests boots one utility VM per memory backing type for the given
// guest OS ("windows" routes through the WCOW layer setup).
func runMemTests(t *testing.T, os string) {
	types := [3]uvm.MemoryBackingType{
		uvm.MemoryBackingTypeVirtual,
		uvm.MemoryBackingTypeVirtualDeferred,
		uvm.MemoryBackingTypePhysical,
	}
	for _, bt := range types {
		// Shadow the loop variable: opts stores &bt, and without this
		// copy every iteration's options would alias the same variable
		// (pre-Go1.22 loop-variable capture hazard).
		bt := bt
		opts := &uvm.UVMOptions{
			ID:                fmt.Sprintf("%s-%s", t.Name(), memoryBackingTypeToString(bt)),
			OperatingSystem:   os,
			MemoryBackingType: &bt,
		}
		if os == "windows" {
			runMemStartWCOWTest(t, opts)
		} else {
			runMemStartTest(t, opts)
		}
	}
}
// TestMemBackingTypeWCOW boots WCOW utility VMs across every memory
// backing type; requires an RS5+ build.
func TestMemBackingTypeWCOW(t *testing.T) {
	const guest = "windows"
	testutilities.RequiresBuild(t, osversion.RS5)
	runMemTests(t, guest)
}
// TestMemBackingTypeLCOW boots LCOW utility VMs across every memory
// backing type; requires an RS5+ build.
func TestMemBackingTypeLCOW(t *testing.T) {
	const guest = "linux"
	testutilities.RequiresBuild(t, osversion.RS5)
	runMemTests(t, guest)
}
// runBenchMemStartTest creates and starts a utility VM, failing the
// benchmark on any error; the VM is terminated on return.
func runBenchMemStartTest(b *testing.B, opts *uvm.UVMOptions) {
	vm, createErr := uvm.Create(opts)
	if createErr != nil {
		b.Fatal(createErr)
	}
	defer vm.Terminate()
	if startErr := vm.Start(); startErr != nil {
		b.Fatal(startErr)
	}
}
// runBenchMemStartLcowTest starts one LCOW utility VM per benchmark
// iteration with the requested memory backing type.
func runBenchMemStartLcowTest(b *testing.B, bt uvm.MemoryBackingType) {
	name := memoryBackingTypeToString(bt)
	for iter := 0; iter < b.N; iter++ {
		runBenchMemStartTest(b, &uvm.UVMOptions{
			ID:                fmt.Sprintf("%s-%s-%d", b.Name(), name, iter),
			OperatingSystem:   "linux",
			MemoryBackingType: &bt,
		})
	}
}
// BenchmarkMemBackingTypeVirtualLCOW times LCOW utility-VM start with
// virtual memory backing.
func BenchmarkMemBackingTypeVirtualLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard) // keep logging out of the timings
	backing := uvm.MemoryBackingTypeVirtual
	runBenchMemStartLcowTest(b, backing)
}
// BenchmarkMemBackingTypeVirtualDeferredLCOW times LCOW utility-VM start
// with virtual-deferred memory backing.
func BenchmarkMemBackingTypeVirtualDeferredLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard) // keep logging out of the timings
	backing := uvm.MemoryBackingTypeVirtualDeferred
	runBenchMemStartLcowTest(b, backing)
}
// BenchmarkMemBackingTypePhyscialLCOW times LCOW utility-VM start with
// physical memory backing. (Exported name keeps the original typo.)
func BenchmarkMemBackingTypePhyscialLCOW(b *testing.B) {
	//testutilities.RequiresBuild(t, osversion.RS5)
	logrus.SetOutput(ioutil.Discard) // keep logging out of the timings
	backing := uvm.MemoryBackingTypePhysical
	runBenchMemStartLcowTest(b, backing)
}
|
package levant
import (
"fmt"
"strings"
"sync"
nomad "github.com/hashicorp/nomad/api"
nomadStructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/jrasell/levant/logging"
)
// checkFailedDeployment helps log information about deployment failures.
// It finds every allocation of the deployment that is not running and
// inspects each one concurrently, returning once all inspections finish.
func (l *levantDeployment) checkFailedDeployment(depID *string) {
	var allocIDS []string

	allocs, _, err := l.nomad.Deployments().Allocations(*depID, nil)
	if err != nil {
		// Dereference depID so the log carries the ID rather than a
		// pointer value, and return: allocs is unusable after an error.
		logging.Error("levant/failure_inspector: unable to query deployment allocations for deployment %s",
			*depID)
		return
	}

	// Iterate the allocations on the deployment and create a list of each allocID
	// to inspect that is not running.
	for _, alloc := range allocs {
		if alloc.ClientStatus != nomadStructs.AllocClientStatusRunning {
			allocIDS = append(allocIDS, alloc.ID)
		}
	}

	// Setup a waitgroup so the function doesn't return until all allocations have
	// been inspected.
	var wg sync.WaitGroup
	wg.Add(len(allocIDS))

	// Inspect each allocation.
	for _, id := range allocIDS {
		logging.Debug("levant/failure_inspector: launching allocation inspector for alloc %v", id)
		go l.allocInspector(id, &wg)
	}

	wg.Wait()
}
// allocInspector inspects an allocation's task events to log any useful
// information which may help debug deployment failures.
func (l *levantDeployment) allocInspector(allocID string, wg *sync.WaitGroup) {
	// Inform the wait group we have finished our task upon completion.
	defer wg.Done()

	resp, _, err := l.nomad.Allocations().Info(allocID, nil)
	if err != nil {
		logging.Error("levant/failure_inspector: unable to query alloc %v: %v", allocID, err)
		return
	}

	// Iterate each Task and Event to log any relevant information which may
	// help debug deployment failures.
	for _, task := range resp.TaskStates {
		for _, event := range task.Events {
			var desc string

			switch event.Type {
			case nomad.TaskFailedValidation:
				if event.ValidationError != "" {
					desc = event.ValidationError
				} else {
					desc = "validation of task failed"
				}
			case nomad.TaskSetupFailure:
				if event.SetupError != "" {
					desc = event.SetupError
				} else {
					desc = "task setup failed"
				}
			case nomad.TaskDriverFailure:
				if event.DriverError != "" {
					desc = event.DriverError
				} else {
					desc = "failed to start task"
				}
			case nomad.TaskArtifactDownloadFailed:
				if event.DownloadError != "" {
					desc = event.DownloadError
				} else {
					desc = "the task failed to download artifacts"
				}
			case nomad.TaskKilling:
				if event.KillReason != "" {
					desc = fmt.Sprintf("the task was killed: %v", event.KillReason)
				} else if event.KillTimeout != 0 {
					desc = fmt.Sprintf("sent interrupt, waiting %v before force killing", event.KillTimeout)
				} else {
					desc = "the task was sent interrupt"
				}
			case nomad.TaskKilled:
				if event.KillError != "" {
					desc = event.KillError
				} else {
					desc = "the task was successfully killed"
				}
			case nomad.TaskTerminated:
				var parts []string
				parts = append(parts, fmt.Sprintf("exit Code %d", event.ExitCode))
				if event.Signal != 0 {
					parts = append(parts, fmt.Sprintf("signal %d", event.Signal))
				}
				if event.Message != "" {
					parts = append(parts, fmt.Sprintf("exit message %q", event.Message))
				}
				desc = strings.Join(parts, ", ")
			case nomad.TaskNotRestarting:
				if event.RestartReason != "" {
					desc = event.RestartReason
				} else {
					desc = "the task exceeded restart policy"
				}
			case nomad.TaskSiblingFailed:
				if event.FailedSibling != "" {
					desc = fmt.Sprintf("task's sibling %q failed", event.FailedSibling)
				} else {
					desc = "task's sibling failed"
				}
			case nomad.TaskLeaderDead:
				desc = "leader task in group is dead"
			}

			// If we have matched and have an updated desc then log the
			// appropriate information; otherwise fall back to the event's
			// own display message so unhandled event types are not
			// silently dropped (catchall, matching the later revision).
			if desc != "" {
				logging.Error("levant/failure_inspector: alloc %s incurred event %s because %s",
					allocID, strings.ToLower(event.Type), strings.TrimSpace(desc))
			} else {
				logging.Error("levant/failure_inspector: alloc %s logged for failure; event_type: %s; message: %s",
					allocID,
					strings.ToLower(event.Type),
					strings.ToLower(event.DisplayMessage))
			}
		}
	}
}
Add a catchall for unhandled failure cases
This adds a catchall that logs useful events not handled by any specific case upon deployment failure; without it, those events are missed under certain failure scenarios.
package levant
import (
"fmt"
"strings"
"sync"
nomad "github.com/hashicorp/nomad/api"
nomadStructs "github.com/hashicorp/nomad/nomad/structs"
"github.com/jrasell/levant/logging"
)
// checkFailedDeployment helps log information about deployment failures.
// It finds every allocation of the deployment that is not running and
// inspects each one concurrently, returning once all inspections finish.
func (l *levantDeployment) checkFailedDeployment(depID *string) {
	var allocIDS []string

	allocs, _, err := l.nomad.Deployments().Allocations(*depID, nil)
	if err != nil {
		// Dereference depID so the log carries the ID rather than a
		// pointer value, and return: allocs is unusable after an error.
		logging.Error("levant/failure_inspector: unable to query deployment allocations for deployment %s",
			*depID)
		return
	}

	// Iterate the allocations on the deployment and create a list of each allocID
	// to inspect that is not running.
	for _, alloc := range allocs {
		if alloc.ClientStatus != nomadStructs.AllocClientStatusRunning {
			allocIDS = append(allocIDS, alloc.ID)
		}
	}

	// Setup a waitgroup so the function doesn't return until all allocations have
	// been inspected.
	var wg sync.WaitGroup
	wg.Add(len(allocIDS))

	// Inspect each allocation.
	for _, id := range allocIDS {
		logging.Debug("levant/failure_inspector: launching allocation inspector for alloc %v", id)
		go l.allocInspector(id, &wg)
	}

	wg.Wait()
}
// allocInspector inspects an allocations events to log any useful information
// which may help debug deployment failures.
// It queries the allocation once, then maps each task event type to a
// human-readable description; events with no specific handler fall through
// to a catchall that logs the event's own display message.
func (l *levantDeployment) allocInspector(allocID string, wg *sync.WaitGroup) {
	// Inform the wait group we have finished our task upon completion.
	defer wg.Done()
	resp, _, err := l.nomad.Allocations().Info(allocID, nil)
	if err != nil {
		logging.Error("levant/failure_inspector: unable to query alloc %v: %v", allocID, err)
		return
	}
	// Iterate each each Task and Event to log any relevant information which may
	// help debug deployment failures.
	for _, task := range resp.TaskStates {
		for _, event := range task.Events {
			// desc stays empty unless a case below recognizes the event.
			var desc string
			switch event.Type {
			case nomad.TaskFailedValidation:
				if event.ValidationError != "" {
					desc = event.ValidationError
				} else {
					desc = "validation of task failed"
				}
			case nomad.TaskSetupFailure:
				if event.SetupError != "" {
					desc = event.SetupError
				} else {
					desc = "task setup failed"
				}
			case nomad.TaskDriverFailure:
				if event.DriverError != "" {
					desc = event.DriverError
				} else {
					desc = "failed to start task"
				}
			case nomad.TaskArtifactDownloadFailed:
				if event.DownloadError != "" {
					desc = event.DownloadError
				} else {
					desc = "the task failed to download artifacts"
				}
			case nomad.TaskKilling:
				if event.KillReason != "" {
					desc = fmt.Sprintf("the task was killed: %v", event.KillReason)
				} else if event.KillTimeout != 0 {
					desc = fmt.Sprintf("sent interrupt, waiting %v before force killing", event.KillTimeout)
				} else {
					desc = "the task was sent interrupt"
				}
			case nomad.TaskKilled:
				if event.KillError != "" {
					desc = event.KillError
				} else {
					desc = "the task was successfully killed"
				}
			case nomad.TaskTerminated:
				// Assemble exit code, optional signal, and optional exit
				// message into one comma-separated description.
				var parts []string
				parts = append(parts, fmt.Sprintf("exit Code %d", event.ExitCode))
				if event.Signal != 0 {
					parts = append(parts, fmt.Sprintf("signal %d", event.Signal))
				}
				if event.Message != "" {
					parts = append(parts, fmt.Sprintf("exit message %q", event.Message))
				}
				desc = strings.Join(parts, ", ")
			case nomad.TaskNotRestarting:
				if event.RestartReason != "" {
					desc = event.RestartReason
				} else {
					desc = "the task exceeded restart policy"
				}
			case nomad.TaskSiblingFailed:
				if event.FailedSibling != "" {
					desc = fmt.Sprintf("task's sibling %q failed", event.FailedSibling)
				} else {
					desc = "task's sibling failed"
				}
			case nomad.TaskLeaderDead:
				desc = "leader task in group is dead"
			}
			// If we have matched and have an updated desc then log the appropriate
			// information.
			if desc != "" {
				logging.Error("levant/failure_inspector: alloc %s incurred event %s because %s",
					allocID, strings.ToLower(event.Type), strings.TrimSpace(desc))
			} else {
				// Catchall: no handler matched, so surface the event's own
				// display message rather than dropping it.
				logging.Error("levant/failure_inspector: alloc %s logged for failure; event_type: %s; message: %s",
					allocID,
					strings.ToLower(event.Type),
					strings.ToLower(event.DisplayMessage))
			}
		}
	}
}
|
package modules
import (
"bufio"
"fmt"
"github.com/davidscholberg/go-i3barjson"
"os/exec"
"strings"
)
// Zfs represents the configuration data for the ZFS block
type Zfs struct {
	BlockConfigBase `yaml:",inline"` // common block options shared by all modules
	PoolName string `yaml:"zpool_name"` // name of the zpool to monitor
	ZpoolBin string `yaml:"zpool_bin"` // path to the zpool executable (run via sudo)
}
// UpdateBlock updates the ZFS block.
//
// It runs `sudo <zpool_bin> status <pool>`, scans the output for the
// "state:" line, and shows the pool state; the block is urgent unless
// the state is ONLINE, and on any error the error text is shown instead.
func (c Zfs) UpdateBlock(b *i3barjson.Block) {
	b.Color = c.Color
	fullTextFmt := fmt.Sprintf("%s%s - %%s", c.Label, c.PoolName)
	zpoolCmd := exec.Command("sudo", c.ZpoolBin, "status", c.PoolName)
	out, err := zpoolCmd.StdoutPipe()
	if err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	if err := zpoolCmd.Start(); err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	// Reap the child process after we finish reading its output.
	defer zpoolCmd.Wait()
	buff := bufio.NewScanner(out)
	for buff.Scan() {
		line := strings.TrimSpace(buff.Text())
		if strings.HasPrefix(line, "state") {
			// SplitN plus a length guard: a "state"-prefixed line with
			// no ":" previously caused an index-out-of-range panic.
			split := strings.SplitN(line, ":", 2)
			if len(split) < 2 {
				continue
			}
			status := strings.TrimSpace(split[1])
			if status == "ONLINE" {
				b.Urgent = false
			} else {
				b.Urgent = true
			}
			b.FullText = fmt.Sprintf(fullTextFmt, status)
			return
		}
	}
	// Surface scanner errors instead of mislabeling them as NOT FOUND.
	if err := buff.Err(); err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	b.Urgent = true
	b.FullText = fmt.Sprintf(fullTextFmt, "NOT FOUND")
	return
}
Only use Label and status in block output
package modules
import (
"bufio"
"fmt"
"github.com/davidscholberg/go-i3barjson"
"os/exec"
"strings"
)
// Zfs represents the configuration data for the ZFS block
type Zfs struct {
	BlockConfigBase `yaml:",inline"` // common block options shared by all modules
	PoolName string `yaml:"zpool_name"` // name of the zpool to monitor
	ZpoolBin string `yaml:"zpool_bin"` // path to the zpool executable (run via sudo)
}
// UpdateBlock updates the ZFS block.
//
// It runs `sudo <zpool_bin> status <pool>`, scans the output for the
// "state:" line, and shows only the label plus the pool state; the block
// is urgent unless the state is ONLINE, and on any error the error text
// is shown instead.
func (c Zfs) UpdateBlock(b *i3barjson.Block) {
	b.Color = c.Color
	fullTextFmt := fmt.Sprintf("%s%%s", c.Label)
	zpoolCmd := exec.Command("sudo", c.ZpoolBin, "status", c.PoolName)
	out, err := zpoolCmd.StdoutPipe()
	if err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	if err := zpoolCmd.Start(); err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	// Reap the child process after we finish reading its output.
	defer zpoolCmd.Wait()
	buff := bufio.NewScanner(out)
	for buff.Scan() {
		line := strings.TrimSpace(buff.Text())
		if strings.HasPrefix(line, "state") {
			// SplitN plus a length guard: a "state"-prefixed line with
			// no ":" previously caused an index-out-of-range panic.
			split := strings.SplitN(line, ":", 2)
			if len(split) < 2 {
				continue
			}
			status := strings.TrimSpace(split[1])
			if status == "ONLINE" {
				b.Urgent = false
			} else {
				b.Urgent = true
			}
			b.FullText = fmt.Sprintf(fullTextFmt, status)
			return
		}
	}
	// Surface scanner errors instead of mislabeling them as NOT FOUND.
	if err := buff.Err(); err != nil {
		b.Urgent = true
		b.FullText = fmt.Sprintf(fullTextFmt, err.Error())
		return
	}
	b.Urgent = true
	b.FullText = fmt.Sprintf(fullTextFmt, "NOT FOUND")
	return
}
|
package tunnel
import (
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
)
// debug toggles verbose output from the tunnel server and client under test.
var debug = false
// testEnv bundles the server, client, and both listeners for one tunnel test.
type testEnv struct {
	server *Server // tunnel server under test
	client *Client // tunnel client connected to server
	remoteListener net.Listener // public side served by the tunnel server
	localListener net.Listener // local service the tunnel forwards to
}
// testConfig holds optional knobs for singleTestEnvironment.
type testConfig struct {
	localHandler http.Handler // handler for the local server; defaults to echo()
}
// singleTestEnvironment starts a tunnel server, a connected tunnel client,
// and a local HTTP server, and returns them bundled in a testEnv. The
// caller must Close the returned testEnv.
func singleTestEnvironment(cfg *testConfig) (*testEnv, error) {
	if cfg == nil {
		cfg = &testConfig{}
	}
	var identifier = "123abc"
	// The NewServer error used to be discarded with _, which surfaced
	// later as a nil-pointer panic; check it here.
	tunnelServer, err := NewServer(&ServerConfig{Debug: debug})
	if err != nil {
		return nil, err
	}
	remoteServer := http.Server{Handler: tunnelServer}
	remoteListener, err := net.Listen("tcp", ":0")
	if err != nil {
		return nil, err
	}
	tunnelServer.AddHost(remoteListener.Addr().String(), identifier)
	go remoteServer.Serve(remoteListener)
	localListener, err := net.Listen("tcp", ":0")
	if err != nil {
		remoteListener.Close() // don't leak the first listener
		return nil, err
	}
	tunnelClient, err := NewClient(&ClientConfig{
		Identifier: identifier,
		ServerAddr: remoteListener.Addr().String(),
		LocalAddr:  localListener.Addr().String(),
		Debug:      debug,
	})
	if err != nil {
		remoteListener.Close()
		localListener.Close()
		return nil, err
	}
	go tunnelClient.Start()
	<-tunnelClient.StartNotify()
	localHandler := echo()
	if cfg.localHandler != nil {
		localHandler = cfg.localHandler
	}
	localServer := http.Server{Handler: localHandler}
	go localServer.Serve(localListener)
	return &testEnv{
		server:         tunnelServer,
		client:         tunnelClient,
		remoteListener: remoteListener,
		localListener:  localListener,
	}, nil
}
// Close tears down the client and both listeners, tolerating nil fields.
func (t *testEnv) Close() {
	if c := t.client; c != nil {
		c.Close()
	}
	if l := t.remoteListener; l != nil {
		l.Close()
	}
	if l := t.localListener; l != nil {
		l.Close()
	}
}
// TestMultipleRequest fires 100 concurrent echo requests through the
// tunnel and verifies each response round-trips unchanged.
func TestMultipleRequest(t *testing.T) {
	tenv, err := singleTestEnvironment(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// make a request to tunnelserver, this should be tunneled to local server
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			msg := "hello" + strconv.Itoa(i)
			res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
			if err != nil {
				t.Errorf("make request: %s", err)
			}
			if res != msg {
				t.Errorf("Expecting %s, got %s", msg, res)
			}
		}(i)
	}
	wg.Wait()
	// The explicit tenv.Close() that followed wg.Wait() is removed: the
	// deferred Close already tears everything down, and closing twice
	// double-closed the client and listeners.
}
// TestMultipleLatencyRequest is TestMultipleRequest against a local
// server that injects random latency into each response.
func TestMultipleLatencyRequest(t *testing.T) {
	tenv, err := singleTestEnvironment(&testConfig{
		localHandler: randomLatencyEcho(),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// make a request to tunnelserver, this should be tunneled to local server
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			msg := "hello" + strconv.Itoa(i)
			res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
			if err != nil {
				t.Errorf("make request: %s", err)
			}
			if res != msg {
				t.Errorf("Expecting %s, got %s", msg, res)
			}
		}(i)
	}
	wg.Wait()
	// Explicit Close removed: the deferred Close above suffices, and
	// closing twice double-closed the client and listeners.
}
// TestNoClient verifies the server's error body when no client session
// is established.
func TestNoClient(t *testing.T) {
	tenv, err := singleTestEnvironment(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// close client, this is the main point of the test
	tenv.client.Close()
	msg := "hello"
	res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
	if err != nil {
		t.Errorf("make request: %s", err)
	}
	const want = "no client session established"
	if res != want {
		// The old message printed msg ("hello") as the expectation,
		// which was misleading; print what was actually compared.
		t.Errorf("Expecting '%s', got '%s'", want, res)
	}
}
// TestNoLocalServer verifies the server's error body when the local
// service behind the tunnel is unreachable.
func TestNoLocalServer(t *testing.T) {
	tenv, err := singleTestEnvironment(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// close local listener, this is the main point of the test
	tenv.localListener.Close()
	msg := "hello"
	res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
	if err != nil {
		t.Errorf("make request: %s", err)
	}
	const want = "no local server"
	if res != want {
		// The old message printed msg ("hello") as the expectation,
		// which was misleading; print what was actually compared.
		t.Errorf("Expecting '%s', got '%s'", want, res)
	}
}
// TestSingleRequest sends one echo request through the tunnel and checks
// the response round-trips unchanged.
func TestSingleRequest(t *testing.T) {
	tenv, envErr := singleTestEnvironment(nil)
	if envErr != nil {
		t.Fatal(envErr)
	}
	const msg = "hello"
	res, reqErr := makeRequest(tenv.remoteListener.Addr().String(), msg)
	if reqErr != nil {
		t.Errorf("make request: %s", reqErr)
	}
	if res != msg {
		t.Errorf("Expecting %s, got %s", msg, res)
	}
	tenv.Close()
}
func TestSingleLatencyRequest(t *testing.T) {
tenv, err := singleTestEnvironment(&testConfig{
localHandler: randomLatencyEcho(),
})
if err != nil {
t.Fatal(err)
}
msg := "hello"
res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
if err != nil {
t.Errorf("make request: %s", err)
}
if res != msg {
t.Errorf("Expecting %s, got %s", msg, res)
}
tenv.Close()
}
func makeRequest(serverAddr, msg string) (string, error) {
resp, err := http.Get("http://" + serverAddr + "/?echo=" + msg)
if err != nil {
return "", err
}
defer resp.Body.Close()
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return strings.TrimSpace(string(res)), nil
}
func echo() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
msg := r.URL.Query().Get("echo")
io.WriteString(w, msg)
})
}
// randomLatencyEcho behaves like echo but sleeps a random duration of up
// to two seconds first, simulating a slow local server.
func randomLatencyEcho() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		delay := time.Duration(rand.Intn(2000)) * time.Millisecond
		time.Sleep(delay)
		io.WriteString(w, r.URL.Query().Get("echo"))
	})
}
tunnel_test: enable debug mode to see what's happening
package tunnel
import (
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
)
// debug enables verbose tunnel logging so test failures are easier to trace.
var debug = true
// testEnv bundles the server, client, and both listeners for one tunnel test.
type testEnv struct {
	server *Server // tunnel server under test
	client *Client // tunnel client connected to server
	remoteListener net.Listener // public side served by the tunnel server
	localListener net.Listener // local service the tunnel forwards to
}
// testConfig holds optional knobs for singleTestEnvironment.
type testConfig struct {
	localHandler http.Handler // handler for the local server; defaults to echo()
}
// singleTestEnvironment starts a tunnel server, a connected tunnel client,
// and a local HTTP server, and returns them bundled in a testEnv. The
// caller must Close the returned testEnv.
func singleTestEnvironment(cfg *testConfig) (*testEnv, error) {
	if cfg == nil {
		cfg = &testConfig{}
	}
	var identifier = "123abc"
	// The NewServer error used to be discarded with _, which surfaced
	// later as a nil-pointer panic; check it here.
	tunnelServer, err := NewServer(&ServerConfig{Debug: debug})
	if err != nil {
		return nil, err
	}
	remoteServer := http.Server{Handler: tunnelServer}
	remoteListener, err := net.Listen("tcp", ":0")
	if err != nil {
		return nil, err
	}
	tunnelServer.AddHost(remoteListener.Addr().String(), identifier)
	go remoteServer.Serve(remoteListener)
	localListener, err := net.Listen("tcp", ":0")
	if err != nil {
		remoteListener.Close() // don't leak the first listener
		return nil, err
	}
	tunnelClient, err := NewClient(&ClientConfig{
		Identifier: identifier,
		ServerAddr: remoteListener.Addr().String(),
		LocalAddr:  localListener.Addr().String(),
		Debug:      debug,
	})
	if err != nil {
		remoteListener.Close()
		localListener.Close()
		return nil, err
	}
	go tunnelClient.Start()
	<-tunnelClient.StartNotify()
	localHandler := echo()
	if cfg.localHandler != nil {
		localHandler = cfg.localHandler
	}
	localServer := http.Server{Handler: localHandler}
	go localServer.Serve(localListener)
	return &testEnv{
		server:         tunnelServer,
		client:         tunnelClient,
		remoteListener: remoteListener,
		localListener:  localListener,
	}, nil
}
// Close tears down the client and both listeners, tolerating nil fields.
func (t *testEnv) Close() {
	if c := t.client; c != nil {
		c.Close()
	}
	if l := t.remoteListener; l != nil {
		l.Close()
	}
	if l := t.localListener; l != nil {
		l.Close()
	}
}
// TestMultipleRequest fires 100 concurrent echo requests through the
// tunnel and verifies each response round-trips unchanged.
func TestMultipleRequest(t *testing.T) {
	tenv, err := singleTestEnvironment(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// make a request to tunnelserver, this should be tunneled to local server
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			msg := "hello" + strconv.Itoa(i)
			res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
			if err != nil {
				t.Errorf("make request: %s", err)
			}
			if res != msg {
				t.Errorf("Expecting %s, got %s", msg, res)
			}
		}(i)
	}
	wg.Wait()
	// The explicit tenv.Close() that followed wg.Wait() is removed: the
	// deferred Close already tears everything down, and closing twice
	// double-closed the client and listeners.
}
// TestMultipleLatencyRequest is TestMultipleRequest against a local
// server that injects random latency into each response.
func TestMultipleLatencyRequest(t *testing.T) {
	tenv, err := singleTestEnvironment(&testConfig{
		localHandler: randomLatencyEcho(),
	})
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// make a request to tunnelserver, this should be tunneled to local server
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			msg := "hello" + strconv.Itoa(i)
			res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
			if err != nil {
				t.Errorf("make request: %s", err)
			}
			if res != msg {
				t.Errorf("Expecting %s, got %s", msg, res)
			}
		}(i)
	}
	wg.Wait()
	// Explicit Close removed: the deferred Close above suffices, and
	// closing twice double-closed the client and listeners.
}
// TestNoClient verifies the server's error body when no client session
// is established.
func TestNoClient(t *testing.T) {
	tenv, envErr := singleTestEnvironment(nil)
	if envErr != nil {
		t.Fatal(envErr)
	}
	// close client, this is the main point of the test
	tenv.client.Close()
	const want = "no client session established"
	res, reqErr := makeRequest(tenv.remoteListener.Addr().String(), "hello")
	if reqErr != nil {
		t.Errorf("make request: %s", reqErr)
	}
	if res != want {
		t.Errorf("Expecting '%s', got '%s'", want, res)
	}
	tenv.Close()
}
// TestNoLocalServer verifies the server's error body when the local
// service behind the tunnel is unreachable.
func TestNoLocalServer(t *testing.T) {
	tenv, err := singleTestEnvironment(nil)
	if err != nil {
		t.Fatal(err)
	}
	defer tenv.Close()
	// close local listener, this is the main point of the test
	tenv.localListener.Close()
	msg := "hello"
	res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
	if err != nil {
		t.Errorf("make request: %s", err)
	}
	const want = "no local server"
	if res != want {
		// The old message printed msg ("hello") as the expectation,
		// which was misleading; print what was actually compared.
		t.Errorf("Expecting '%s', got '%s'", want, res)
	}
}
// TestSingleRequest sends one echo request through the tunnel and checks
// the response round-trips unchanged.
func TestSingleRequest(t *testing.T) {
	tenv, envErr := singleTestEnvironment(nil)
	if envErr != nil {
		t.Fatal(envErr)
	}
	const msg = "hello"
	res, reqErr := makeRequest(tenv.remoteListener.Addr().String(), msg)
	if reqErr != nil {
		t.Errorf("make request: %s", reqErr)
	}
	if res != msg {
		t.Errorf("Expecting %s, got %s", msg, res)
	}
	tenv.Close()
}
func TestSingleLatencyRequest(t *testing.T) {
tenv, err := singleTestEnvironment(&testConfig{
localHandler: randomLatencyEcho(),
})
if err != nil {
t.Fatal(err)
}
msg := "hello"
res, err := makeRequest(tenv.remoteListener.Addr().String(), msg)
if err != nil {
t.Errorf("make request: %s", err)
}
if res != msg {
t.Errorf("Expecting %s, got %s", msg, res)
}
tenv.Close()
}
func makeRequest(serverAddr, msg string) (string, error) {
resp, err := http.Get("http://" + serverAddr + "/?echo=" + msg)
if err != nil {
return "", err
}
defer resp.Body.Close()
res, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
return strings.TrimSpace(string(res)), nil
}
func echo() http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
msg := r.URL.Query().Get("echo")
io.WriteString(w, msg)
})
}
// randomLatencyEcho behaves like echo but sleeps a random duration of up
// to two seconds first, simulating a slow local server.
func randomLatencyEcho() http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		delay := time.Duration(rand.Intn(2000)) * time.Millisecond
		time.Sleep(delay)
		io.WriteString(w, r.URL.Query().Get("echo"))
	})
}
|
package termite
// TODO - this list of imports is scary; split up?
import (
"bytes"
"fmt"
"github.com/hanwen/go-fuse/fuse"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
)
// WorkerTask bundles everything needed to run one work request inside a
// FUSE-mounted sandbox on the worker.
type WorkerTask struct {
	fuseFs *WorkerFuseFs // private FUSE mount the command runs inside
	*WorkRequest
	*WorkReply
	stdinConn net.Conn // optional remote stdin stream; may be nil
	mirror *Mirror
}
// Run executes the work request inside this task's private FUSE mount:
// it wires up stdin/stdout/stderr pipes, starts the process (re-exec'ed
// through the chroot helper as "nobody" when running as root), waits for
// it, and saves modified files back through the mirror.
//
// NOTE(review): pre-Go1 dialect (os.Error, proc.Wait(0), os.ProcAttr).
func (me *WorkerTask) Run() os.Error {
	me.fuseFs.MountState.Debug = me.WorkRequest.Debug
	me.fuseFs.fsConnector.Debug = me.WorkRequest.Debug
	rStdout, wStdout, err := os.Pipe()
	if err != nil {
		return err
	}
	rStderr, wStderr, err := os.Pipe()
	if err != nil {
		return err
	}
	rStdin, wStdin, err := os.Pipe()
	if err != nil {
		return err
	}
	attr := os.ProcAttr{
		Env:   me.WorkRequest.Env,
		Files: []*os.File{rStdin, wStdout, wStderr},
	}
	cmd := []string{}
	binary := ""
	if os.Geteuid() == 0 {
		// As root, run through the chroot helper so the job executes as
		// "nobody" confined to the FUSE mount.
		nobody, err := user.Lookup("nobody")
		if err != nil {
			return err
		}
		binary = me.mirror.daemon.ChrootBinary
		cmd = []string{binary, "-dir", me.WorkRequest.Dir,
			"-uid", fmt.Sprintf("%d", nobody.Uid), "-gid", fmt.Sprintf("%d", nobody.Gid),
			"-binary", me.WorkRequest.Binary,
			me.fuseFs.mount}
		newcmd := make([]string, len(cmd)+len(me.WorkRequest.Argv))
		copy(newcmd, cmd)
		copy(newcmd[len(cmd):], me.WorkRequest.Argv)
		cmd = newcmd
	} else {
		cmd = me.WorkRequest.Argv
		binary = me.WorkRequest.Argv[0]
		attr.Dir = filepath.Join(me.fuseFs.mount, me.WorkRequest.Dir)
		log.Println("running in", attr.Dir)
	}
	log.Println("starting cmd", cmd, "in", me.fuseFs.mount)
	proc, err := os.StartProcess(binary, cmd, &attr)
	if err != nil {
		log.Println("Error", err)
		return err
	}
	// The child owns these pipe ends now; drop our copies.
	wStdout.Close()
	wStderr.Close()
	rStdin.Close()
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		io.Copy(stdout, rStdout)
		// Close the read end once drained: leaving it open leaked one
		// fd per task.
		rStdout.Close()
		wg.Done()
	}()
	go func() {
		io.Copy(stderr, rStderr)
		rStderr.Close()
		wg.Done()
	}()
	if me.stdinConn != nil {
		go func() {
			HookedCopy(wStdin, me.stdinConn, PrintStdinSliceLen)
			// No waiting: if the process exited, we kill the connection.
			wStdin.Close()
		}()
	} else {
		wStdin.Close()
	}
	me.WorkReply.Exit, err = proc.Wait(0)
	wg.Wait()
	// No waiting: if the process exited, we kill the connection.
	if me.stdinConn != nil {
		me.stdinConn.Close()
	}
	// We could use a connection here too, but this is simpler.
	me.WorkReply.Stdout = stdout.String()
	me.WorkReply.Stderr = stderr.String()
	err = me.fillReply()
	if err != nil {
		log.Println("discarding FUSE due to error:", me.fuseFs.mount, err)
		me.mirror.discardFuse(me.fuseFs)
	} else {
		// Must do updateFiles before ReturnFuse, since the
		// next job should not see out-of-date files.
		me.mirror.updateFiles(me.WorkReply.Files)
		me.mirror.returnFuse(me.fuseFs)
	}
	return err
}
// fillReply collects the files changed under the task's read-write layer
// into me.WorkReply.Files via a fileSaver, returning any error hit while
// saving.
func (me *WorkerTask) fillReply() os.Error {
	saver := &fileSaver{
		rwDir: me.fuseFs.rwDir,
		prefix: me.mirror.writableRoot,
		cache: me.mirror.daemon.contentCache,
	}
	saver.reapBackingStore()
	me.WorkReply.Files = saver.files
	return saver.err
}
// fileSaver walks a read-write overlay directory and records every
// changed file (including deletions) as FileAttr entries.
type fileSaver struct {
	rwDir string // root of the read-write layer being scanned
	prefix string // writable root; entries outside it are skipped
	err os.Error // first error encountered; latches further work off
	files []FileAttr
	cache *ContentCache // destination for saved regular-file content
}
// VisitFile implements the pre-Go1 filepath.Walk visitor for files.
func (me *fileSaver) VisitFile(path string, osInfo *os.FileInfo) {
	me.savePath(path, osInfo)
}
// VisitDir implements the pre-Go1 filepath.Walk visitor for directories;
// returning false stops descending once an error has been latched.
func (me *fileSaver) VisitDir(path string, osInfo *os.FileInfo) bool {
	me.savePath(path, osInfo)
	return me.err == nil
}
// savePath records one entry under the rw layer into me.files, saving
// regular-file content into the cache and capturing symlink targets.
// Errors are latched into me.err; once set, later calls are no-ops.
func (me *fileSaver) savePath(path string, osInfo *os.FileInfo) {
	if me.err != nil {
		return
	}
	if !strings.HasPrefix(path, me.rwDir) {
		log.Println("Weird file", path)
		return
	}
	fi := FileAttr{
		FileInfo: osInfo,
		Path: path[len(me.rwDir):],
	}
	// Skip entries outside the writable root and the deletions marker dir.
	if !strings.HasPrefix(fi.Path, me.prefix) || fi.Path == "/"+_DELETIONS {
		return
	}
	// Strip permission bits, leaving only the file-type bits.
	ftype := osInfo.Mode &^ 07777
	switch ftype {
	case fuse.S_IFDIR:
		// nothing.
		// TODO - remove dir.
	case fuse.S_IFREG:
		fi.Hash, fi.Content = me.cache.DestructiveSavePath(path)
		if fi.Hash == nil {
			me.err = os.NewError("DestructiveSavePath fail")
		}
	case fuse.S_IFLNK:
		val, err := os.Readlink(path)
		me.err = err
		fi.Link = val
		os.Remove(path)
	default:
		log.Fatalf("Unknown file type %o", ftype)
	}
	me.files = append(me.files, fi)
}
// reapBackingStore turns the rw layer into a FileAttr list: it first
// converts the _DELETIONS marker files into ENOENT entries (removing the
// markers), then walks the layer saving changed files, and finally
// removes now-empty saved directories in reverse (deepest-first) order.
func (me *fileSaver) reapBackingStore() {
	dir := filepath.Join(me.rwDir, _DELETIONS)
	_, err := os.Lstat(dir)
	if err == nil {
		matches, err := filepath.Glob(dir + "/*")
		if err != nil {
			me.err = err
			return
		}
		for _, fullPath := range matches {
			// Each marker file's contents is the deleted path.
			contents, err := ioutil.ReadFile(fullPath)
			if err != nil {
				me.err = err
				return
			}
			me.files = append(me.files, FileAttr{
				Status: fuse.ENOENT,
				Path: "/" + string(contents),
			})
			me.err = os.Remove(fullPath)
			if me.err != nil {
				break
			}
		}
	}
	if me.err == nil {
		filepath.Walk(me.rwDir, me, nil)
	}
	// Iterate in reverse so children are removed before their parents.
	for i, _ := range me.files {
		if me.err != nil {
			break
		}
		f := me.files[len(me.files)-i-1]
		if f.FileInfo != nil && f.FileInfo.IsDirectory() && f.Path != me.prefix {
			me.err = os.Remove(filepath.Join(me.rwDir, f.Path))
		}
	}
}
Fix fd leak in task.go
package termite
// TODO - this list of imports is scary; split up?
import (
"bytes"
"fmt"
"github.com/hanwen/go-fuse/fuse"
"io"
"io/ioutil"
"log"
"net"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
)
// WorkerTask bundles everything needed to run one work request inside a
// FUSE-mounted sandbox on the worker.
type WorkerTask struct {
	fuseFs *WorkerFuseFs // private FUSE mount the command runs inside
	*WorkRequest
	*WorkReply
	stdinConn net.Conn // optional remote stdin stream; may be nil
	mirror *Mirror
}
// Run executes the work request inside this task's private FUSE mount:
// it wires up stdin/stdout/stderr pipes, starts the process (re-exec'ed
// through the chroot helper as "nobody" when running as root), waits for
// it, and saves modified files back through the mirror.
//
// NOTE(review): pre-Go1 dialect (os.Error, proc.Wait(0), os.ProcAttr).
func (me *WorkerTask) Run() os.Error {
	me.fuseFs.MountState.Debug = me.WorkRequest.Debug
	me.fuseFs.fsConnector.Debug = me.WorkRequest.Debug
	rStdout, wStdout, err := os.Pipe()
	if err != nil {
		return err
	}
	rStderr, wStderr, err := os.Pipe()
	if err != nil {
		return err
	}
	rStdin, wStdin, err := os.Pipe()
	if err != nil {
		return err
	}
	attr := os.ProcAttr{
		Env: me.WorkRequest.Env,
		Files: []*os.File{rStdin, wStdout, wStderr},
	}
	cmd := []string{}
	binary := ""
	if os.Geteuid() == 0 {
		// As root, run through the chroot helper so the job executes as
		// "nobody" confined to the FUSE mount.
		nobody, err := user.Lookup("nobody")
		if err != nil {
			return err
		}
		binary = me.mirror.daemon.ChrootBinary
		cmd = []string{binary, "-dir", me.WorkRequest.Dir,
			"-uid", fmt.Sprintf("%d", nobody.Uid), "-gid", fmt.Sprintf("%d", nobody.Gid),
			"-binary", me.WorkRequest.Binary,
			me.fuseFs.mount}
		newcmd := make([]string, len(cmd)+len(me.WorkRequest.Argv))
		copy(newcmd, cmd)
		copy(newcmd[len(cmd):], me.WorkRequest.Argv)
		cmd = newcmd
	} else {
		cmd = me.WorkRequest.Argv
		binary = me.WorkRequest.Argv[0]
		attr.Dir = filepath.Join(me.fuseFs.mount, me.WorkRequest.Dir)
		log.Println("running in", attr.Dir)
	}
	log.Println("starting cmd", cmd, "in", me.fuseFs.mount)
	proc, err := os.StartProcess(binary, cmd, &attr)
	if err != nil {
		log.Println("Error", err)
		return err
	}
	// The child owns these pipe ends now; drop our copies.
	wStdout.Close()
	wStderr.Close()
	rStdin.Close()
	stdout := &bytes.Buffer{}
	stderr := &bytes.Buffer{}
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		io.Copy(stdout, rStdout)
		// Close the read end once drained so the fd is not leaked.
		rStdout.Close()
		wg.Done()
	}()
	go func() {
		io.Copy(stderr, rStderr)
		rStderr.Close()
		wg.Done()
	}()
	if me.stdinConn != nil {
		go func() {
			HookedCopy(wStdin, me.stdinConn, PrintStdinSliceLen)
			// No waiting: if the process exited, we kill the connection.
			wStdin.Close()
		}()
	} else {
		wStdin.Close()
	}
	me.WorkReply.Exit, err = proc.Wait(0)
	wg.Wait()
	// No waiting: if the process exited, we kill the connection.
	if me.stdinConn != nil {
		me.stdinConn.Close()
	}
	// We could use a connection here too, but this is simpler.
	me.WorkReply.Stdout = stdout.String()
	me.WorkReply.Stderr = stderr.String()
	err = me.fillReply()
	if err != nil {
		log.Println("discarding FUSE due to error:", me.fuseFs.mount, err)
		me.mirror.discardFuse(me.fuseFs)
	} else {
		// Must do updateFiles before ReturnFuse, since the
		// next job should not see out-of-date files.
		me.mirror.updateFiles(me.WorkReply.Files)
		me.mirror.returnFuse(me.fuseFs)
	}
	return err
}
// fillReply collects the files changed under the task's read-write layer
// into me.WorkReply.Files via a fileSaver, returning any error hit while
// saving.
func (me *WorkerTask) fillReply() os.Error {
	saver := &fileSaver{
		rwDir: me.fuseFs.rwDir,
		prefix: me.mirror.writableRoot,
		cache: me.mirror.daemon.contentCache,
	}
	saver.reapBackingStore()
	me.WorkReply.Files = saver.files
	return saver.err
}
// fileSaver walks a read-write overlay directory and records every
// changed file (including deletions) as FileAttr entries.
type fileSaver struct {
	rwDir string // root of the read-write layer being scanned
	prefix string // writable root; entries outside it are skipped
	err os.Error // first error encountered; latches further work off
	files []FileAttr
	cache *ContentCache // destination for saved regular-file content
}
// VisitFile implements the pre-Go1 filepath.Walk visitor for files.
func (me *fileSaver) VisitFile(path string, osInfo *os.FileInfo) {
	me.savePath(path, osInfo)
}
// VisitDir implements the pre-Go1 filepath.Walk visitor for directories;
// returning false stops descending once an error has been latched.
func (me *fileSaver) VisitDir(path string, osInfo *os.FileInfo) bool {
	me.savePath(path, osInfo)
	return me.err == nil
}
// savePath records one entry under the rw layer into me.files, saving
// regular-file content into the cache and capturing symlink targets.
// Errors are latched into me.err; once set, later calls are no-ops.
func (me *fileSaver) savePath(path string, osInfo *os.FileInfo) {
	if me.err != nil {
		return
	}
	if !strings.HasPrefix(path, me.rwDir) {
		log.Println("Weird file", path)
		return
	}
	fi := FileAttr{
		FileInfo: osInfo,
		Path: path[len(me.rwDir):],
	}
	// Skip entries outside the writable root and the deletions marker dir.
	if !strings.HasPrefix(fi.Path, me.prefix) || fi.Path == "/"+_DELETIONS {
		return
	}
	// Strip permission bits, leaving only the file-type bits.
	ftype := osInfo.Mode &^ 07777
	switch ftype {
	case fuse.S_IFDIR:
		// nothing.
		// TODO - remove dir.
	case fuse.S_IFREG:
		fi.Hash, fi.Content = me.cache.DestructiveSavePath(path)
		if fi.Hash == nil {
			me.err = os.NewError("DestructiveSavePath fail")
		}
	case fuse.S_IFLNK:
		val, err := os.Readlink(path)
		me.err = err
		fi.Link = val
		os.Remove(path)
	default:
		log.Fatalf("Unknown file type %o", ftype)
	}
	me.files = append(me.files, fi)
}
// reapBackingStore turns the rw layer into a FileAttr list: it first
// converts the _DELETIONS marker files into ENOENT entries (removing the
// markers), then walks the layer saving changed files, and finally
// removes now-empty saved directories in reverse (deepest-first) order.
func (me *fileSaver) reapBackingStore() {
	dir := filepath.Join(me.rwDir, _DELETIONS)
	_, err := os.Lstat(dir)
	if err == nil {
		matches, err := filepath.Glob(dir + "/*")
		if err != nil {
			me.err = err
			return
		}
		for _, fullPath := range matches {
			// Each marker file's contents is the deleted path.
			contents, err := ioutil.ReadFile(fullPath)
			if err != nil {
				me.err = err
				return
			}
			me.files = append(me.files, FileAttr{
				Status: fuse.ENOENT,
				Path: "/" + string(contents),
			})
			me.err = os.Remove(fullPath)
			if me.err != nil {
				break
			}
		}
	}
	if me.err == nil {
		filepath.Walk(me.rwDir, me, nil)
	}
	// Iterate in reverse so children are removed before their parents.
	for i, _ := range me.files {
		if me.err != nil {
			break
		}
		f := me.files[len(me.files)-i-1]
		if f.FileInfo != nil && f.FileInfo.IsDirectory() && f.Path != me.prefix {
			me.err = os.Remove(filepath.Join(me.rwDir, f.Path))
		}
	}
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/buffer"
"k8s.io/utils/clock"
"k8s.io/klog/v2"
)
// SharedInformer provides eventually consistent linkage of its
// clients to the authoritative state of a given collection of
// objects. An object is identified by its API group, kind/resource,
// namespace (if any), and name; the `ObjectMeta.UID` is not part of
// an object's ID as far as this contract is concerned. One
// SharedInformer provides linkage to objects of a particular API
// group and kind/resource. The linked object collection of a
// SharedInformer may be further restricted to one namespace (if
// applicable) and/or by label selector and/or field selector.
//
// The authoritative state of an object is what apiservers provide
// access to, and an object goes through a strict sequence of states.
// An object state is either (1) present with a ResourceVersion and
// other appropriate content or (2) "absent".
//
// A SharedInformer maintains a local cache --- exposed by GetStore(),
// by GetIndexer() in the case of an indexed informer, and possibly by
// machinery involved in creating and/or accessing the informer --- of
// the state of each relevant object. This cache is eventually
// consistent with the authoritative state. This means that, unless
// prevented by persistent communication problems, if ever a
// particular object ID X is authoritatively associated with a state S
// then for every SharedInformer I whose collection includes (X, S)
// eventually either (1) I's cache associates X with S or a later
// state of X, (2) I is stopped, or (3) the authoritative state
// service for X terminates. To be formally complete, we say that the
// absent state meets any restriction by label selector or field
// selector.
//
// For a given informer and relevant object ID X, the sequence of
// states that appears in the informer's cache is a subsequence of the
// states authoritatively associated with X. That is, some states
// might never appear in the cache but ordering among the appearing
// states is correct. Note, however, that there is no promise about
// ordering between states seen for different objects.
//
// The local cache starts out empty, and gets populated and updated
// during `Run()`.
//
// As a simple example, if a collection of objects is henceforth
// unchanging, a SharedInformer is created that links to that
// collection, and that SharedInformer is `Run()` then that
// SharedInformer's cache eventually holds an exact copy of that
// collection (unless it is stopped too soon, the authoritative state
// service ends, or communication problems between the two
// persistently thwart achievement).
//
// As another simple example, if the local cache ever holds a
// non-absent state for some object ID and the object is eventually
// removed from the authoritative state then eventually the object is
// removed from the local cache (unless the SharedInformer is stopped
// too soon, the authoritative state service ends, or communication
// problems persistently thwart the desired result).
//
// The keys in the Store are of the form namespace/name for namespaced
// objects, and are simply the name for non-namespaced objects.
// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for
// a given object, and `SplitMetaNamespaceKey(key)` to split a key
// into its constituent parts.
//
// Every query against the local cache is answered entirely from one
// snapshot of the cache's state. Thus, the result of a `List` call
// will not contain two entries with the same namespace and name.
//
// A client is identified here by a ResourceEventHandler. For every
// update to the SharedInformer's local cache and for every client
// added before `Run()`, eventually either the SharedInformer is
// stopped or the client is notified of the update. A client added
// after `Run()` starts gets a startup batch of notifications of
// additions of the objects existing in the cache at the time that
// client was added; also, for every update to the SharedInformer's
// local cache after that client was added, eventually either the
// SharedInformer is stopped or that client is notified of that
// update. Client notifications happen after the corresponding cache
// update and, in the case of a SharedIndexInformer, after the
// corresponding index updates. It is possible that additional cache
// and index updates happen before such a prescribed notification.
// For a given SharedInformer and client, the notifications are
// delivered sequentially. For a given SharedInformer, client, and
// object ID, the notifications are delivered in order. Because
// `ObjectMeta.UID` has no role in identifying objects, it is possible
// that when (1) object O1 with ID (e.g. namespace and name) X and
// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted
// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is
// created the informer's clients are not notified of (1) and (2) but
// rather are notified only of an update from O1 to O2. Clients that
// need to detect such cases might do so by comparing the `ObjectMeta.UID`
// field of the old and the new object in the code that handles update
// notifications (i.e. `OnUpdate` method of ResourceEventHandler).
//
// A client must process each notification promptly; a SharedInformer
// is not engineered to deal well with a large backlog of
// notifications to deliver. Lengthy processing should be passed off
// to something else, for example through a
// `client-go/util/workqueue`.
//
// A delete notification exposes the last locally known non-absent
// state, except that its ResourceVersion is replaced with a
// ResourceVersion in which the object is actually absent.
type SharedInformer interface {
	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
	// period. Events to a single handler are delivered sequentially, but there is no coordination
	// between different handlers.
	// It returns a registration handle for the handler that can be used to remove
	// the handler again.
	AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
	// AddEventHandlerWithResyncPeriod adds an event handler to the
	// shared informer with the requested resync period; zero means
	// this handler does not care about resyncs. The resync operation
	// consists of delivering to the handler an update notification
	// for every object in the informer's local cache; it does not add
	// any interactions with the authoritative storage. Some
	// informers do no resyncs at all, not even for handlers added
	// with a non-zero resyncPeriod. For an informer that does
	// resyncs, and for each handler that requests resyncs, that
	// informer develops a nominal resync period that is no shorter
	// than the requested period but may be longer. The actual time
	// between any two resyncs may be longer than the nominal period
	// because the implementation takes time to do work and there may
	// be competing load and scheduling noise.
	// It returns a registration handle for the handler that can be used to remove
	// the handler again and an error if the handler cannot be added.
	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error)
	// RemoveEventHandler removes a formerly added event handler given by
	// its registration handle.
	// This function is guaranteed to be idempotent, and thread-safe.
	RemoveEventHandler(handle ResourceEventHandlerRegistration) error
	// GetStore returns the informer's local cache as a Store.
	GetStore() Store
	// GetController is deprecated, it does nothing useful
	GetController() Controller
	// Run starts and runs the shared informer, returning after it stops.
	// The informer will be stopped when stopCh is closed.
	Run(stopCh <-chan struct{})
	// HasSynced returns true if the shared informer's store has been
	// informed by at least one full LIST of the authoritative state
	// of the informer's object collection. This is unrelated to "resync".
	HasSynced() bool
	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
	// store. The value returned is not synchronized with access to the underlying store and is not
	// thread-safe.
	LastSyncResourceVersion() string
	// The WatchErrorHandler is called whenever ListAndWatch drops the
	// connection with an error. After calling this handler, the informer
	// will backoff and retry.
	//
	// The default implementation looks at the error type and tries to log
	// the error message at an appropriate level.
	//
	// There's only one handler, so if you call this multiple times, last one
	// wins; calling after the informer has been started returns an error.
	//
	// The handler is intended for visibility, not to e.g. pause the consumers.
	// The handler should return quickly - any expensive processing should be
	// offloaded.
	SetWatchErrorHandler(handler WatchErrorHandler) error
	// The TransformFunc is called for each object which is about to be stored.
	//
	// This function is intended for you to take the opportunity to
	// remove, transform, or normalize fields. One use case is to strip unused
	// metadata fields out of objects to save on RAM cost.
	//
	// Must be set before starting the informer.
	//
	// Note: Since the object given to the handler may be already shared with
	// other goroutines, it is advisable to copy the object being
	// transform before mutating it at all and returning the copy to prevent
	// data races.
	SetTransform(handler TransformFunc) error
	// IsStopped reports whether the informer has already been stopped.
	// Adding event handlers to already stopped informers is not possible.
	// An informer already stopped will never be started again.
	IsStopped() bool
}
// ResourceEventHandlerRegistration is an opaque interface representing the
// registration of a ResourceEventHandler for a SharedInformer. Must be
// supplied back to the same SharedInformer's `RemoveEventHandler` to
// unregister the handler. Callers must not depend on its concrete type.
type ResourceEventHandlerRegistration interface{}
// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
type SharedIndexInformer interface {
	SharedInformer
	// AddIndexers add indexers to the informer before it starts.
	AddIndexers(indexers Indexers) error
	// GetIndexer returns the informer's local cache as an Indexer.
	GetIndexer() Indexer
}
// NewSharedInformer creates a new instance for the listwatcher.
// It is a convenience wrapper around NewSharedIndexInformer with no
// extra indexes.
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
	return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
}
// NewSharedIndexInformer creates a new instance for the listwatcher.
// The created informer will not do resyncs if the given
// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
// handler that with a non-zero requested resync period, whether added
// before or after the informer starts, the nominal resync period is
// the requested resync period rounded up to a multiple of the
// informer's resync checking period. Such an informer's resync
// checking period is established when the informer starts running,
// and is the maximum of (a) the minimum of the resync periods
// requested before the informer starts and the
// defaultEventHandlerResyncPeriod given here and (b) the constant
// `minimumResyncPeriod` defined in this file.
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
	// The informer and its processor share one real clock so that
	// resync bookkeeping is consistent between them.
	rc := &clock.RealClock{}
	informer := &sharedIndexInformer{
		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		processor:                       &sharedProcessor{clock: rc},
		listerWatcher:                   lw,
		objectType:                      exampleObject,
		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
		clock:                           rc,
	}
	return informer
}
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
type InformerSynced func() bool

const (
	// syncedPollPeriod controls how often you look at the status of your sync funcs
	syncedPollPeriod = 100 * time.Millisecond
	// initialBufferSize is the initial number of event notifications that can be buffered.
	initialBufferSize = 1024
)
// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
// indicating that the caller identified by name is waiting for syncs, followed by
// either a successful or failed sync.
func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	klog.Infof("Waiting for caches to sync for %s", controllerName)
	if WaitForCacheSync(stopCh, cacheSyncs...) {
		klog.Infof("Caches are synced for %s", controllerName)
		return true
	}
	// Sync failed (or was stopped); surface it through the shared error handler.
	utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
	return false
}
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shutdown
// callers should prefer WaitForNamedCacheSync()
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	// allSynced reports whether every supplied sync func is satisfied;
	// it never returns an error, so polling only ends on success or stop.
	allSynced := func() (bool, error) {
		for _, syncFunc := range cacheSyncs {
			if !syncFunc() {
				return false, nil
			}
		}
		return true, nil
	}
	if err := wait.PollImmediateUntil(syncedPollPeriod, allSynced, stopCh); err != nil {
		klog.V(2).Infof("stop requested")
		return false
	}
	klog.V(4).Infof("caches populated")
	return true
}
// `*sharedIndexInformer` implements SharedIndexInformer and has three
// main components. One is an indexed local cache, `indexer Indexer`.
// The second main component is a Controller that pulls
// objects/notifications using the ListerWatcher and pushes them into
// a DeltaFIFO --- whose knownObjects is the informer's local cache
// --- while concurrently Popping Deltas values from that fifo and
// processing them with `sharedIndexInformer::HandleDeltas`. Each
// invocation of HandleDeltas, which is done with the fifo's lock
// held, processes each Delta in turn. For each Delta this both
// updates the local cache and stuffs the relevant notification into
// the sharedProcessor. The third main component is that
// sharedProcessor, which is responsible for relaying those
// notifications to each of the informer's clients.
type sharedIndexInformer struct {
	indexer    Indexer
	controller Controller

	processor             *sharedProcessor
	cacheMutationDetector MutationDetector

	listerWatcher ListerWatcher

	// objectType is an example object of the type this informer is
	// expected to handle. Only the type needs to be right, except
	// that when that is `unstructured.Unstructured` the object's
	// `"apiVersion"` and `"kind"` must also be right.
	objectType runtime.Object

	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
	// shouldResync to check if any of our listeners need a resync.
	resyncCheckPeriod time.Duration

	// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
	// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
	// value).
	defaultEventHandlerResyncPeriod time.Duration

	// clock allows for testability
	clock clock.Clock

	// started/stopped track the informer's lifecycle; both are guarded
	// by startedLock.
	started, stopped bool
	startedLock      sync.Mutex

	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex

	// Called whenever the ListAndWatch drops the connection with an error.
	watchErrorHandler WatchErrorHandler

	// transform, if set, is applied to each object before it is stored.
	transform TransformFunc
}
// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	// informer is the shared informer that this facade delegates to.
	informer *sharedIndexInformer
}
// Run is intentionally a no-op: the shared informer's lifecycle is
// driven by SharedInformer.Run, not by this controller facade.
func (v *dummyController) Run(stopCh <-chan struct{}) {
}

// HasSynced delegates to the underlying shared informer.
func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}

// LastSyncResourceVersion always returns the empty string; the dummy
// controller does not track resource versions.
func (v *dummyController) LastSyncResourceVersion() string {
	return ""
}
// updateNotification is delivered to handlers as OnUpdate(oldObj, newObj).
type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}

// addNotification is delivered to handlers as OnAdd(newObj).
type addNotification struct {
	newObj interface{}
}

// deleteNotification is delivered to handlers as OnDelete(oldObj),
// where oldObj is the last locally known state of the object.
type deleteNotification struct {
	oldObj interface{}
}
// SetWatchErrorHandler installs the handler invoked when ListAndWatch
// drops its connection. It may only be called before the informer has
// started; afterwards an error is returned.
func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if !s.started {
		s.watchErrorHandler = handler
		return nil
	}
	return fmt.Errorf("informer has already started")
}
// SetTransform installs the function applied to every object before it
// is stored. It may only be called before the informer has started;
// afterwards an error is returned.
func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if !s.started {
		s.transform = handler
		return nil
	}
	return fmt.Errorf("informer has already started")
}
// Run starts and runs the shared informer, returning only after stopCh
// is closed. It wires a DeltaFIFO-backed controller to the processor,
// starts both, and performs an ordered shutdown. The defer ordering is
// deliberate: defers run LIFO, so after s.controller.Run returns we
// first mark the informer stopped, then close processorStopCh (telling
// the processor and mutation detector to stop), and only then wg.Wait
// for them — i.e. the processor stops strictly after the controller.
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	// Running twice is not supported; a second call is a no-op with a warning.
	if s.HasStarted() {
		klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
		return
	}

	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
		KnownObjects:          s.indexer,
		EmitDeltaTypeReplaced: true,
	})

	cfg := &Config{
		Queue:            fifo,
		ListerWatcher:    s.listerWatcher,
		ObjectType:       s.objectType,
		FullResyncPeriod: s.resyncCheckPeriod,
		RetryOnError:     false,
		ShouldResync:     s.processor.shouldResync,

		Process:           s.HandleDeltas,
		WatchErrorHandler: s.watchErrorHandler,
	}

	// Create the controller and flip `started` under startedLock so that
	// concurrent AddEventHandler* calls observe a consistent state.
	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()

		s.controller = New(cfg)
		s.controller.(*controller).clock = s.clock
		s.started = true
	}()

	// Separate stop channel because Processor should be stopped strictly after controller
	processorStopCh := make(chan struct{})
	var wg wait.Group
	defer wg.Wait()              // Wait for Processor to stop
	defer close(processorStopCh) // Tell Processor to stop
	wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
	wg.StartWithChannel(processorStopCh, s.processor.run)

	defer func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.stopped = true // Don't want any new listeners
	}()
	s.controller.Run(stopCh)
}
// HasStarted reports whether Run has been called on this informer.
func (s *sharedIndexInformer) HasStarted() bool {
	s.startedLock.Lock()
	started := s.started
	s.startedLock.Unlock()
	return started
}
// HasSynced reports whether the informer's store has been populated by
// at least one full LIST; false before the controller exists.
func (s *sharedIndexInformer) HasSynced() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if c := s.controller; c != nil {
		return c.HasSynced()
	}
	return false
}
// LastSyncResourceVersion returns the resource version observed at the
// last sync, or the empty string before the controller exists.
func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if c := s.controller; c != nil {
		return c.LastSyncResourceVersion()
	}
	return ""
}
// GetStore exposes the informer's local cache as a Store.
func (s *sharedIndexInformer) GetStore() Store {
	return s.indexer
}

// GetIndexer exposes the informer's local cache as an Indexer.
func (s *sharedIndexInformer) GetIndexer() Indexer {
	return s.indexer
}
// AddIndexers installs additional indexes on the local cache. It may
// only be called before the informer has started.
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if !s.started {
		return s.indexer.AddIndexers(indexers)
	}
	return fmt.Errorf("informer has already started")
}
// GetController returns a facade whose Run is a no-op; it exists only
// for interface compatibility (see dummyController).
func (s *sharedIndexInformer) GetController() Controller {
	return &dummyController{informer: s}
}

// AddEventHandler registers handler with the informer's default resync period.
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) {
	return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}
// determineResyncPeriod reconciles a handler's desired resync period
// with the informer's resync checking period: zero desired means "no
// resync"; a zero check period disables resync entirely; otherwise the
// result is never shorter than the check period.
func determineResyncPeriod(desired, check time.Duration) time.Duration {
	switch {
	case desired == 0:
		return desired
	case check == 0:
		klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
		return 0
	case desired < check:
		klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
		return check
	default:
		return desired
	}
}
// minimumResyncPeriod is the smallest resync period accepted for an
// individual event handler; smaller requests are rounded up to it.
const minimumResyncPeriod = 1 * time.Second
// AddEventHandlerWithResyncPeriod registers handler with the requested
// resync period (zero disables resync for this handler). It clamps the
// period to minimumResyncPeriod and reconciles it with the informer's
// resyncCheckPeriod; before the informer starts the check period itself
// can still be lowered to match. If the informer is already running,
// event distribution is paused (blockDeltas) while the new listener is
// added and primed with synthetic Add events for the current cache
// contents. Lock order is startedLock before blockDeltas.
func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	if s.stopped {
		return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler)
	}

	if resyncPeriod > 0 {
		if resyncPeriod < minimumResyncPeriod {
			klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod)
			resyncPeriod = minimumResyncPeriod
		}

		if resyncPeriod < s.resyncCheckPeriod {
			if s.started {
				// Too late to shrink the informer-wide check period; round
				// this handler up instead.
				klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
				resyncPeriod = s.resyncCheckPeriod
			} else {
				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
				// accordingly
				s.resyncCheckPeriod = resyncPeriod
				s.processor.resyncCheckPeriodChanged(resyncPeriod)
			}
		}
	}

	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)

	if !s.started {
		return s.processor.addListener(listener), nil
	}

	// in order to safely join, we have to
	// 1. stop sending add/update/delete notifications
	// 2. do a list against the store
	// 3. send synthetic "Add" events to the new handler
	// 4. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	handle := s.processor.addListener(listener)
	for _, item := range s.indexer.List() {
		listener.add(addNotification{newObj: item})
	}
	return handle, nil
}
// HandleDeltas is the controller's Process callback: it applies a batch
// of Deltas to the local cache and fans notifications out to listeners,
// holding blockDeltas so late-joining handlers can pause distribution.
func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	deltas, ok := obj.(Deltas)
	if !ok {
		return errors.New("object given as Process argument is not Deltas")
	}
	return processDeltas(s, s.indexer, s.transform, deltas)
}
// OnAdd conforms to ResourceEventHandler; it records the object with the
// mutation detector and distributes an add notification.
func (s *sharedIndexInformer) OnAdd(obj interface{}) {
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.cacheMutationDetector.AddObject(obj)
	s.processor.distribute(addNotification{newObj: obj}, false)
}
// OnUpdate conforms to ResourceEventHandler; updates whose resource
// version did not change are classified as sync events and delivered
// only to listeners that are currently resyncing.
func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
	isSync := false

	// If is a Sync event, isSync should be true
	// If is a Replaced event, isSync is true if resource version is unchanged.
	// If RV is unchanged: this is a Sync/Replaced event, so isSync is true
	if accessor, err := meta.Accessor(new); err == nil {
		if oldAccessor, err := meta.Accessor(old); err == nil {
			// Events that didn't change resourceVersion are treated as resync events
			// and only propagated to listeners that requested resync
			isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
		}
	}

	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.cacheMutationDetector.AddObject(new)
	s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
}
// OnDelete conforms to ResourceEventHandler; it distributes a delete
// notification carrying the last known state of the object.
func (s *sharedIndexInformer) OnDelete(old interface{}) {
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.processor.distribute(deleteNotification{oldObj: old}, false)
}
// IsStopped reports whether the informer has already been stopped.
func (s *sharedIndexInformer) IsStopped() bool {
	s.startedLock.Lock()
	stopped := s.stopped
	s.startedLock.Unlock()
	return stopped
}
// RemoveEventHandler unregisters the handler identified by handle. It
// is idempotent and thread-safe. Lock order mirrors the add path:
// startedLock first, then blockDeltas to pause event distribution while
// the listener is removed.
func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()

	// in order to safely remove, we have to
	// 1. stop sending add/update/delete notifications
	// 2. remove and stop listener
	// 3. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()
	return s.processor.removeListener(handle)
}
// sharedProcessor has a collection of processorListener and can
// distribute a notification object to its listeners. There are two
// kinds of distribute operations. The sync distributions go to a
// subset of the listeners that (a) is recomputed in the occasional
// calls to shouldResync and (b) every listener is initially put in.
// The non-sync distributions go to every listener.
type sharedProcessor struct {
	// listenersStarted records whether run() has launched the listener
	// goroutines; late-added listeners are started immediately when true.
	listenersStarted bool
	listenersLock    sync.RWMutex
	// Map from listeners to whether or not they are currently syncing
	listeners map[*processorListener]bool
	clock     clock.Clock
	// wg tracks every listener's pop/run goroutine pair.
	wg wait.Group
}
// getListener resolves a registration handle back to its listener, or
// nil when the handle is of the wrong type or not registered here.
func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	listener, ok := registration.(*processorListener)
	if !ok || p.listeners == nil {
		return nil
	}
	if _, exists := p.listeners[listener]; !exists {
		return nil
	}
	return listener
}
// addListener registers listener and, if the processor is already
// running, starts its goroutines immediately. The listener itself is
// returned as the opaque registration handle.
func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	if p.listeners == nil {
		p.listeners = make(map[*processorListener]bool)
	}
	// New listeners start out in the syncing set.
	p.listeners[listener] = true

	if p.listenersStarted {
		// Late addition: spin up this listener's goroutine pair now.
		for _, f := range []func(){listener.run, listener.pop} {
			p.wg.Start(f)
		}
	}
	return listener
}
// removeListener removes the listener identified by handle. It is a
// no-op when no listeners are registered or the handle is unknown. If
// the listeners have already been started, closing addCh tells the
// listener's pop() goroutine (and, transitively, run()) to stop.
func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	listener, ok := handle.(*processorListener)
	if !ok {
		// BUGFIX: %T (dynamic type) instead of %t (boolean verb); the old
		// message rendered as "%!t(...)" noise for any non-bool handle.
		return fmt.Errorf("invalid key type %T", handle)
	} else if p.listeners == nil {
		// No listeners are registered, do nothing
		return nil
	} else if _, exists := p.listeners[listener]; !exists {
		// Listener is not registered, just do nothing
		return nil
	}

	delete(p.listeners, listener)

	if p.listenersStarted {
		close(listener.addCh)
	}
	return nil
}
// distribute fans obj out to listeners. Non-sync notifications go to
// everyone; sync notifications go only to listeners currently marked
// as syncing.
func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	for listener, isSyncing := range p.listeners {
		switch {
		case !sync:
			// Regular notification: deliver unconditionally.
			listener.add(obj)
		case isSyncing:
			// Sync notification: deliver only to resyncing listeners.
			listener.add(obj)
		}
	}
}
// run starts a pop/run goroutine pair for every registered listener,
// waits for stopCh, then shuts everything down: each listener's addCh
// is closed (which stops pop(), which stops run()), the listener map is
// wiped, and finally the wait group is drained.
func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	func() {
		p.listenersLock.RLock()
		defer p.listenersLock.RUnlock()
		for listener := range p.listeners {
			p.wg.Start(listener.run)
			p.wg.Start(listener.pop)
		}
		p.listenersStarted = true
	}()
	<-stopCh

	func() {
		p.listenersLock.Lock()
		defer p.listenersLock.Unlock()
		for listener := range p.listeners {
			close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
		}

		// Wipe out list of listeners since they are now closed
		// (processorListener cannot be re-used)
		p.listeners = nil

		// BUGFIX: reset listenersStarted for correctness. The processor is
		// no longer running, so addListener must not start goroutines for a
		// listener added after shutdown. Technically restarting a stopped
		// processor is not supported, but the flag should not claim the
		// listeners are still running.
		p.listenersStarted = false
	}()
	p.wg.Wait() // Wait for all .pop() and .run() to stop
}
// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod. It also refreshes each listener's syncing
// flag in p.listeners as a side effect.
func (p *sharedProcessor) shouldResync() bool {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()

	now := p.clock.Now()
	needed := false
	for listener := range p.listeners {
		// Evaluate every listener (not just until the first hit) so that
		// each one's syncing flag is refreshed for the next distribution.
		if listener.shouldResync(now) {
			p.listeners[listener] = true
			needed = true
			listener.determineNextResync(now)
		} else {
			p.listeners[listener] = false
		}
	}
	return needed
}
// resyncCheckPeriodChanged recomputes every listener's effective resync
// period against the new informer-wide check period.
func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	for listener := range p.listeners {
		listener.setResyncPeriod(determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod))
	}
}
// processorListener relays notifications from a sharedProcessor to
// one ResourceEventHandler --- using two goroutines, two unbuffered
// channels, and an unbounded ring buffer. The `add(notification)`
// function sends the given notification to `addCh`. One goroutine
// runs `pop()`, which pumps notifications from `addCh` to `nextCh`
// using storage in the ring buffer while `nextCh` is not keeping up.
// Another goroutine runs `run()`, which receives notifications from
// `nextCh` and synchronously invokes the appropriate handler method.
//
// processorListener also keeps track of the adjusted requested resync
// period of the listener.
type processorListener struct {
	nextCh chan interface{}
	addCh  chan interface{}

	handler ResourceEventHandler

	// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better.
	pendingNotifications buffer.RingGrowing

	// requestedResyncPeriod is how frequently the listener wants a
	// full resync from the shared informer, but modified by two
	// adjustments. One is imposing a lower bound,
	// `minimumResyncPeriod`. The other is another lower bound, the
	// sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only
	// in AddEventHandlerWithResyncPeriod invocations made after the
	// sharedIndexInformer starts and (b) only if the informer does
	// resyncs at all.
	requestedResyncPeriod time.Duration
	// resyncPeriod is the threshold that will be used in the logic
	// for this listener. This value differs from
	// requestedResyncPeriod only when the sharedIndexInformer does
	// not do resyncs, in which case the value here is zero. The
	// actual time between resyncs depends on when the
	// sharedProcessor's `shouldResync` function is invoked and when
	// the sharedIndexInformer processes `Sync` type Delta objects.
	resyncPeriod time.Duration
	// nextResync is the earliest time the listener should get a full resync
	nextResync time.Time
	// resyncLock guards access to resyncPeriod and nextResync
	resyncLock sync.Mutex
}
// newProcessListener builds a listener for handler with unbuffered
// channels, a growable pending-notification ring of the given initial
// size, and its first resync deadline computed from now.
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
	p := &processorListener{
		handler:               handler,
		nextCh:                make(chan interface{}),
		addCh:                 make(chan interface{}),
		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
		requestedResyncPeriod: requestedResyncPeriod,
		resyncPeriod:          resyncPeriod,
	}
	p.determineNextResync(now)
	return p
}
// add hands a notification to this listener; it blocks until pop()
// receives it (addCh is unbuffered).
func (p *processorListener) add(notification interface{}) {
	p.addCh <- notification
}
// pop pumps notifications from addCh to nextCh, buffering in the ring
// whenever the consumer (run) is not keeping up. The local `nextCh`
// variable is nil while there is nothing to send — sending on a nil
// channel blocks forever, which disables that select case until a
// notification arrives. pop returns when addCh is closed, and closing
// nextCh on the way out tells run() to stop too.
func (p *processorListener) pop() {
	defer utilruntime.HandleCrash()
	defer close(p.nextCh) // Tell .run() to stop

	var nextCh chan<- interface{}
	var notification interface{}
	for {
		select {
		case nextCh <- notification:
			// Notification dispatched
			var ok bool
			notification, ok = p.pendingNotifications.ReadOne()
			if !ok { // Nothing to pop
				nextCh = nil // Disable this select case
			}
		case notificationToAdd, ok := <-p.addCh:
			if !ok {
				return
			}
			if notification == nil { // No notification to pop (and pendingNotifications is empty)
				// Optimize the case - skip adding to pendingNotifications
				notification = notificationToAdd
				nextCh = p.nextCh
			} else { // There is already a notification waiting to be dispatched
				p.pendingNotifications.WriteOne(notificationToAdd)
			}
		}
	}
}
// run receives notifications from nextCh and synchronously invokes the
// matching handler method. It is wrapped in wait.Until so that a panic
// in a handler is recovered and delivery resumes after one second —
// the offending item is skipped. When nextCh is drained and closed,
// the inner loop exits normally and closing stopCh ends wait.Until.
func (p *processorListener) run() {
	// this call blocks until the channel is closed. When a panic happens during the notification
	// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
	// the next notification will be attempted. This is usually better than the alternative of never
	// delivering again.
	stopCh := make(chan struct{})
	wait.Until(func() {
		for next := range p.nextCh {
			switch notification := next.(type) {
			case updateNotification:
				p.handler.OnUpdate(notification.oldObj, notification.newObj)
			case addNotification:
				p.handler.OnAdd(notification.newObj)
			case deleteNotification:
				p.handler.OnDelete(notification.oldObj)
			default:
				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
			}
		}
		// the only way to get here is if the p.nextCh is empty and closed
		close(stopCh)
	}, 1*time.Second, stopCh)
}
// shouldResync determines whether this listener is due for a resync at the
// given time. A resyncPeriod of zero means the listener never resyncs.
func (p *processorListener) shouldResync(now time.Time) bool {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()
	if p.resyncPeriod == 0 {
		return false
	}
	// Due once now >= nextResync.
	return !now.Before(p.nextResync)
}
// determineNextResync schedules the next resync deadline relative to now,
// using the listener's current (possibly adjusted) resyncPeriod.
func (p *processorListener) determineNextResync(now time.Time) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()
	p.nextResync = now.Add(p.resyncPeriod)
}
// setResyncPeriod updates the effective resync period under resyncLock; the
// new value takes effect at the next determineNextResync call.
func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()
	p.resyncPeriod = resyncPeriod
}
Reset listenersStarted for correctness. Technically this shouldn't be an issue, since restarting a stopped processor is not supported.
Kubernetes-commit: 3a81341cfa6f7e2ca1b9bfc195c567dcdfaa4dea
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"errors"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/utils/buffer"
"k8s.io/utils/clock"
"k8s.io/klog/v2"
)
// SharedInformer provides eventually consistent linkage of its
// clients to the authoritative state of a given collection of
// objects. An object is identified by its API group, kind/resource,
// namespace (if any), and name; the `ObjectMeta.UID` is not part of
// an object's ID as far as this contract is concerned. One
// SharedInformer provides linkage to objects of a particular API
// group and kind/resource. The linked object collection of a
// SharedInformer may be further restricted to one namespace (if
// applicable) and/or by label selector and/or field selector.
//
// The authoritative state of an object is what apiservers provide
// access to, and an object goes through a strict sequence of states.
// An object state is either (1) present with a ResourceVersion and
// other appropriate content or (2) "absent".
//
// A SharedInformer maintains a local cache --- exposed by GetStore(),
// by GetIndexer() in the case of an indexed informer, and possibly by
// machinery involved in creating and/or accessing the informer --- of
// the state of each relevant object. This cache is eventually
// consistent with the authoritative state. This means that, unless
// prevented by persistent communication problems, if ever a
// particular object ID X is authoritatively associated with a state S
// then for every SharedInformer I whose collection includes (X, S)
// eventually either (1) I's cache associates X with S or a later
// state of X, (2) I is stopped, or (3) the authoritative state
// service for X terminates. To be formally complete, we say that the
// absent state meets any restriction by label selector or field
// selector.
//
// For a given informer and relevant object ID X, the sequence of
// states that appears in the informer's cache is a subsequence of the
// states authoritatively associated with X. That is, some states
// might never appear in the cache but ordering among the appearing
// states is correct. Note, however, that there is no promise about
// ordering between states seen for different objects.
//
// The local cache starts out empty, and gets populated and updated
// during `Run()`.
//
// As a simple example, if a collection of objects is henceforth
// unchanging, a SharedInformer is created that links to that
// collection, and that SharedInformer is `Run()` then that
// SharedInformer's cache eventually holds an exact copy of that
// collection (unless it is stopped too soon, the authoritative state
// service ends, or communication problems between the two
// persistently thwart achievement).
//
// As another simple example, if the local cache ever holds a
// non-absent state for some object ID and the object is eventually
// removed from the authoritative state then eventually the object is
// removed from the local cache (unless the SharedInformer is stopped
// too soon, the authoritative state service ends, or communication
// problems persistently thwart the desired result).
//
// The keys in the Store are of the form namespace/name for namespaced
// objects, and are simply the name for non-namespaced objects.
// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for
// a given object, and `SplitMetaNamespaceKey(key)` to split a key
// into its constituent parts.
//
// Every query against the local cache is answered entirely from one
// snapshot of the cache's state. Thus, the result of a `List` call
// will not contain two entries with the same namespace and name.
//
// A client is identified here by a ResourceEventHandler. For every
// update to the SharedInformer's local cache and for every client
// added before `Run()`, eventually either the SharedInformer is
// stopped or the client is notified of the update. A client added
// after `Run()` starts gets a startup batch of notifications of
// additions of the objects existing in the cache at the time that
// client was added; also, for every update to the SharedInformer's
// local cache after that client was added, eventually either the
// SharedInformer is stopped or that client is notified of that
// update. Client notifications happen after the corresponding cache
// update and, in the case of a SharedIndexInformer, after the
// corresponding index updates. It is possible that additional cache
// and index updates happen before such a prescribed notification.
// For a given SharedInformer and client, the notifications are
// delivered sequentially. For a given SharedInformer, client, and
// object ID, the notifications are delivered in order. Because
// `ObjectMeta.UID` has no role in identifying objects, it is possible
// that when (1) object O1 with ID (e.g. namespace and name) X and
// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted
// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is
// created the informer's clients are not notified of (1) and (2) but
// rather are notified only of an update from O1 to O2. Clients that
// need to detect such cases might do so by comparing the `ObjectMeta.UID`
// field of the old and the new object in the code that handles update
// notifications (i.e. `OnUpdate` method of ResourceEventHandler).
//
// A client must process each notification promptly; a SharedInformer
// is not engineered to deal well with a large backlog of
// notifications to deliver. Lengthy processing should be passed off
// to something else, for example through a
// `client-go/util/workqueue`.
//
// A delete notification exposes the last locally known non-absent
// state, except that its ResourceVersion is replaced with a
// ResourceVersion in which the object is actually absent.
type SharedInformer interface {
	// AddEventHandler adds an event handler to the shared informer using the shared informer's resync
	// period. Events to a single handler are delivered sequentially, but there is no coordination
	// between different handlers.
	// It returns a registration handle for the handler that can be used to remove
	// the handler again.
	AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error)
	// AddEventHandlerWithResyncPeriod adds an event handler to the
	// shared informer with the requested resync period; zero means
	// this handler does not care about resyncs. The resync operation
	// consists of delivering to the handler an update notification
	// for every object in the informer's local cache; it does not add
	// any interactions with the authoritative storage. Some
	// informers do no resyncs at all, not even for handlers added
	// with a non-zero resyncPeriod. For an informer that does
	// resyncs, and for each handler that requests resyncs, that
	// informer develops a nominal resync period that is no shorter
	// than the requested period but may be longer. The actual time
	// between any two resyncs may be longer than the nominal period
	// because the implementation takes time to do work and there may
	// be competing load and scheduling noise.
	// It returns a registration handle for the handler that can be used to remove
	// the handler again and an error if the handler cannot be added.
	AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error)
	// RemoveEventHandler removes a formerly added event handler given by
	// its registration handle.
	// This function is guaranteed to be idempotent, and thread-safe.
	RemoveEventHandler(handle ResourceEventHandlerRegistration) error
	// GetStore returns the informer's local cache as a Store.
	GetStore() Store
	// GetController is deprecated, it does nothing useful
	GetController() Controller
	// Run starts and runs the shared informer, returning after it stops.
	// The informer will be stopped when stopCh is closed.
	Run(stopCh <-chan struct{})
	// HasSynced returns true if the shared informer's store has been
	// informed by at least one full LIST of the authoritative state
	// of the informer's object collection. This is unrelated to "resync".
	HasSynced() bool
	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
	// store. The value returned is not synchronized with access to the underlying store and is not
	// thread-safe.
	LastSyncResourceVersion() string
	// The WatchErrorHandler is called whenever ListAndWatch drops the
	// connection with an error. After calling this handler, the informer
	// will backoff and retry.
	//
	// The default implementation looks at the error type and tries to log
	// the error message at an appropriate level.
	//
	// There's only one handler, so if you call this multiple times, last one
	// wins; calling after the informer has been started returns an error.
	//
	// The handler is intended for visibility, not to e.g. pause the consumers.
	// The handler should return quickly - any expensive processing should be
	// offloaded.
	SetWatchErrorHandler(handler WatchErrorHandler) error
	// The TransformFunc is called for each object which is about to be stored.
	//
	// This function is intended for you to take the opportunity to
	// remove, transform, or normalize fields. One use case is to strip unused
	// metadata fields out of objects to save on RAM cost.
	//
	// Must be set before starting the informer.
	//
	// Note: Since the object given to the handler may be already shared with
	// other goroutines, it is advisable to copy the object being
	// transformed before mutating it at all and returning the copy to prevent
	// data races.
	SetTransform(handler TransformFunc) error
	// IsStopped reports whether the informer has already been stopped.
	// Adding event handlers to already stopped informers is not possible.
	// An informer already stopped will never be started again.
	IsStopped() bool
}
// Opaque interface representing the registration of ResourceEventHandler for
// a SharedInformer. Must be supplied back to the same SharedInformer's
// `RemoveEventHandler` to unregister the handlers.
// (Internally the concrete value is the *processorListener created for the
// handler; callers must treat it as opaque.)
type ResourceEventHandlerRegistration interface{}
// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
type SharedIndexInformer interface {
	SharedInformer
	// AddIndexers add indexers to the informer before it starts.
	AddIndexers(indexers Indexers) error
	// GetIndexer returns the informer's local cache as an Indexer.
	GetIndexer() Indexer
}
// NewSharedInformer creates a new instance for the listwatcher.
// The returned informer is unindexed: it delegates to NewSharedIndexInformer
// with an empty Indexers map.
func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
	return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
}
// NewSharedIndexInformer creates a new instance for the listwatcher.
// The created informer will not do resyncs if the given
// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
// handler with a non-zero requested resync period, whether added
// before or after the informer starts, the nominal resync period is
// the requested resync period rounded up to a multiple of the
// informer's resync checking period. Such an informer's resync
// checking period is established when the informer starts running,
// and is the maximum of (a) the minimum of the resync periods
// requested before the informer starts and the
// defaultEventHandlerResyncPeriod given here and (b) the constant
// `minimumResyncPeriod` defined in this file.
func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
	realClock := &clock.RealClock{}
	sharedIndexInformer := &sharedIndexInformer{
		processor:                       &sharedProcessor{clock: realClock},
		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
		listerWatcher:                   lw,
		objectType:                      exampleObject,
		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
		clock:                           realClock,
	}
	return sharedIndexInformer
}
// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
// SharedInformer.HasSynced satisfies this signature.
type InformerSynced func() bool
const (
	// syncedPollPeriod controls how often you look at the status of your sync funcs
	// (used by WaitForCacheSync below).
	syncedPollPeriod = 100 * time.Millisecond
	// initialBufferSize is the initial number of event notifications that can be buffered.
	// Each processorListener's pendingNotifications ring starts at this capacity.
	initialBufferSize = 1024
)
// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
// indicating that the caller identified by name is waiting for syncs, followed by
// either a successful or failed sync.
func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	klog.Infof("Waiting for caches to sync for %s", controllerName)

	synced := WaitForCacheSync(stopCh, cacheSyncs...)
	if synced {
		klog.Infof("Caches are synced for %s", controllerName)
	} else {
		utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
	}
	return synced
}
// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
// if the controller should shutdown
// callers should prefer WaitForNamedCacheSync()
func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
	// allSynced reports whether every supplied sync func has completed.
	allSynced := func() (bool, error) {
		for _, synced := range cacheSyncs {
			if !synced() {
				return false, nil
			}
		}
		return true, nil
	}
	if err := wait.PollImmediateUntil(syncedPollPeriod, allSynced, stopCh); err != nil {
		klog.V(2).Infof("stop requested")
		return false
	}
	klog.V(4).Infof("caches populated")
	return true
}
// `*sharedIndexInformer` implements SharedIndexInformer and has three
// main components. One is an indexed local cache, `indexer Indexer`.
// The second main component is a Controller that pulls
// objects/notifications using the ListerWatcher and pushes them into
// a DeltaFIFO --- whose knownObjects is the informer's local cache
// --- while concurrently Popping Deltas values from that fifo and
// processing them with `sharedIndexInformer::HandleDeltas`. Each
// invocation of HandleDeltas, which is done with the fifo's lock
// held, processes each Delta in turn. For each Delta this both
// updates the local cache and stuffs the relevant notification into
// the sharedProcessor. The third main component is that
// sharedProcessor, which is responsible for relaying those
// notifications to each of the informer's clients.
type sharedIndexInformer struct {
	indexer    Indexer
	controller Controller
	processor             *sharedProcessor
	cacheMutationDetector MutationDetector
	listerWatcher ListerWatcher
	// objectType is an example object of the type this informer is
	// expected to handle. Only the type needs to be right, except
	// that when that is `unstructured.Unstructured` the object's
	// `"apiVersion"` and `"kind"` must also be right.
	objectType runtime.Object
	// resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call
	// shouldResync to check if any of our listeners need a resync.
	resyncCheckPeriod time.Duration
	// defaultEventHandlerResyncPeriod is the default resync period for any handlers added via
	// AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default
	// value).
	defaultEventHandlerResyncPeriod time.Duration
	// clock allows for testability
	clock clock.Clock
	// started/stopped track the informer's lifecycle; both are guarded by startedLock.
	started, stopped bool
	startedLock      sync.Mutex
	// blockDeltas gives a way to stop all event distribution so that a late event handler
	// can safely join the shared informer.
	blockDeltas sync.Mutex
	// Called whenever the ListAndWatch drops the connection with an error.
	watchErrorHandler WatchErrorHandler
	// transform, if set, is applied to each object before it is stored.
	transform TransformFunc
}
// dummyController hides the fact that a SharedInformer is different from a dedicated one
// where a caller can `Run`. The run method is disconnected in this case, because higher
// level logic will decide when to start the SharedInformer and related controller.
// Because returning information back is always asynchronous, the legacy callers shouldn't
// notice any change in behavior.
type dummyController struct {
	// informer is the backing shared informer to which queries are delegated.
	informer *sharedIndexInformer
}
// Run is deliberately a no-op: the shared informer's own Run drives the real controller.
func (v *dummyController) Run(stopCh <-chan struct{}) {
}
// HasSynced delegates to the backing shared informer.
func (v *dummyController) HasSynced() bool {
	return v.informer.HasSynced()
}
// LastSyncResourceVersion always returns ""; the dummy controller exposes no sync state.
func (v *dummyController) LastSyncResourceVersion() string {
	return ""
}
// updateNotification carries the old and new object for an OnUpdate delivery.
type updateNotification struct {
	oldObj interface{}
	newObj interface{}
}
// addNotification carries the new object for an OnAdd delivery.
type addNotification struct {
	newObj interface{}
}
// deleteNotification carries the last locally known object state for an OnDelete delivery.
type deleteNotification struct {
	oldObj interface{}
}
// SetWatchErrorHandler installs the handler called when ListAndWatch drops its
// connection. It must be called before Run; afterwards it returns an error.
func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.started {
		return fmt.Errorf("informer has already started")
	}
	s.watchErrorHandler = handler
	return nil
}
// SetTransform installs the per-object transform applied before storage.
// It must be called before Run; afterwards it returns an error.
func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.started {
		return fmt.Errorf("informer has already started")
	}
	s.transform = handler
	return nil
}
// Run wires up the DeltaFIFO, controller, and processor, then blocks in the
// controller until stopCh closes. Running a second time is rejected with a
// warning. Shutdown order (via the defers, last-registered first): mark
// stopped, signal the processor to stop, then wait for it to drain.
func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	if s.HasStarted() {
		klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed")
		return
	}
	fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
		KnownObjects:          s.indexer,
		EmitDeltaTypeReplaced: true,
	})
	cfg := &Config{
		Queue:             fifo,
		ListerWatcher:     s.listerWatcher,
		ObjectType:        s.objectType,
		FullResyncPeriod:  s.resyncCheckPeriod,
		RetryOnError:      false,
		ShouldResync:      s.processor.shouldResync,
		Process:           s.HandleDeltas,
		WatchErrorHandler: s.watchErrorHandler,
	}
	func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.controller = New(cfg)
		s.controller.(*controller).clock = s.clock
		s.started = true
	}()
	// Separate stop channel because Processor should be stopped strictly after controller
	processorStopCh := make(chan struct{})
	var wg wait.Group
	defer wg.Wait()              // Wait for Processor to stop
	defer close(processorStopCh) // Tell Processor to stop
	wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run)
	wg.StartWithChannel(processorStopCh, s.processor.run)
	defer func() {
		s.startedLock.Lock()
		defer s.startedLock.Unlock()
		s.stopped = true // Don't want any new listeners
	}()
	s.controller.Run(stopCh)
}
// HasStarted reports whether Run has been called (guarded by startedLock).
func (s *sharedIndexInformer) HasStarted() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.started
}
// HasSynced reports whether the controller's initial LIST has completed;
// false before Run creates the controller.
func (s *sharedIndexInformer) HasSynced() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.controller == nil {
		return false
	}
	return s.controller.HasSynced()
}
// LastSyncResourceVersion returns the controller's last-seen resource version,
// or "" before Run creates the controller.
func (s *sharedIndexInformer) LastSyncResourceVersion() string {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.controller == nil {
		return ""
	}
	return s.controller.LastSyncResourceVersion()
}
// GetStore returns the informer's local cache as a Store (the underlying Indexer).
func (s *sharedIndexInformer) GetStore() Store {
	return s.indexer
}
// GetIndexer returns the informer's local cache as an Indexer.
func (s *sharedIndexInformer) GetIndexer() Indexer {
	return s.indexer
}
// AddIndexers registers additional indexers; only allowed before Run starts.
func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.started {
		return fmt.Errorf("informer has already started")
	}
	return s.indexer.AddIndexers(indexers)
}
// GetController returns a dummyController wrapper; see dummyController's doc.
func (s *sharedIndexInformer) GetController() Controller {
	return &dummyController{informer: s}
}
// AddEventHandler registers handler with the informer's default resync period.
func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) {
	return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod)
}
// determineResyncPeriod reconciles a handler's desired resync period with the
// informer's check period: 0 desired means no resync; a 0 check period means
// the informer cannot resync at all; otherwise the result is at least check.
func determineResyncPeriod(desired, check time.Duration) time.Duration {
	switch {
	case desired == 0:
		return desired
	case check == 0:
		klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
		return 0
	case desired < check:
		klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
		return check
	default:
		return desired
	}
}
// minimumResyncPeriod is the smallest resync period accepted for an event
// handler; smaller non-zero requests are bumped up to this value.
const minimumResyncPeriod = 1 * time.Second
// AddEventHandlerWithResyncPeriod registers handler with the given resync
// period. Before the informer starts it may lower resyncCheckPeriod to match
// a smaller request; after start the request is clamped up instead. A handler
// added after start receives synthetic Add events for the current cache
// contents while distribution is paused via blockDeltas.
func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	if s.stopped {
		return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler)
	}
	if resyncPeriod > 0 {
		if resyncPeriod < minimumResyncPeriod {
			klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod)
			resyncPeriod = minimumResyncPeriod
		}
		if resyncPeriod < s.resyncCheckPeriod {
			if s.started {
				klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
				resyncPeriod = s.resyncCheckPeriod
			} else {
				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
				// accordingly
				s.resyncCheckPeriod = resyncPeriod
				s.processor.resyncCheckPeriodChanged(resyncPeriod)
			}
		}
	}
	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
	if !s.started {
		return s.processor.addListener(listener), nil
	}
	// in order to safely join, we have to
	// 1. stop sending add/update/delete notifications
	// 2. do a list against the store
	// 3. send synthetic "Add" events to the new handler
	// 4. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()
	handle := s.processor.addListener(listener)
	for _, item := range s.indexer.List() {
		listener.add(addNotification{newObj: item})
	}
	return handle, nil
}
// HandleDeltas is the controller's Process callback. It holds blockDeltas so
// late-joining handlers can pause distribution, then applies the Deltas to
// the local cache and the processor.
func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()

	deltas, ok := obj.(Deltas)
	if !ok {
		return errors.New("object given as Process argument is not Deltas")
	}
	return processDeltas(s, s.indexer, s.transform, deltas)
}
// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnAdd(obj interface{}) {
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.cacheMutationDetector.AddObject(obj)
	s.processor.distribute(addNotification{newObj: obj}, false)
}
// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
	isSync := false
	// If is a Sync event, isSync should be true
	// If is a Replaced event, isSync is true if resource version is unchanged.
	// If RV is unchanged: this is a Sync/Replaced event, so isSync is true
	if accessor, err := meta.Accessor(new); err == nil {
		if oldAccessor, err := meta.Accessor(old); err == nil {
			// Events that didn't change resourceVersion are treated as resync events
			// and only propagated to listeners that requested resync
			isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
		}
	}
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.cacheMutationDetector.AddObject(new)
	s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
}
// Conforms to ResourceEventHandler
func (s *sharedIndexInformer) OnDelete(old interface{}) {
	// Invocation of this function is locked under s.blockDeltas, so it is
	// safe to distribute the notification
	s.processor.distribute(deleteNotification{oldObj: old}, false)
}
// IsStopped reports whether the informer has already been stopped
func (s *sharedIndexInformer) IsStopped() bool {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	return s.stopped
}
// RemoveEventHandler unregisters a handler by its registration handle.
// Idempotent and thread-safe; distribution is paused via blockDeltas while
// the listener is removed.
func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
	s.startedLock.Lock()
	defer s.startedLock.Unlock()
	// in order to safely remove, we have to
	// 1. stop sending add/update/delete notifications
	// 2. remove and stop listener
	// 3. unblock
	s.blockDeltas.Lock()
	defer s.blockDeltas.Unlock()
	return s.processor.removeListener(handle)
}
// sharedProcessor has a collection of processorListener and can
// distribute a notification object to its listeners. There are two
// kinds of distribute operations. The sync distributions go to a
// subset of the listeners that (a) is recomputed in the occasional
// calls to shouldResync and (b) every listener is initially put in.
// The non-sync distributions go to every listener.
type sharedProcessor struct {
	// listenersStarted is true once run() has launched the listener goroutines;
	// guarded by listenersLock.
	listenersStarted bool
	listenersLock    sync.RWMutex
	// Map from listeners to whether or not they are currently syncing
	listeners map[*processorListener]bool
	clock     clock.Clock
	// wg tracks every listener's run/pop goroutine pair.
	wg wait.Group
}
// getListener resolves a registration handle back to its listener, returning
// nil when the handle is not a *processorListener or is not (or no longer)
// registered with this processor.
func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	listener, ok := registration.(*processorListener)
	if !ok || p.listeners == nil {
		return nil
	}
	if _, registered := p.listeners[listener]; registered {
		return listener
	}
	return nil
}
// addListener registers a listener (initially marked as syncing) and, if the
// processor is already running, starts its run/pop goroutines immediately.
// The listener itself doubles as the opaque registration handle.
func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()
	if p.listeners == nil {
		p.listeners = make(map[*processorListener]bool)
	}
	p.listeners[listener] = true
	if p.listenersStarted {
		p.wg.Start(listener.run)
		p.wg.Start(listener.pop)
	}
	return listener
}
// removeListener unregisters the listener behind the given handle and, if the
// processor is running, closes its addCh so its pop/run goroutines wind down.
// It is idempotent: unknown or already-removed handles return nil.
func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()
	listener, ok := handle.(*processorListener)
	if !ok {
		// %T (not %t, the boolean verb) so the error reports the handle's
		// dynamic type instead of "%!t(...)" noise.
		return fmt.Errorf("invalid key type %T", handle)
	} else if p.listeners == nil {
		// No listeners are registered, do nothing
		return nil
	} else if _, exists := p.listeners[listener]; !exists {
		// Listener is not registered, just do nothing
		return nil
	}
	delete(p.listeners, listener)
	if p.listenersStarted {
		close(listener.addCh)
	}
	return nil
}
// distribute delivers obj to listeners: sync notifications go only to
// listeners currently marked as syncing (by shouldResync); non-sync
// notifications go to all listeners.
func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()
	for listener, isSyncing := range p.listeners {
		if !sync || isSyncing {
			listener.add(obj)
		}
	}
}
// run starts a run/pop goroutine pair for every registered listener, waits
// for stopCh, then shuts everything down: close each addCh (which cascades
// pop -> nextCh -> run), drop the listener map, and wait for all goroutines.
func (p *sharedProcessor) run(stopCh <-chan struct{}) {
	func() {
		p.listenersLock.RLock()
		defer p.listenersLock.RUnlock()
		for listener := range p.listeners {
			p.wg.Start(listener.run)
			p.wg.Start(listener.pop)
		}
		p.listenersStarted = true
	}()
	<-stopCh
	func() {
		p.listenersLock.Lock()
		defer p.listenersLock.Unlock()
		for listener := range p.listeners {
			close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
		}
		// Wipe out list of listeners since they are now closed
		// (processorListener cannot be re-used)
		p.listeners = nil
		// Reset to false since there are nil listeners, also to block new listeners
		// that are added from being run now that the processor was stopped
		p.listenersStarted = false
	}()
	p.wg.Wait() // Wait for all .pop() and .run() to stop
}
// shouldResync queries every listener to determine if any of them need a resync, based on each
// listener's resyncPeriod. As a side effect it records, per listener, whether
// that listener participates in the upcoming sync distribution, and advances
// the next-resync deadline of each listener that is due.
func (p *sharedProcessor) shouldResync() bool {
	p.listenersLock.Lock()
	defer p.listenersLock.Unlock()
	resyncNeeded := false
	now := p.clock.Now()
	for listener := range p.listeners {
		// need to loop through all the listeners to see if they need to resync so we can prepare any
		// listeners that are going to be resyncing.
		shouldResync := listener.shouldResync(now)
		p.listeners[listener] = shouldResync
		if shouldResync {
			resyncNeeded = true
			listener.determineNextResync(now)
		}
	}
	return resyncNeeded
}
// resyncCheckPeriodChanged re-derives every listener's effective resync
// period from its originally requested period and the informer's new
// resync check period.
func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) {
	p.listenersLock.RLock()
	defer p.listenersLock.RUnlock()

	for listener := range p.listeners {
		listener.setResyncPeriod(determineResyncPeriod(listener.requestedResyncPeriod, resyncCheckPeriod))
	}
}
// processorListener relays notifications from a sharedProcessor to
// one ResourceEventHandler --- using two goroutines, two unbuffered
// channels, and an unbounded ring buffer. The `add(notification)`
// function sends the given notification to `addCh`. One goroutine
// runs `pop()`, which pumps notifications from `addCh` to `nextCh`
// using storage in the ring buffer while `nextCh` is not keeping up.
// Another goroutine runs `run()`, which receives notifications from
// `nextCh` and synchronously invokes the appropriate handler method.
//
// processorListener also keeps track of the adjusted requested resync
// period of the listener.
type processorListener struct {
	// nextCh feeds run(); addCh feeds pop(). Both are unbuffered.
	nextCh chan interface{}
	addCh  chan interface{}
	// handler receives the relayed OnAdd/OnUpdate/OnDelete calls.
	handler ResourceEventHandler
	// pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed.
	// There is one per listener, but a failing/stalled listener will have infinite pendingNotifications
	// added until we OOM.
	// TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but
	// we should try to do something better.
	pendingNotifications buffer.RingGrowing
	// requestedResyncPeriod is how frequently the listener wants a
	// full resync from the shared informer, but modified by two
	// adjustments. One is imposing a lower bound,
	// `minimumResyncPeriod`. The other is another lower bound, the
	// sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only
	// in AddEventHandlerWithResyncPeriod invocations made after the
	// sharedIndexInformer starts and (b) only if the informer does
	// resyncs at all.
	requestedResyncPeriod time.Duration
	// resyncPeriod is the threshold that will be used in the logic
	// for this listener. This value differs from
	// requestedResyncPeriod only when the sharedIndexInformer does
	// not do resyncs, in which case the value here is zero. The
	// actual time between resyncs depends on when the
	// sharedProcessor's `shouldResync` function is invoked and when
	// the sharedIndexInformer processes `Sync` type Delta objects.
	resyncPeriod time.Duration
	// nextResync is the earliest time the listener should get a full resync
	nextResync time.Time
	// resyncLock guards access to resyncPeriod and nextResync
	resyncLock sync.Mutex
}
// newProcessListener builds a listener for handler with unbuffered relay
// channels, a ring buffer of the given initial capacity, and its first
// resync deadline computed from now.
func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
	pl := &processorListener{
		nextCh:                make(chan interface{}),
		addCh:                 make(chan interface{}),
		handler:               handler,
		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
		requestedResyncPeriod: requestedResyncPeriod,
		resyncPeriod:          resyncPeriod,
	}

	pl.determineNextResync(now)

	return pl
}
// add hands a notification to pop() via addCh. addCh is unbuffered, so the
// send blocks until pop() receives it; pop() is normally ready immediately,
// buffering overflow into pendingNotifications so slow handlers do not
// stall the sender.
func (p *processorListener) add(notification interface{}) {
	p.addCh <- notification
}
// pop pumps notifications from addCh to nextCh, parking overflow in the
// unbounded pendingNotifications ring so that a slow consumer in run()
// never blocks the producer calling add(). It exits when addCh is
// closed; the deferred close(p.nextCh) then tells run() to stop too.
func (p *processorListener) pop() {
	defer utilruntime.HandleCrash()
	defer close(p.nextCh) // Tell .run() to stop

	// nextCh stays nil (send case disabled) until there is a
	// notification ready to deliver.
	var nextCh chan<- interface{}
	var notification interface{}
	for {
		select {
		case nextCh <- notification:
			// Notification dispatched
			var ok bool
			notification, ok = p.pendingNotifications.ReadOne()
			if !ok { // Nothing to pop
				nextCh = nil // Disable this select case
			}
		case notificationToAdd, ok := <-p.addCh:
			if !ok {
				return
			}
			if notification == nil { // No notification to pop (and pendingNotifications is empty)
				// Optimize the case - skip adding to pendingNotifications
				notification = notificationToAdd
				nextCh = p.nextCh
			} else { // There is already a notification waiting to be dispatched
				p.pendingNotifications.WriteOne(notificationToAdd)
			}
		}
	}
}
// run delivers notifications from nextCh to the handler one at a time,
// dispatching on the concrete notification type.
func (p *processorListener) run() {
	// this call blocks until the channel is closed. When a panic happens during the notification
	// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
	// the next notification will be attempted. This is usually better than the alternative of never
	// delivering again.
	stopCh := make(chan struct{})
	wait.Until(func() {
		for next := range p.nextCh {
			switch notification := next.(type) {
			case updateNotification:
				p.handler.OnUpdate(notification.oldObj, notification.newObj)
			case addNotification:
				p.handler.OnAdd(notification.newObj)
			case deleteNotification:
				p.handler.OnDelete(notification.oldObj)
			default:
				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
			}
		}
		// the only way to get here is if the p.nextCh is empty and closed
		close(stopCh)
	}, 1*time.Second, stopCh)
}
// shouldResync determines whether this listener is due for a full
// resync at time `now`. A resyncPeriod of zero disables resyncs, so
// the answer is then always false.
func (p *processorListener) shouldResync(now time.Time) bool {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()

	if p.resyncPeriod == 0 {
		return false
	}
	// Due when now >= nextResync.
	return !now.Before(p.nextResync)
}
// determineNextResync schedules the next resync deadline at
// now + resyncPeriod, under resyncLock.
func (p *processorListener) determineNextResync(now time.Time) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()
	p.nextResync = now.Add(p.resyncPeriod)
}
// setResyncPeriod updates the effective resync period under resyncLock.
// It does not reschedule nextResync; that happens on the next
// determineNextResync call.
func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
	p.resyncLock.Lock()
	defer p.resyncLock.Unlock()
	p.resyncPeriod = resyncPeriod
}
|
package mqplan
import (
"encoding/base64"
"encoding/hex"
"fmt"
"math"
"math/rand"
"strings"
"time"
"gopkg.in/resty.v0"
"meqa/mqswag"
"meqa/mqutil"
"reflect"
"encoding/json"
"github.com/go-openapi/spec"
"github.com/lucasjones/reggen"
"github.com/xeipuuv/gojsonschema"
)
// The operation code in <meqa ....op> for parameters. The op code at the path level
// is the above Rest methods.
const (
	OpRead  = "read"  // parameter is a lookup key (stored in Comparison.old)
	OpWrite = "write" // parameter carries data to be written (stored in Comparison.new)
)

// The class code in <meqa class> for responses.
const (
	ClassSuccess = "success"
	ClassFail    = "fail" // a response description tagged "fail" marks the test unsuccessful
)

// Keys recognized in a test's "expect" map.
const (
	ExpectStatus = "status"
)
// GetBaseURL assembles the base URL (scheme://host/basePath) from the
// swagger spec. Scheme preference: "http" first, then "https", then
// whatever scheme is listed first; "http" is assumed when the spec
// lists no schemes at all.
func GetBaseURL(swagger *mqswag.Swagger) string {
	var scheme string
	for _, s := range swagger.Schemes {
		if s == "http" {
			scheme = "http"
			break
		}
		if s == "https" {
			scheme = "https"
		}
	}
	if scheme == "" {
		if len(swagger.Schemes) > 0 {
			scheme = swagger.Schemes[0]
		} else {
			scheme = "http"
		}
	}
	return scheme + "://" + swagger.Host + swagger.BasePath
}
// Comparison tracks the before/after state of one object of a given
// class, for verification against the in-memory DB after a test runs.
//
// Post: old - nil, new - the new object we create.
// Put, patch: old - the old object, new - the new one.
// Get: old - the old object, new - the one we get from the server.
// Delete: old - the existing object, new - nil.
type Comparison struct {
	old    map[string]interface{} // For put and patch, it stores the keys used in lookup
	new    map[string]interface{} // data written by (or expected back from) the operation
	schema *spec.Schema           // schema of the tracked object class
}
// GetMapByOp returns the map that collects values for the given op
// code: `old` for OpRead (lookup keys), `new` for anything else
// (written data). The map is lazily allocated on first use.
func (comp *Comparison) GetMapByOp(op string) map[string]interface{} {
	if op != OpRead {
		if comp.new == nil {
			comp.new = make(map[string]interface{})
		}
		return comp.new
	}
	if comp.old == nil {
		comp.old = make(map[string]interface{})
	}
	return comp.old
}
// Test represents a test object in the DSL. Extra care needs to be taken to copy the
// Test before running it, because running it would change the parameter maps.
type Test struct {
	Name         string                 `yaml:"name,omitempty"`   // test name; "<name.param>" history references look tests up by it
	Path         string                 `yaml:"path,omitempty"`   // swagger path template; "{param}" segments are substituted from PathParams
	Method       string                 `yaml:"method,omitempty"` // HTTP method, lower-cased by Init
	Ref          string                 `yaml:"ref,omitempty"`
	Expect       map[string]interface{} `yaml:"expect,omitempty"` // expected outcome overrides, e.g. ExpectStatus -> "fail" or a status code
	QueryParams  map[string]interface{} `yaml:"queryParams,omitempty"`
	BodyParams   interface{}            `yaml:"bodyParams,omitempty"` // map, array, or scalar body
	FormParams   map[string]interface{} `yaml:"formParams,omitempty"`
	PathParams   map[string]interface{} `yaml:"pathParams,omitempty"`
	HeaderParams map[string]interface{} `yaml:"headerParams,omitempty"`

	// Map of Object name (matching definitions) to the Comparison object.
	// This tracks what objects we need to add to DB at the end of test.
	comparisons map[string]([]*Comparison)

	tag  *mqswag.MeqaTag // The tag at the top level that describes the test
	db   *mqswag.DB      // in-memory DB handle, set by Init
	op   *spec.Operation // resolved swagger operation, set by ResolveParameters
	resp *resty.Response // last server response, set by ProcessResult
	err  error
}
// Init attaches the in-memory DB to the test, normalizes the method
// name to lower case, and converts a YAML-decoded body map
// (map[interface{}]interface{}) into a map[string]interface{}.
func (t *Test) Init(db *mqswag.DB) {
	t.db = db
	if t.Method != "" {
		t.Method = strings.ToLower(t.Method)
	}
	// YAML unmarshal yields interface{} keys; normalize them to strings.
	bodyMap, isMap := t.BodyParams.(map[interface{}]interface{})
	if !isMap {
		return
	}
	converted := make(map[string]interface{}, len(bodyMap))
	for key, value := range bodyMap {
		converted[fmt.Sprint(key)] = value
	}
	t.BodyParams = converted
}
// Duplicate deep-copies the test's parameter maps so that running the
// copy cannot mutate the original. Runtime state (tag, op, resp, err,
// comparisons) is reset rather than copied.
func (t *Test) Duplicate() *Test {
	dup := *t
	dup.Expect = mqutil.MapCopy(dup.Expect)
	dup.QueryParams = mqutil.MapCopy(dup.QueryParams)
	dup.FormParams = mqutil.MapCopy(dup.FormParams)
	dup.PathParams = mqutil.MapCopy(dup.PathParams)
	dup.HeaderParams = mqutil.MapCopy(dup.HeaderParams)
	switch body := dup.BodyParams.(type) {
	case map[string]interface{}:
		dup.BodyParams = mqutil.MapCopy(body)
	case []interface{}:
		dup.BodyParams = mqutil.ArrayCopy(body)
	}
	dup.tag = nil
	dup.op = nil
	dup.resp = nil
	dup.err = nil
	dup.comparisons = make(map[string]([]*Comparison))
	return &dup
}
// AddBasicComparison records a single property value into the
// comparison tracking for tag.Class. Values accumulate on the most
// recent Comparison until a property collides; since parameter
// generation completes one object before starting the next, a
// collision means a new object begins, so a fresh Comparison is
// appended.
func (t *Test) AddBasicComparison(tag *mqswag.MeqaTag, paramSpec *spec.Parameter, data interface{}) {
	if paramSpec == nil {
		return
	}
	if tag == nil || len(tag.Class) == 0 || len(tag.Property) == 0 {
		// No explicit tag. Info we have: t.Method, t.tag - indicate what operation we want to do.
		// t.path - indicate what object we want to operate on. We need to extract the equivalent
		// of the tag. This is usually done on server, here we just make a simple effort.
		// TODO
		return
	}

	// Decide whether this value is a lookup key (read) or new data (write).
	op := tag.Operation
	if op == "" {
		if paramSpec.In == "formData" || paramSpec.In == "body" {
			op = OpWrite
		} else {
			op = OpRead
		}
	}

	list := t.comparisons[tag.Class]
	if len(list) > 0 {
		// Try to extend the most recent comparison first.
		m := list[len(list)-1].GetMapByOp(op)
		if _, exists := m[tag.Property]; !exists {
			m[tag.Property] = data
			return
		}
		// The property is already set on the last object — start a new one.
	}
	comp := &Comparison{}
	comp.schema = (*spec.Schema)(t.db.Swagger.FindSchemaByName(tag.Class))
	comp.GetMapByOp(op)[tag.Property] = data
	t.comparisons[tag.Class] = append(list, comp)
}
// AddObjectComparison records a whole generated object for later
// verification against the in-memory DB.
//
// For POST the object is tracked as new data. For PUT/PATCH we may be
// updating a list of objects: parameter generation always completes one
// object (lookup key plus new data) before starting the next, so if the
// most recent Comparison still lacks its "new" half we fill it in;
// otherwise a new Comparison is appended (a PUT entry with only a "new"
// half is allowed — the update key is embedded in the new object).
// Any other method is unexpected here and is only logged.
func (t *Test) AddObjectComparison(class string, method string, obj map[string]interface{}, schema *spec.Schema) {
	if method == mqswag.MethodPost {
		t.comparisons[class] = append(t.comparisons[class], &Comparison{nil, obj, schema})
	} else if method == mqswag.MethodPut || method == mqswag.MethodPatch {
		if len(t.comparisons[class]) > 0 {
			last := t.comparisons[class][len(t.comparisons[class])-1]
			if last.new == nil {
				last.new = obj
				return
			}
			// During put, having an array of objects with just the "new" part is allowed. This
			// means the update key is included in the new object.
		}
		t.comparisons[class] = append(t.comparisons[class], &Comparison{nil, obj, schema})
	} else {
		// Fix: the old message always said "GET method", but this branch is
		// reached for any method other than post/put/patch. Report the
		// actual method instead.
		mqutil.Logger.Printf("unexpected: generating object %s for %s method.", class, method)
	}
}
// ProcessOneComparison processes one comparison object.
//
// The effective method is t.Method unless the test-level tag overrides it:
//   - get: query the in-memory DB with the same criteria as the server and
//     require both result sets to match entry-for-entry;
//   - delete: drop matching entries from the DB;
//   - post: insert the new object into the DB;
//   - put/patch: update one matching DB entry (a miss is only logged).
func (t *Test) ProcessOneComparison(className string, comp *Comparison, resultArray []interface{}) error {
	method := t.Method
	if t.tag != nil && len(t.tag.Operation) > 0 {
		method = t.tag.Operation
	}

	if method == mqswag.MethodGet {
		var matchFunc mqswag.MatchFunc
		if comp.old == nil {
			// No lookup keys recorded: every DB entry of this class counts.
			matchFunc = mqswag.MatchAlways
		} else {
			matchFunc = mqswag.MatchAllFields
		}
		dbArray := t.db.Find(className, comp.old, matchFunc, -1)
		// What we found from the server (resultArray) and from in-memory DB using the same criteria should match.
		if len(resultArray) != len(dbArray) {
			resultBytes, _ := json.Marshal(resultArray)
			mqutil.Logger.Print(string(resultBytes))
			return mqutil.NewError(mqutil.ErrHttp, fmt.Sprintf("expecting %d entries got %d entries",
				len(dbArray), len(resultArray)))
		}
		// TODO optimize later. Should sort first.
		for _, entry := range resultArray {
			found := false
			entryMap, _ := entry.(map[string]interface{})
			if entryMap == nil {
				if len(dbArray) == 0 {
					// Server returned array of non-map types. The db shouldn't expect anything.
					continue
				}
			} else {
				for _, dbEntry := range dbArray {
					dbentryMap, _ := dbEntry.(map[string]interface{})
					if dbentryMap != nil && mqutil.MapEquals(entryMap, dbentryMap, false) {
						found = true
						break
					}
				}
			}
			if !found {
				b, _ := json.Marshal(entry)
				return mqutil.NewError(mqutil.ErrHttp, fmt.Sprintf("result returned is not found on client\n%s\n",
					string(b)))
			}
		}
	} else if method == mqswag.MethodDelete {
		t.db.Delete(className, comp.old, mqswag.MatchAllFields, -1)
	} else if method == mqswag.MethodPost {
		return t.db.Insert(className, comp.schema, comp.new)
	} else if method == mqswag.MethodPatch || method == mqswag.MethodPut {
		// Expect exactly one entry to be updated; a miss is only logged.
		count := t.db.Update(className, comp.old, mqswag.MatchAllFields, comp.new, 1, method == mqswag.MethodPatch)
		if count != 1 {
			mqutil.Logger.Printf("Failed to find any entry to update")
		}
	}
	return nil
}
// GetParamFromComparison searches every tracked Comparison for a
// non-nil value stored under `name`. The `where` argument selects
// which half to consult: "new", "old", or "any" for both. Returns nil
// when nothing matches.
func (t *Test) GetParamFromComparison(name string, where string) interface{} {
	checkNew := where == "any" || where == "new"
	checkOld := where == "any" || where == "old"
	for _, compList := range t.comparisons {
		for _, comp := range compList {
			if checkNew {
				if v := comp.new[name]; v != nil {
					return v
				}
			}
			if checkOld {
				if v := comp.old[name]; v != nil {
					return v
				}
			}
		}
	}
	return nil
}
// ProcessResult validates the server response against the swagger spec
// and then replays the tracked comparisons against the in-memory DB.
//
// Steps:
//  1. find the response spec for the status code (falling back to
//     Default, then to an empty spec);
//  2. check the payload against the response schema;
//  3. drop comparison entries that we neither modified nor expect back
//     in the response;
//  4. decide success from the status code, the <meqa class> tag on the
//     response description, and the test's "expect" overrides;
//  5. on success, apply each comparison to the DB via
//     ProcessOneComparison.
func (t *Test) ProcessResult(resp *resty.Response) error {
	t.resp = resp
	status := resp.StatusCode()
	var respSpec *spec.Response
	if t.op.Responses != nil {
		respObject, ok := t.op.Responses.StatusCodeResponses[status]
		if ok {
			respSpec = &respObject
		} else {
			respSpec = t.op.Responses.Default
		}
	}
	if respSpec == nil {
		// Nothing specified in the swagger.json. Same as an empty spec.
		respSpec = &spec.Response{}
	}

	respBody := resp.Body()
	// Check if the response obj and respSchema match.
	respSchema := (*mqswag.Schema)(respSpec.Schema)
	var resultObj interface{}
	if respSchema != nil {
		if len(respBody) > 0 {
			err := json.Unmarshal(respBody, &resultObj)
			if err == nil {
				if !respSchema.Matches(resultObj, t.db.Swagger) {
					return mqutil.NewError(mqutil.ErrServerResp, fmt.Sprintf("server response doesn't match swagger spec: %s", string(respBody)))
				}
			} else if !respSchema.Type.Contains(gojsonschema.TYPE_STRING) {
				// Non-JSON payloads are acceptable only for string-typed schemas.
				return mqutil.NewError(mqutil.ErrServerResp, fmt.Sprintf("server response doesn't match swagger spec: %s", string(respBody)))
			}
		} else {
			// If schema is an array, then not having a body is OK.
			if !respSchema.Type.Contains(gojsonschema.TYPE_ARRAY) {
				return mqutil.NewError(mqutil.ErrServerResp, "swagger.spec expects a non-empty response, but response body is actually empty")
			}
		}

		// Remove the comparison objects that 1) swagger doesn't expect back
		// and 2) we didn't modify. The loop compacts in place: each removed
		// slot is overwritten by a not-yet-consumed survivor from the front,
		// and the final slice skips the first `count` (removed) entries.
		for className, compList := range t.comparisons {
			count := 0
			for i, comp := range compList {
				if comp.new == nil && !respSchema.Contains(className, t.db.Swagger) {
					compList[i] = compList[count]
					count++
				}
			}
			t.comparisons[className] = compList[count:]
		}
	}

	// success based on return status
	success := status >= 200 && status < 300
	tag := mqswag.GetMeqaTag(respSpec.Description)
	if tag != nil && tag.Class == ClassFail {
		success = false
	}
	if t.Expect != nil && t.Expect[ExpectStatus] != nil {
		expectedStatus := t.Expect[ExpectStatus]
		if expectedStatus == "fail" {
			success = !success
		} else if expectedStatusNum, ok := expectedStatus.(int); ok {
			success = expectedStatusNum == status
		} else {
			success = false
		}
	}
	if !success {
		// Fix: the old code guarded this log with an `actuallyFailed` flag
		// that was always true — dead code, removed.
		mqutil.Logger.Printf("=== test failed, response code %d ===", status)
		return nil
	}

	var resultArray []interface{}
	var ok bool
	if resultObj != nil {
		if resultArray, ok = resultObj.([]interface{}); !ok {
			resultArray = []interface{}{resultObj}
		}
	}
	// Success, replace or verify based on method.
	for className, compArray := range t.comparisons {
		for _, c := range compArray {
			err := t.ProcessOneComparison(className, c, resultArray)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// SetRequestParameters installs the test's query/body/header/form
// parameters on the resty request, logging each set, and returns the
// request path with "{param}" placeholders substituted from PathParams.
func (t *Test) SetRequestParameters(req *resty.Request) string {
	if len(t.QueryParams) > 0 {
		req.SetQueryParams(mqutil.MapInterfaceToMapString(t.QueryParams))
		mqutil.InterfacePrint(t.QueryParams, "queryParams:\n")
	}
	if t.BodyParams != nil {
		req.SetBody(t.BodyParams)
		mqutil.InterfacePrint(t.BodyParams, "bodyParams:\n")
	}
	if len(t.HeaderParams) > 0 {
		req.SetHeaders(mqutil.MapInterfaceToMapString(t.HeaderParams))
		mqutil.InterfacePrint(t.HeaderParams, "headerParams:\n")
	}
	if len(t.FormParams) > 0 {
		req.SetFormData(mqutil.MapInterfaceToMapString(t.FormParams))
		mqutil.InterfacePrint(t.FormParams, "formParams:\n")
	}

	path := t.Path
	if len(t.PathParams) > 0 {
		for name, value := range mqutil.MapInterfaceToMapString(t.PathParams) {
			path = strings.Replace(path, "{"+name+"}", value, -1)
		}
		mqutil.InterfacePrint(t.PathParams, "pathParams:\n")
	}
	return path
}
// CopyParams merges parameter values from parentTest into this test.
// Values already set on this test win for the parameter maps; Expect is
// taken wholesale from the parent. BodyParams is merged key-wise when
// both are maps, inherited outright when this test has none, and
// otherwise replaced only when the dynamic types agree.
func (t *Test) CopyParams(parentTest *Test) {
	if parentTest == nil {
		return
	}
	t.Expect = mqutil.MapCopy(parentTest.Expect)
	t.QueryParams = mqutil.MapCombine(t.QueryParams, parentTest.QueryParams)
	t.PathParams = mqutil.MapCombine(t.PathParams, parentTest.PathParams)
	t.HeaderParams = mqutil.MapCombine(t.HeaderParams, parentTest.HeaderParams)
	t.FormParams = mqutil.MapCombine(t.FormParams, parentTest.FormParams)

	if parentTest.BodyParams == nil {
		return
	}
	if t.BodyParams == nil {
		t.BodyParams = parentTest.BodyParams
		return
	}
	if parentBody, ok := parentTest.BodyParams.(map[string]interface{}); ok {
		// Merge only when both sides are maps.
		if body, ok := t.BodyParams.(map[string]interface{}); ok {
			t.BodyParams = mqutil.MapCombine(body, parentBody)
		}
		return
	}
	// For non-map types, just replace with parent if they are the same type.
	if reflect.TypeOf(parentTest.BodyParams) == reflect.TypeOf(t.BodyParams) {
		t.BodyParams = parentTest.BodyParams
	}
}
// Run runs the test. Returns the test result.
//
// Flow: resolve parameters from the plan and the in-memory DB, build the
// resty request (with basic auth when the plan carries a username), issue
// it with the test's HTTP method, then hand the response to ProcessResult
// for spec validation and DB bookkeeping.
func (t *Test) Run(plan *TestPlan) error {
	mqutil.Logger.Print("\n--- " + t.Name)
	err := t.ResolveParameters(plan)
	if err != nil {
		return err
	}

	req := resty.R()
	if len(plan.Username) > 0 {
		req.SetBasicAuth(plan.Username, plan.Password)
	}
	path := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)
	var resp *resty.Response

	switch t.Method {
	case mqswag.MethodGet:
		resp, err = req.Get(path)
	case mqswag.MethodPost:
		resp, err = req.Post(path)
	case mqswag.MethodPut:
		resp, err = req.Put(path)
	case mqswag.MethodDelete:
		resp, err = req.Delete(path)
	case mqswag.MethodPatch:
		resp, err = req.Patch(path)
	case mqswag.MethodHead:
		resp, err = req.Head(path)
	case mqswag.MethodOptions:
		resp, err = req.Options(path)
	default:
		return mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("Unknown method in test %s: %v", t.Name, t.Method))
	}
	if err != nil {
		return mqutil.NewError(mqutil.ErrHttp, err.Error())
	}
	// TODO properly process resp. Check against the current DB to see if they match
	mqutil.Logger.Print(resp.Status())
	mqutil.Logger.Println(string(resp.Body()))
	return t.ProcessResult(resp)
}
// GetSchemaRootType gets the real object type of the specified schema by
// following $refs and unwrapping arrays. It only returns meaningful data
// for object (or array-of-object) schemas; enums, untyped schemas, and
// basic types yield (nil, nil). The returned tag is the innermost meqa
// tag encountered, or one synthesized from the reference name.
func (t *Test) GetSchemaRootType(schema *mqswag.Schema, parentTag *mqswag.MeqaTag) (*mqswag.MeqaTag, *mqswag.Schema) {
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	referenceName, referredSchema, err := t.db.Swagger.GetReferredSchema((*mqswag.Schema)(schema))
	if err != nil {
		mqutil.Logger.Print(err)
		return nil, nil
	}
	if referredSchema != nil {
		// Follow the $ref; the reference name becomes the class if no tag yet.
		if tag == nil {
			tag = &mqswag.MeqaTag{referenceName, "", ""}
		}
		return t.GetSchemaRootType(referredSchema, tag)
	}
	if len(schema.Enum) != 0 {
		return nil, nil
	}
	if len(schema.Type) == 0 {
		return nil, nil
	}
	if schema.Type.Contains(gojsonschema.TYPE_ARRAY) {
		// Unwrap the array and classify its element schema.
		var itemSchema *spec.Schema
		if len(schema.Items.Schemas) != 0 {
			itemSchema = &(schema.Items.Schemas[0])
		} else {
			itemSchema = schema.Items.Schema
		}
		return t.GetSchemaRootType((*mqswag.Schema)(itemSchema), tag)
	} else if schema.Type.Contains(gojsonschema.TYPE_OBJECT) {
		return tag, schema
	}
	return nil, nil
}
// StringParamsResolveWithHistory resolves a "<testName.paramName>"
// placeholder in str by looking the named test up in the history and
// pulling the parameter from its comparison records. Returns nil when
// str has no well-formed placeholder or nothing matches.
func StringParamsResolveWithHistory(str string, h *TestHistory) interface{} {
	begin := strings.Index(str, "<")
	end := strings.Index(str, ">")
	// Fix: require both delimiters in order. The old check (end > begin)
	// also accepted a lone ">" because a missing "<" left begin at -1.
	if begin < 0 || end <= begin {
		return nil
	}
	ar := strings.Split(str[begin+1:end], ".")
	if len(ar) != 2 {
		mqutil.Logger.Printf("invalid parameter: %s", str[begin+1:end])
		return nil
	}
	if t := h.GetTest(ar[0]); t != nil {
		return t.GetParamFromComparison(ar[1], "any")
	}
	return nil
}
// MapParamsResolveWithHistory rewrites, in place, every string value in
// paramMap that resolves to a historical parameter reference.
func MapParamsResolveWithHistory(paramMap map[string]interface{}, h *TestHistory) {
	for key, value := range paramMap {
		str, isString := value.(string)
		if !isString {
			continue
		}
		if resolved := StringParamsResolveWithHistory(str, h); resolved != nil {
			paramMap[key] = resolved
		}
	}
}
// ArrayParamsResolveWithHistory resolves historical references inside
// an array of parameters: map elements are resolved per key, string
// elements are replaced in place.
func ArrayParamsResolveWithHistory(paramArray []interface{}, h *TestHistory) {
	for i, param := range paramArray {
		switch v := param.(type) {
		case map[string]interface{}:
			MapParamsResolveWithHistory(v, h)
		case string:
			if resolved := StringParamsResolveWithHistory(v, h); resolved != nil {
				paramArray[i] = resolved
			}
		}
	}
}
// ResolveHistoryParameters resolves "<test.param>" references in all of
// this test's parameter collections against the test history.
func (t *Test) ResolveHistoryParameters(h *TestHistory) {
	MapParamsResolveWithHistory(t.PathParams, h)
	MapParamsResolveWithHistory(t.FormParams, h)
	MapParamsResolveWithHistory(t.HeaderParams, h)
	MapParamsResolveWithHistory(t.QueryParams, h)

	switch body := t.BodyParams.(type) {
	case map[string]interface{}:
		MapParamsResolveWithHistory(body, h)
	case []interface{}:
		ArrayParamsResolveWithHistory(body, h)
	case string:
		if resolved := StringParamsResolveWithHistory(body, h); resolved != nil {
			t.BodyParams = resolved
		}
	}
}
// ResolveParameters fulfills the parameters for the specified request using
// the in-mem DB. For each operation parameter: an explicitly supplied value
// (test-level, then plan-level) is used and recorded for comparison;
// otherwise a value is generated. The body parameter is special-cased —
// supplied bodies are registered as object comparisons instead.
func (t *Test) ResolveParameters(plan *TestPlan) error {
	pathItem := t.db.Swagger.Paths.Paths[t.Path]
	t.op = getOperationByMethod(&pathItem, t.Method)
	if t.op == nil {
		// NOTE(review): also reached when the path exists but lacks this
		// method; the message only mentions the path.
		return mqutil.NewError(mqutil.ErrNotFound, fmt.Sprintf("Path %s not found in swagger file", t.Path))
	}
	t.tag = mqswag.GetMeqaTag(t.op.Description)

	var paramsMap map[string]interface{}
	var globalParamsMap map[string]interface{}
	var err error
	var genParam interface{}
	for _, params := range t.op.Parameters {
		if params.In == "body" {
			if t.BodyParams != nil {
				// There is only one body parameter. No need to check name. In fact, we don't
				// even store the name in the DSL.
				objarray := mqutil.InterfaceToArray(t.BodyParams)
				paramTag, schema := t.GetSchemaRootType((*mqswag.Schema)(params.Schema), mqswag.GetMeqaTag(params.Description))
				method := t.Method
				if t.tag != nil && len(t.tag.Operation) > 0 {
					method = t.tag.Operation
				}
				if schema != nil && paramTag != nil {
					// Track each supplied body object for DB verification later.
					for _, obj := range objarray {
						t.AddObjectComparison(paramTag.Class, method, obj, (*spec.Schema)(schema))
					}
				}
				continue
			}
			genParam, err = t.GenerateParameter(&params, t.db)
			t.BodyParams = genParam
		} else {
			// Pick the local map and the plan-level fallback map for this
			// parameter location.
			switch params.In {
			case "path":
				if t.PathParams == nil {
					t.PathParams = make(map[string]interface{})
				}
				paramsMap = t.PathParams
				globalParamsMap = plan.PathParams
			case "query":
				if t.QueryParams == nil {
					t.QueryParams = make(map[string]interface{})
				}
				paramsMap = t.QueryParams
				globalParamsMap = plan.QueryParams
			case "header":
				if t.HeaderParams == nil {
					t.HeaderParams = make(map[string]interface{})
				}
				paramsMap = t.HeaderParams
				globalParamsMap = plan.HeaderParams
			case "formData":
				if t.FormParams == nil {
					t.FormParams = make(map[string]interface{})
				}
				paramsMap = t.FormParams
				globalParamsMap = plan.FormParams
			}
			// If there is a parameter passed in, just use it. Otherwise generate one.
			if paramsMap[params.Name] == nil && globalParamsMap[params.Name] != nil {
				paramsMap[params.Name] = globalParamsMap[params.Name]
			}
			if _, ok := paramsMap[params.Name]; ok {
				// Existing entry (possibly explicit nil): record it for
				// comparison and skip generation.
				t.AddBasicComparison(mqswag.GetMeqaTag(params.Description), &params, paramsMap[params.Name])
				continue
			}
			genParam, err = t.GenerateParameter(&params, t.db)
			paramsMap[params.Name] = genParam
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// getOperationByMethod returns the operation the path item defines for
// the given (lower-case) HTTP method, or nil when the path does not
// support that method.
func getOperationByMethod(item *spec.PathItem, method string) *spec.Operation {
	byMethod := map[string]*spec.Operation{
		mqswag.MethodGet:     item.Get,
		mqswag.MethodPost:    item.Post,
		mqswag.MethodPut:     item.Put,
		mqswag.MethodDelete:  item.Delete,
		mqswag.MethodPatch:   item.Patch,
		mqswag.MethodHead:    item.Head,
		mqswag.MethodOptions: item.Options,
	}
	// Unknown methods fall out as nil, matching the switch default.
	return byMethod[method]
}
// GenerateParameter generates paramter value based on the spec.
func (t *Test) GenerateParameter(paramSpec *spec.Parameter, db *mqswag.DB) (interface{}, error) {
tag := mqswag.GetMeqaTag(paramSpec.Description)
if paramSpec.Schema != nil {
return t.GenerateSchema(paramSpec.Name, tag, paramSpec.Schema, db)
}
if len(paramSpec.Enum) != 0 {
return generateEnum(paramSpec.Enum)
}
if len(paramSpec.Type) == 0 {
return nil, mqutil.NewError(mqutil.ErrInvalid, "Parameter doesn't have type")
}
var schema *spec.Schema
if paramSpec.Schema != nil {
schema = paramSpec.Schema
} else {
// construct a full schema from simple ones
schema = (*spec.Schema)(mqswag.CreateSchemaFromSimple(¶mSpec.SimpleSchema, ¶mSpec.CommonValidations))
}
if paramSpec.Type == gojsonschema.TYPE_OBJECT {
return t.generateObject("param_", tag, schema, db)
}
if paramSpec.Type == gojsonschema.TYPE_ARRAY {
return t.generateArray("param_", tag, schema, db)
}
return t.generateByType(schema, paramSpec.Name+"_", tag, paramSpec)
}
// Two ways to get to generateByType
// 1) directly called from GenerateParameter, now we know the type is a parameter, and we want to add to comparison
// 2) called at bottom level, here we know the object will be added to comparison and not the type primitives.
//
// generateByType produces a primitive value for schema s. When called for
// a parameter (paramSpec != nil) carrying a class/property meqa tag, it
// first tries to reuse a value from existing comparisons or from the
// in-memory DB so the parameter refers to a real object. Freshly generated
// values are recorded via AddBasicComparison.
func (t *Test) generateByType(s *spec.Schema, prefix string, parentTag *mqswag.MeqaTag, paramSpec *spec.Parameter) (interface{}, error) {
	tag := mqswag.GetMeqaTag(s.Description)
	if tag == nil {
		tag = parentTag
	}
	if paramSpec != nil {
		if tag != nil && len(tag.Property) > 0 {
			// Try to get one from the comparison objects.
			for _, c := range t.comparisons[tag.Class] {
				if c.old != nil {
					return c.old[tag.Property], nil
				}
			}
			// Get one from in-mem db and populate the comparison structure.
			ar := t.db.Find(tag.Class, nil, mqswag.MatchAlways, 5)
			if len(ar) > 0 {
				obj := ar[rand.Intn(len(ar))].(map[string]interface{})
				comp := &Comparison{obj, nil, (*spec.Schema)(t.db.GetSchema(tag.Class))}
				t.comparisons[tag.Class] = append(t.comparisons[tag.Class], comp)
				return obj[tag.Property], nil
			}
		}
	}
	if len(s.Type) != 0 {
		var result interface{}
		var err error
		switch s.Type[0] {
		case gojsonschema.TYPE_BOOLEAN:
			result, err = generateBool(s)
		case gojsonschema.TYPE_INTEGER:
			result, err = generateInt(s)
		case gojsonschema.TYPE_NUMBER:
			result, err = generateFloat(s)
		case gojsonschema.TYPE_STRING:
			result, err = generateString(s, prefix)
		case "file":
			// No real file support; a numeric string stands in.
			result, err = reggen.Generate("[0-9]+", 200)
		}
		if result != nil && err == nil {
			t.AddBasicComparison(tag, paramSpec, result)
			return result, err
		}
	}
	return nil, mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("unrecognized type: %s", s.Type))
}
// RandomTime returns a uniformly random instant in the range (t-r, t]:
// a random fraction of r is subtracted from t.
func RandomTime(t time.Time, r time.Duration) time.Time {
	offset := time.Duration(rand.Float64() * float64(r))
	return t.Add(-offset)
}
// generateString produces a random string obeying the schema's format and
// pattern. Date formats yield a random timestamp within the past 30 days;
// otherwise a regex (the schema's Pattern, or prefix + digits when none is
// given) drives generation, with byte/binary formats encoded afterwards.
// TODO: make this context aware so different contexts get different date ranges.
func generateString(s *spec.Schema, prefix string) (string, error) {
	switch s.Format {
	case "date-time":
		return RandomTime(time.Now(), time.Hour*24*30).Format(time.RFC3339), nil
	case "date":
		return RandomTime(time.Now(), time.Hour*24*30).Format("2006-01-02"), nil
	}

	// If no pattern is specified, we use the field name + some numbers as pattern.
	pattern := prefix + "\\d+"
	length := len(prefix) + 5
	if len(s.Pattern) != 0 {
		pattern = s.Pattern
		length = len(s.Pattern) * 2
	}
	str, err := reggen.Generate(pattern, length)
	if err != nil {
		return "", mqutil.NewError(mqutil.ErrInvalid, err.Error())
	}

	switch s.Format {
	case "", "password":
		return str, nil
	case "byte":
		return base64.StdEncoding.EncodeToString([]byte(str)), nil
	case "binary":
		return hex.EncodeToString([]byte(str)), nil
	}
	return "", mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("Invalid format string: %s", s.Format))
}
// generateBool returns a fair random boolean; the schema is unused.
func generateBool(s *spec.Schema) (interface{}, error) {
	flip := rand.Intn(2)
	return flip == 0, nil
}
// generateFloat returns a random float64 within the schema's
// Minimum/Maximum bounds (exclusive bounds are tightened by 0.01).
// With no bounds the range defaults to [-1, 1); with a single bound the
// other end is derived from that bound's magnitude.
func generateFloat(s *spec.Schema) (float64, error) {
	var realmin float64
	if s.Minimum != nil {
		realmin = *s.Minimum
		if s.ExclusiveMinimum {
			realmin += 0.01
		}
	}
	var realmax float64
	if s.Maximum != nil {
		realmax = *s.Maximum
		if s.ExclusiveMaximum {
			realmax -= 0.01
		}
	}
	if realmin >= realmax {
		if s.Minimum == nil && s.Maximum == nil {
			realmin = -1.0
			realmax = 1.0
		} else if s.Minimum != nil {
			// NOTE(review): this branch also runs when BOTH bounds are set
			// but conflict (min >= max), silently widening the range — the
			// conflict error below is therefore unreachable. Confirm whether
			// conflicting specs should error instead.
			realmax = realmin + math.Abs(realmin)
		} else if s.Maximum != nil {
			realmin = realmax - math.Abs(realmax)
		} else {
			// both are present but conflicting
			return 0, mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("specified min value %v is bigger than max %v",
				*s.Minimum, *s.Maximum))
		}
	}
	return rand.Float64()*(realmax-realmin) + realmin, nil
}
// generateInt produces a random int64 within the schema's bounds by
// generating a float (see generateFloat) and truncating. When neither
// bound is given, a default maximum of 10000 is assumed. If truncation
// lands at or below the minimum, the value is bumped up by one.
func generateInt(s *spec.Schema) (int64, error) {
	// Give a default range if there isn't one. Fix: apply it to a copy —
	// the old code wrote the default Maximum back into the caller's
	// schema, permanently mutating shared spec data.
	if s.Maximum == nil && s.Minimum == nil {
		defaulted := *s
		maxf := 10000.0
		defaulted.Maximum = &maxf
		s = &defaulted
	}
	f, err := generateFloat(s)
	if err != nil {
		return 0, err
	}
	i := int64(f)
	if s.Minimum != nil && i <= int64(*s.Minimum) {
		i++
	}
	return i, nil
}
// generateArray produces a random array conforming to the schema: the
// item count is drawn from [MinItems, MaxItems) — defaulting to [0, 10)
// when neither bound is set — and each item is generated via
// GenerateSchema. With UniqueItems, duplicate entries are skipped (not
// retried), so the result may be shorter than the drawn count.
// NOTE(review): rand.Intn is exclusive, so a count equal to MaxItems is
// never produced — confirm whether the bound should be inclusive.
func (t *Test) generateArray(name string, parentTag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
	var numItems int
	if schema.MaxItems != nil || schema.MinItems != nil {
		var maxItems int
		if schema.MaxItems != nil {
			maxItems = int(*schema.MaxItems)
			if maxItems < 0 {
				maxItems = 0
			}
		}
		var minItems int
		if schema.MinItems != nil {
			minItems = int(*schema.MinItems)
			if minItems < 0 {
				minItems = 0
			}
		}
		maxDiff := maxItems - minItems
		if maxDiff <= 0 {
			maxDiff = 1
		}
		numItems = rand.Intn(int(maxDiff)) + minItems
	} else {
		numItems = rand.Intn(10)
	}
	var itemSchema *spec.Schema
	if len(schema.Items.Schemas) != 0 {
		itemSchema = &(schema.Items.Schemas[0])
	} else {
		itemSchema = schema.Items.Schema
	}
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	var ar []interface{}
	var hash map[interface{}]interface{}
	if schema.UniqueItems {
		// Seen-set used to drop duplicate generated entries.
		hash = make(map[interface{}]interface{})
	}
	for i := 0; i < numItems; i++ {
		entry, err := t.GenerateSchema(name, tag, itemSchema, db)
		if err != nil {
			return nil, err
		}
		if hash != nil && hash[entry] != nil {
			continue
		}
		ar = append(ar, entry)
		if hash != nil {
			hash[entry] = 1
		}
	}
	return ar, nil
}
// generateObject builds a map value for an object schema, generating
// each property via GenerateSchema. The finished object is registered
// with AddObjectComparison under the meqa-tag class or, lacking one, a
// class inferred by matching the object against known swagger schemas;
// if no class can be determined, the object is returned untracked.
func (t *Test) generateObject(name string, parentTag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
	obj := make(map[string]interface{})
	for k, v := range schema.Properties {
		propertyTag := mqswag.GetMeqaTag(v.Description)
		if propertyTag == nil {
			propertyTag = parentTag
		}
		o, err := t.GenerateSchema(k+"_", propertyTag, &v, db)
		if err != nil {
			return nil, err
		}
		obj[k] = o
	}
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	var class, method string
	if tag != nil {
		class = tag.Class
	}
	if t.tag != nil && len(t.tag.Operation) > 0 {
		method = t.tag.Operation // At test level the tag indicates the real method
	} else {
		method = t.Method
	}
	if len(class) == 0 {
		// No class from tags; try to infer one from the object's shape.
		cl, s := db.FindMatchingSchema(obj)
		if s == nil {
			mqutil.Logger.Printf("Can't find a known schema for obj %s", name)
			return obj, nil
		}
		class = cl
	}
	t.AddObjectComparison(class, method, obj, schema)
	return obj, nil
}
func (t *Test) GenerateSchema(name string, tag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
swagger := db.Swagger
// Deal with refs.
referenceName, referredSchema, err := swagger.GetReferredSchema((*mqswag.Schema)(schema))
if err != nil {
return nil, err
}
if referredSchema != nil {
var paramTag mqswag.MeqaTag
if tag != nil {
paramTag = mqswag.MeqaTag{referenceName, tag.Property, tag.Operation}
} else {
paramTag = mqswag.MeqaTag{referenceName, "", ""}
}
return t.GenerateSchema(name, ¶mTag, (*spec.Schema)(referredSchema), db)
}
if len(schema.Enum) != 0 {
return generateEnum(schema.Enum)
}
if len(schema.Type) == 0 {
return nil, mqutil.NewError(mqutil.ErrInvalid, "Parameter doesn't have type")
}
if schema.Type[0] == gojsonschema.TYPE_OBJECT {
return t.generateObject(name, tag, schema, db)
}
if schema.Type[0] == gojsonschema.TYPE_ARRAY {
return t.generateArray(name, tag, schema, db)
}
return t.generateByType(schema, name, tag, nil)
}
// generateEnum picks one member of the enum uniformly at random.
func generateEnum(e []interface{}) (interface{}, error) {
	choice := rand.Intn(len(e))
	return e[choice], nil
}
Combine path params and operation params.
package mqplan
import (
"encoding/base64"
"encoding/hex"
"fmt"
"math"
"math/rand"
"strings"
"time"
"gopkg.in/resty.v0"
"meqa/mqswag"
"meqa/mqutil"
"reflect"
"encoding/json"
"github.com/go-openapi/spec"
"github.com/lucasjones/reggen"
"github.com/xeipuuv/gojsonschema"
)
// The operation code in <meqa ....op> for parameters. The op code at the path level
// is the above Rest methods.
const (
	OpRead  = "read"  // parameter is a lookup key (stored in Comparison.old)
	OpWrite = "write" // parameter carries data to be written (stored in Comparison.new)
)

// The class code in <meqa class> for responses.
const (
	ClassSuccess = "success"
	ClassFail    = "fail" // a response description tagged "fail" marks the test unsuccessful
)

// Keys recognized in a test's "expect" map.
const (
	ExpectStatus = "status"
)
// GetBaseURL assembles the base URL (scheme://host/basePath) from the
// swagger spec. Scheme preference: "http" first, then "https", then
// whatever scheme is listed first; "http" is assumed when the spec
// lists no schemes at all.
func GetBaseURL(swagger *mqswag.Swagger) string {
	var scheme string
	for _, s := range swagger.Schemes {
		if s == "http" {
			scheme = "http"
			break
		}
		if s == "https" {
			scheme = "https"
		}
	}
	if scheme == "" {
		if len(swagger.Schemes) > 0 {
			scheme = swagger.Schemes[0]
		} else {
			scheme = "http"
		}
	}
	return scheme + "://" + swagger.Host + swagger.BasePath
}
// Comparison tracks the before/after state of one object of a given
// class, for verification against the in-memory DB after a test runs.
//
// Post: old - nil, new - the new object we create.
// Put, patch: old - the old object, new - the new one.
// Get: old - the old object, new - the one we get from the server.
// Delete: old - the existing object, new - nil.
type Comparison struct {
	old    map[string]interface{} // For put and patch, it stores the keys used in lookup
	new    map[string]interface{} // data written by (or expected back from) the operation
	schema *spec.Schema           // schema of the tracked object class
}
// GetMapByOp returns the map that collects values for the given op
// code: `old` for OpRead (lookup keys), `new` for anything else
// (written data). The map is lazily allocated on first use.
func (comp *Comparison) GetMapByOp(op string) map[string]interface{} {
	if op != OpRead {
		if comp.new == nil {
			comp.new = make(map[string]interface{})
		}
		return comp.new
	}
	if comp.old == nil {
		comp.old = make(map[string]interface{})
	}
	return comp.old
}
// Test represents a test object in the DSL. Extra care needs to be taken to copy the
// Test before running it, because running it would change the parameter maps.
type Test struct {
	Name         string                 `yaml:"name,omitempty"`   // test name; "<name.param>" history references look tests up by it
	Path         string                 `yaml:"path,omitempty"`   // swagger path template; "{param}" segments are substituted from PathParams
	Method       string                 `yaml:"method,omitempty"` // HTTP method, lower-cased by Init
	Ref          string                 `yaml:"ref,omitempty"`
	Expect       map[string]interface{} `yaml:"expect,omitempty"` // expected outcome overrides, e.g. ExpectStatus -> "fail" or a status code
	QueryParams  map[string]interface{} `yaml:"queryParams,omitempty"`
	BodyParams   interface{}            `yaml:"bodyParams,omitempty"` // map, array, or scalar body
	FormParams   map[string]interface{} `yaml:"formParams,omitempty"`
	PathParams   map[string]interface{} `yaml:"pathParams,omitempty"`
	HeaderParams map[string]interface{} `yaml:"headerParams,omitempty"`

	// Map of Object name (matching definitions) to the Comparison object.
	// This tracks what objects we need to add to DB at the end of test.
	comparisons map[string]([]*Comparison)

	tag  *mqswag.MeqaTag // The tag at the top level that describes the test
	db   *mqswag.DB      // in-memory DB handle, set by Init
	op   *spec.Operation // resolved swagger operation
	resp *resty.Response // last server response
	err  error
}
// Init prepares the test for execution: stores the DB handle, normalizes the
// method name to lower case, and converts a YAML-decoded body map
// (map[interface{}]interface{}) into a map[string]interface{}.
func (t *Test) Init(db *mqswag.DB) {
	t.db = db
	if t.Method != "" {
		t.Method = strings.ToLower(t.Method)
	}
	// if BodyParams is map, after unmarshal it is map[interface{}]
	raw, ok := t.BodyParams.(map[interface{}]interface{})
	if !ok {
		return
	}
	converted := make(map[string]interface{}, len(raw))
	for key, val := range raw {
		converted[fmt.Sprint(key)] = val
	}
	t.BodyParams = converted
}
// Duplicate returns a copy of the test that is safe to run: all parameter
// maps are deep-copied, and run-time state (tag, op, resp, err, comparisons)
// is reset rather than shared.
func (t *Test) Duplicate() *Test {
	dup := *t
	dup.Expect = mqutil.MapCopy(dup.Expect)
	dup.QueryParams = mqutil.MapCopy(dup.QueryParams)
	dup.FormParams = mqutil.MapCopy(dup.FormParams)
	dup.PathParams = mqutil.MapCopy(dup.PathParams)
	dup.HeaderParams = mqutil.MapCopy(dup.HeaderParams)
	switch body := dup.BodyParams.(type) {
	case map[string]interface{}:
		dup.BodyParams = mqutil.MapCopy(body)
	case []interface{}:
		dup.BodyParams = mqutil.ArrayCopy(body)
	}
	dup.tag = nil
	dup.op = nil
	dup.resp = nil
	dup.err = nil
	dup.comparisons = make(map[string]([]*Comparison))
	return &dup
}
// AddBasicComparison records a single property value for later verification
// against the in-mem DB. The value is attached to the most recent Comparison
// for tag.Class if it doesn't already carry the property; a collision means a
// new object has started, so a fresh Comparison is created.
//
// Nothing is recorded when paramSpec is nil or the tag lacks a class/property.
func (t *Test) AddBasicComparison(tag *mqswag.MeqaTag, paramSpec *spec.Parameter, data interface{}) {
	if paramSpec == nil {
		return
	}
	if tag == nil || len(tag.Class) == 0 || len(tag.Property) == 0 {
		// No explicit tag. Info we have: t.Method, t.tag - indicate what operation we want to do.
		// t.path - indicate what object we want to operate on. We need to extrace the equivalent
		// of the tag. This is usually done on server, here we just make a simple effort.
		// TODO
		return
	}
	// Decide whether this value is a lookup key (read) or new data (write).
	op := tag.Operation
	if len(op) == 0 {
		if paramSpec.In == "formData" || paramSpec.In == "body" {
			op = OpWrite
		} else {
			op = OpRead
		}
	}
	// It's possible that we are updating a list of objects. Due to the way we generate parameters,
	// we will always generate one complete object (both the lookup key and the new data) before we
	// move on to the next. If we find a collision, we know we need to create a new Comparison object.
	comps := t.comparisons[tag.Class]
	if len(comps) > 0 { // len(nil) == 0, so no separate nil check is needed
		m := comps[len(comps)-1].GetMapByOp(op)
		if _, ok := m[tag.Property]; !ok {
			m[tag.Property] = data
			return
		}
	}
	// Collision (or no comparison yet): start a new Comparison object.
	comp := &Comparison{}
	comp.schema = (*spec.Schema)(t.db.Swagger.FindSchemaByName(tag.Class))
	comp.GetMapByOp(op)[tag.Property] = data
	t.comparisons[tag.Class] = append(comps, comp)
}
// AddObjectComparison records a whole generated object for later verification.
// For POST the object is the expected new DB entry. For PUT/PATCH the object
// either completes the previous comparison (which so far holds only lookup
// keys) or starts a new one. Other methods are unexpected and only logged.
func (t *Test) AddObjectComparison(class string, method string, obj map[string]interface{}, schema *spec.Schema) {
	switch method {
	case mqswag.MethodPost:
		t.comparisons[class] = append(t.comparisons[class], &Comparison{nil, obj, schema})
	case mqswag.MethodPut, mqswag.MethodPatch:
		// It's possible that we are updating a list of objects. Due to the way we generate parameters,
		// we will always generate one complete object (both the lookup key and the new data) before we
		// move on to the next.
		if comps := t.comparisons[class]; len(comps) > 0 {
			last := comps[len(comps)-1]
			if last.new == nil {
				last.new = obj
				return
			}
			// During put, having an array of objects with just the "new" part is allowed. This
			// means the update key is included in the new object.
		}
		t.comparisons[class] = append(t.comparisons[class], &Comparison{nil, obj, schema})
	default:
		// Fix: report the actual method instead of hard-coding "GET" — this
		// branch also catches DELETE/HEAD/OPTIONS etc.
		mqutil.Logger.Printf("unexpected: generating object %s for %s method.", class, method)
	}
}
// ProcessOneComparison processes one comparison object.
// For GET the server result (resultArray) is cross-checked against the in-mem
// DB using the same lookup criteria; for DELETE/POST/PUT/PATCH the in-mem DB
// is mutated to mirror the server-side effect of the request.
func (t *Test) ProcessOneComparison(className string, comp *Comparison, resultArray []interface{}) error {
	// A meqa tag operation on the test overrides the HTTP method.
	method := t.Method
	if t.tag != nil && len(t.tag.Operation) > 0 {
		method = t.tag.Operation
	}
	if method == mqswag.MethodGet {
		// With no lookup keys, match every entry of the class.
		var matchFunc mqswag.MatchFunc
		if comp.old == nil {
			matchFunc = mqswag.MatchAlways
		} else {
			matchFunc = mqswag.MatchAllFields
		}
		dbArray := t.db.Find(className, comp.old, matchFunc, -1)
		// What we found from the server (resultArray) and from in-memory DB using the same criteria should match.
		if len(resultArray) != len(dbArray) {
			resultBytes, _ := json.Marshal(resultArray)
			mqutil.Logger.Print(string(resultBytes))
			return mqutil.NewError(mqutil.ErrHttp, fmt.Sprintf("expecting %d entries got %d entries",
				len(dbArray), len(resultArray)))
		}
		// TODO optimize later. Should sort first.
		for _, entry := range resultArray {
			found := false
			entryMap, _ := entry.(map[string]interface{})
			if entryMap == nil {
				if len(dbArray) == 0 {
					// Server returned array of non-map types. The db shouldn't expect anything.
					continue
				}
			} else {
				// Linear scan for a DB entry equal to the server entry.
				for _, dbEntry := range dbArray {
					dbentryMap, _ := dbEntry.(map[string]interface{})
					if dbentryMap != nil && mqutil.MapEquals(entryMap, dbentryMap, false) {
						found = true
						break
					}
				}
			}
			if !found {
				b, _ := json.Marshal(entry)
				return mqutil.NewError(mqutil.ErrHttp, fmt.Sprintf("result returned is not found on client\n%s\n",
					string(b)))
			}
		}
	} else if method == mqswag.MethodDelete {
		// Mirror the server-side delete in the in-mem DB.
		t.db.Delete(className, comp.old, mqswag.MatchAllFields, -1)
	} else if method == mqswag.MethodPost {
		return t.db.Insert(className, comp.schema, comp.new)
	} else if method == mqswag.MethodPatch || method == mqswag.MethodPut {
		// The final bool presumably selects merge (patch) vs replace (put)
		// semantics — confirm in mqswag.DB.Update.
		count := t.db.Update(className, comp.old, mqswag.MatchAllFields, comp.new, 1, method == mqswag.MethodPatch)
		if count != 1 {
			mqutil.Logger.Printf("Failed to find any entry to update")
		}
	}
	return nil
}
// GetParamFromComparison looks up a parameter value by name across all
// recorded comparisons. where selects which side to search: "old", "new",
// or "any" for both. Returns nil when nothing matches.
func (t *Test) GetParamFromComparison(name string, where string) interface{} {
	searchNew := where == "any" || where == "new"
	searchOld := where == "any" || where == "old"
	for _, compList := range t.comparisons {
		for _, comp := range compList {
			if searchNew {
				if v := comp.new[name]; v != nil {
					return v
				}
			}
			if searchOld {
				if v := comp.old[name]; v != nil {
					return v
				}
			}
		}
	}
	return nil
}
// ProcessResult decodes the response from the server, validates it against
// the swagger response schema for the returned status code, and — when the
// test is judged successful — folds every recorded comparison into the
// in-mem DB via ProcessOneComparison.
func (t *Test) ProcessResult(resp *resty.Response) error {
	t.resp = resp
	status := resp.StatusCode()
	// Pick the response spec for this status code, falling back to Default.
	var respSpec *spec.Response
	if t.op.Responses != nil {
		respObject, ok := t.op.Responses.StatusCodeResponses[status]
		if ok {
			respSpec = &respObject
		} else {
			respSpec = t.op.Responses.Default
		}
	}
	if respSpec == nil {
		// Nothing specified in the swagger.json. Same as an empty spec.
		respSpec = &spec.Response{}
	}
	respBody := resp.Body()
	// Check if the response obj and respSchema match
	respSchema := (*mqswag.Schema)(respSpec.Schema)
	var resultObj interface{}
	if respSchema != nil {
		if len(respBody) > 0 {
			err := json.Unmarshal(respBody, &resultObj)
			if err == nil {
				if !respSchema.Matches(resultObj, t.db.Swagger) {
					return mqutil.NewError(mqutil.ErrServerResp, fmt.Sprintf("server response doesn't match swagger spec: %s", string(respBody)))
				}
			} else if !respSchema.Type.Contains(gojsonschema.TYPE_STRING) {
				// Non-JSON body is only acceptable when the schema expects a plain string.
				return mqutil.NewError(mqutil.ErrServerResp, fmt.Sprintf("server response doesn't match swagger spec: %s", string(respBody)))
			}
		} else {
			// If schema is an array, then not having a body is OK
			if !respSchema.Type.Contains(gojsonschema.TYPE_ARRAY) {
				return mqutil.NewError(mqutil.ErrServerResp, fmt.Sprintf("swagger.spec expects a non-empty response, but response body is actually empty"))
			}
		}
		// Remove the comparison objects that 1) swagger doesn't expect back 2) we didn't modify
		// (in-place compaction: matched entries are shifted into the dropped prefix).
		for className, compList := range t.comparisons {
			count := 0
			for i, comp := range compList {
				if comp.new == nil && !respSchema.Contains(className, t.db.Swagger) {
					compList[i] = compList[count]
					count++
				}
			}
			t.comparisons[className] = compList[count:]
		}
	}
	// success based on return status
	success := (status >= 200 && status < 300)
	tag := mqswag.GetMeqaTag(respSpec.Description)
	if tag != nil && tag.Class == ClassFail {
		success = false
	}
	// Apply test-level expectations, which may invert success or pin a status.
	if t.Expect != nil && t.Expect[ExpectStatus] != nil {
		expectedStatus := t.Expect[ExpectStatus]
		if expectedStatus == "fail" {
			success = !success
		} else if expectedStatusNum, ok := expectedStatus.(int); ok {
			success = (expectedStatusNum == status)
		} else {
			success = false
		}
	}
	if !success {
		// Fix: removed the always-true `actuallyFailed` dead variable.
		// NOTE(review): a failed test is logged but nil is returned — presumably
		// so one failure doesn't abort the whole plan; confirm intent.
		mqutil.Logger.Printf("=== test failed, response code %d ===", status)
		return nil
	}
	// Normalize the decoded body into an array for comparison processing.
	var resultArray []interface{}
	var ok bool
	if resultObj != nil {
		if resultArray, ok = resultObj.([]interface{}); !ok {
			resultArray = []interface{}{resultObj}
		}
	}
	// Success, replace or verify based on method.
	for className, compArray := range t.comparisons {
		for _, c := range compArray {
			err := t.ProcessOneComparison(className, c, resultArray)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
// SetRequestParameters installs the test's query/body/header/form parameters
// on the request and returns the request path with every "{name}" path
// parameter substituted.
func (t *Test) SetRequestParameters(req *resty.Request) string {
	install := func(m map[string]interface{}, label string, apply func(map[string]string)) {
		if len(m) > 0 {
			apply(mqutil.MapInterfaceToMapString(m))
			mqutil.InterfacePrint(m, label)
		}
	}
	install(t.QueryParams, "queryParams:\n", func(m map[string]string) { req.SetQueryParams(m) })
	if t.BodyParams != nil {
		req.SetBody(t.BodyParams)
		mqutil.InterfacePrint(t.BodyParams, "bodyParams:\n")
	}
	install(t.HeaderParams, "headerParams:\n", func(m map[string]string) { req.SetHeaders(m) })
	install(t.FormParams, "formParams:\n", func(m map[string]string) { req.SetFormData(m) })
	path := t.Path
	if len(t.PathParams) > 0 {
		for name, value := range mqutil.MapInterfaceToMapString(t.PathParams) {
			path = strings.Replace(path, "{"+name+"}", value, -1)
		}
		mqutil.InterfacePrint(t.PathParams, "pathParams:\n")
	}
	return path
}
// CopyParams merges the parent test's parameters into this test. Maps are
// combined with this test's own entries taking precedence; the body is
// adopted when absent, merged when both are maps, or replaced only when the
// dynamic types match exactly.
func (t *Test) CopyParams(parentTest *Test) {
	if parentTest == nil {
		return
	}
	t.Expect = mqutil.MapCopy(parentTest.Expect)
	t.QueryParams = mqutil.MapCombine(t.QueryParams, parentTest.QueryParams)
	t.PathParams = mqutil.MapCombine(t.PathParams, parentTest.PathParams)
	t.HeaderParams = mqutil.MapCombine(t.HeaderParams, parentTest.HeaderParams)
	t.FormParams = mqutil.MapCombine(t.FormParams, parentTest.FormParams)
	if parentTest.BodyParams == nil {
		return
	}
	if t.BodyParams == nil {
		t.BodyParams = parentTest.BodyParams
		return
	}
	// replace with parent only if the types are the same
	if parentBody, isMap := parentTest.BodyParams.(map[string]interface{}); isMap {
		if ownBody, ok := t.BodyParams.(map[string]interface{}); ok {
			t.BodyParams = mqutil.MapCombine(ownBody, parentBody)
		}
	} else if reflect.TypeOf(parentTest.BodyParams) == reflect.TypeOf(t.BodyParams) {
		// For non-map types, just replace with parent if they are the same type.
		t.BodyParams = parentTest.BodyParams
	}
}
// Run runs the test. It resolves parameters against the swagger spec and
// in-mem DB, issues the HTTP request via resty, then verifies the response
// with ProcessResult. Returns an error on parameter-resolution, transport,
// or verification failure.
func (t *Test) Run(plan *TestPlan) error {
	mqutil.Logger.Print("\n--- " + t.Name)
	err := t.ResolveParameters(plan)
	if err != nil {
		return err
	}
	req := resty.R()
	// Basic auth is applied plan-wide when a username is configured.
	if len(plan.Username) > 0 {
		req.SetBasicAuth(plan.Username, plan.Password)
	}
	path := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)
	var resp *resty.Response
	switch t.Method {
	case mqswag.MethodGet:
		resp, err = req.Get(path)
	case mqswag.MethodPost:
		resp, err = req.Post(path)
	case mqswag.MethodPut:
		resp, err = req.Put(path)
	case mqswag.MethodDelete:
		resp, err = req.Delete(path)
	case mqswag.MethodPatch:
		resp, err = req.Patch(path)
	case mqswag.MethodHead:
		resp, err = req.Head(path)
	case mqswag.MethodOptions:
		resp, err = req.Options(path)
	default:
		return mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("Unknown method in test %s: %v", t.Name, t.Method))
	}
	if err != nil {
		return mqutil.NewError(mqutil.ErrHttp, err.Error())
	}
	// TODO properly process resp. Check against the current DB to see if they match
	mqutil.Logger.Print(resp.Status())
	mqutil.Logger.Println(string(resp.Body()))
	return t.ProcessResult(resp)
}
// GetSchemaRootType gets the real object type of the specified schema. It only
// returns meaningful data for object and array-of-object schemas; for basic
// types and enums it returns (nil, nil). References are followed (carrying the
// referenced definition's name as the tag class) and arrays are unwrapped to
// their item schema, with the closest meqa tag propagated along the way.
func (t *Test) GetSchemaRootType(schema *mqswag.Schema, parentTag *mqswag.MeqaTag) (*mqswag.MeqaTag, *mqswag.Schema) {
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	referenceName, referredSchema, err := t.db.Swagger.GetReferredSchema((*mqswag.Schema)(schema))
	if err != nil {
		mqutil.Logger.Print(err)
		return nil, nil
	}
	if referredSchema != nil {
		if tag == nil {
			// Fix: keyed composite literal (go vet flags unkeyed fields).
			tag = &mqswag.MeqaTag{Class: referenceName}
		}
		return t.GetSchemaRootType(referredSchema, tag)
	}
	if len(schema.Enum) != 0 {
		return nil, nil
	}
	if len(schema.Type) == 0 {
		return nil, nil
	}
	if schema.Type.Contains(gojsonschema.TYPE_ARRAY) {
		// The item schema may live in Items.Schemas[0] or Items.Schema.
		var itemSchema *spec.Schema
		if len(schema.Items.Schemas) != 0 {
			itemSchema = &(schema.Items.Schemas[0])
		} else {
			itemSchema = schema.Items.Schema
		}
		return t.GetSchemaRootType((*mqswag.Schema)(itemSchema), tag)
	} else if schema.Type.Contains(gojsonschema.TYPE_OBJECT) {
		return tag, schema
	}
	return nil, nil
}
// StringParamsResolveWithHistory resolves a "<testName.paramName>" reference
// in str against the test history, returning the referenced parameter's value
// or nil when str is not a well-formed reference or the lookup fails.
func StringParamsResolveWithHistory(str string, h *TestHistory) interface{} {
	begin := strings.Index(str, "<")
	end := strings.Index(str, ">")
	// Fix: also require that "<" exists (begin >= 0). Previously a string
	// containing ">" but no "<" had begin == -1 and was mis-parsed from the
	// start of the string.
	if begin >= 0 && end > begin {
		ar := strings.Split(str[begin+1:end], ".")
		if len(ar) != 2 {
			mqutil.Logger.Printf("invalid parameter: %s", str[begin+1:end])
			return nil
		}
		t := h.GetTest(ar[0])
		if t != nil {
			return t.GetParamFromComparison(ar[1], "any")
		}
	}
	return nil
}
// MapParamsResolveWithHistory resolves, in place, every string value of
// paramMap that references a previous test's parameter.
func MapParamsResolveWithHistory(paramMap map[string]interface{}, h *TestHistory) {
	for key, value := range paramMap {
		str, isString := value.(string)
		if !isString {
			continue
		}
		if resolved := StringParamsResolveWithHistory(str, h); resolved != nil {
			paramMap[key] = resolved
		}
	}
}
// ArrayParamsResolveWithHistory resolves, in place, history references inside
// each element of paramArray: map elements are resolved value-by-value, and
// plain string elements are replaced directly.
func ArrayParamsResolveWithHistory(paramArray []interface{}, h *TestHistory) {
	for i, entry := range paramArray {
		switch v := entry.(type) {
		case map[string]interface{}:
			MapParamsResolveWithHistory(v, h)
		case string:
			if resolved := StringParamsResolveWithHistory(v, h); resolved != nil {
				paramArray[i] = resolved
			}
		}
	}
}
// ResolveHistoryParameters resolves history references in every parameter set
// of the test, handling the body in its map, array, or string form.
func (t *Test) ResolveHistoryParameters(h *TestHistory) {
	for _, params := range []map[string]interface{}{
		t.PathParams, t.FormParams, t.HeaderParams, t.QueryParams,
	} {
		MapParamsResolveWithHistory(params, h)
	}
	switch body := t.BodyParams.(type) {
	case map[string]interface{}:
		MapParamsResolveWithHistory(body, h)
	case []interface{}:
		ArrayParamsResolveWithHistory(body, h)
	case string:
		if resolved := StringParamsResolveWithHistory(body, h); resolved != nil {
			t.BodyParams = resolved
		}
	}
}
// ParamsCombine appends to dst every parameter from src whose name is not
// already present in dst, returning the combined slice. Entries already in
// dst win over same-named src entries.
func ParamsCombine(dst []spec.Parameter, src []spec.Parameter) []spec.Parameter {
	if len(dst) == 0 {
		return src
	}
	if len(src) == 0 {
		return dst
	}
	seen := make(map[string]int, len(dst))
	for _, p := range dst {
		seen[p.Name] = 1
	}
	for _, p := range src {
		if seen[p.Name] == 1 {
			continue
		}
		dst = append(dst, p)
		seen[p.Name] = 1
	}
	return dst
}
// ResolveParameters fullfills the parameters for the specified request using the in-mem DB.
// The resolved parameters will be added to test.Parameters map.
//
// For each swagger parameter, an explicitly provided value (test-level first,
// then plan-level) is used as-is and recorded for comparison; otherwise a
// value is generated from the spec.
func (t *Test) ResolveParameters(plan *TestPlan) error {
	pathItem := t.db.Swagger.Paths.Paths[t.Path]
	t.op = getOperationByMethod(&pathItem, t.Method)
	if t.op == nil {
		return mqutil.NewError(mqutil.ErrNotFound, fmt.Sprintf("Path %s not found in swagger file", t.Path))
	}
	// There can be parameters at the path level. We merge these with the operation parameters.
	t.op.Parameters = ParamsCombine(t.op.Parameters, pathItem.Parameters)
	t.tag = mqswag.GetMeqaTag(t.op.Description)
	var paramsMap map[string]interface{}
	var globalParamsMap map[string]interface{}
	var err error
	var genParam interface{}
	// NOTE(review): &params below aliases the loop variable; safe only if the
	// callees don't retain the pointer — confirm (relevant pre-Go 1.22).
	for _, params := range t.op.Parameters {
		if params.In == "body" {
			if t.BodyParams != nil {
				// There is only one body parameter. No need to check name. In fact, we don't
				// even store the name in the DSL.
				objarray := mqutil.InterfaceToArray(t.BodyParams)
				paramTag, schema := t.GetSchemaRootType((*mqswag.Schema)(params.Schema), mqswag.GetMeqaTag(params.Description))
				method := t.Method
				if t.tag != nil && len(t.tag.Operation) > 0 {
					method = t.tag.Operation
				}
				if schema != nil && paramTag != nil {
					// Record each provided body object for post-run verification.
					for _, obj := range objarray {
						t.AddObjectComparison(paramTag.Class, method, obj, (*spec.Schema)(schema))
					}
				}
				continue
			}
			genParam, err = t.GenerateParameter(&params, t.db)
			t.BodyParams = genParam
		} else {
			// Select the per-location parameter map (created lazily) and the
			// matching plan-level default map.
			switch params.In {
			case "path":
				if t.PathParams == nil {
					t.PathParams = make(map[string]interface{})
				}
				paramsMap = t.PathParams
				globalParamsMap = plan.PathParams
			case "query":
				if t.QueryParams == nil {
					t.QueryParams = make(map[string]interface{})
				}
				paramsMap = t.QueryParams
				globalParamsMap = plan.QueryParams
			case "header":
				if t.HeaderParams == nil {
					t.HeaderParams = make(map[string]interface{})
				}
				paramsMap = t.HeaderParams
				globalParamsMap = plan.HeaderParams
			case "formData":
				if t.FormParams == nil {
					t.FormParams = make(map[string]interface{})
				}
				paramsMap = t.FormParams
				globalParamsMap = plan.FormParams
			}
			// If there is a parameter passed in, just use it. Otherwise generate one.
			if paramsMap[params.Name] == nil && globalParamsMap[params.Name] != nil {
				paramsMap[params.Name] = globalParamsMap[params.Name]
			}
			if _, ok := paramsMap[params.Name]; ok {
				t.AddBasicComparison(mqswag.GetMeqaTag(params.Description), &params, paramsMap[params.Name])
				continue
			}
			genParam, err = t.GenerateParameter(&params, t.db)
			paramsMap[params.Name] = genParam
		}
		if err != nil {
			return err
		}
	}
	return nil
}
// getOperationByMethod returns the operation on the path item matching the
// given (lower-case) HTTP method, or nil for an unrecognized method.
func getOperationByMethod(item *spec.PathItem, method string) *spec.Operation {
	var op *spec.Operation
	switch method {
	case mqswag.MethodGet:
		op = item.Get
	case mqswag.MethodPost:
		op = item.Post
	case mqswag.MethodPut:
		op = item.Put
	case mqswag.MethodDelete:
		op = item.Delete
	case mqswag.MethodPatch:
		op = item.Patch
	case mqswag.MethodHead:
		op = item.Head
	case mqswag.MethodOptions:
		op = item.Options
	}
	return op
}
// GenerateParameter generates paramter value based on the spec.
func (t *Test) GenerateParameter(paramSpec *spec.Parameter, db *mqswag.DB) (interface{}, error) {
tag := mqswag.GetMeqaTag(paramSpec.Description)
if paramSpec.Schema != nil {
return t.GenerateSchema(paramSpec.Name, tag, paramSpec.Schema, db)
}
if len(paramSpec.Enum) != 0 {
return generateEnum(paramSpec.Enum)
}
if len(paramSpec.Type) == 0 {
return nil, mqutil.NewError(mqutil.ErrInvalid, "Parameter doesn't have type")
}
var schema *spec.Schema
if paramSpec.Schema != nil {
schema = paramSpec.Schema
} else {
// construct a full schema from simple ones
schema = (*spec.Schema)(mqswag.CreateSchemaFromSimple(¶mSpec.SimpleSchema, ¶mSpec.CommonValidations))
}
if paramSpec.Type == gojsonschema.TYPE_OBJECT {
return t.generateObject("param_", tag, schema, db)
}
if paramSpec.Type == gojsonschema.TYPE_ARRAY {
return t.generateArray("param_", tag, schema, db)
}
return t.generateByType(schema, paramSpec.Name+"_", tag, paramSpec)
}
// generateByType generates a primitive value for schema s. When called with a
// non-nil paramSpec and a property-level tag, it first tries to reuse a value
// from an existing comparison or from the in-mem DB (recording the chosen
// object as a new comparison); otherwise it fabricates a value by type and
// records it via AddBasicComparison.
//
// Two ways to get to generateByType
// 1) directly called from GenerateParameter, now we know the type is a parameter, and we want to add to comparison
// 2) called at bottom level, here we know the object will be added to comparison and not the type primitives.
func (t *Test) generateByType(s *spec.Schema, prefix string, parentTag *mqswag.MeqaTag, paramSpec *spec.Parameter) (interface{}, error) {
	tag := mqswag.GetMeqaTag(s.Description)
	if tag == nil {
		tag = parentTag
	}
	if paramSpec != nil {
		if tag != nil && len(tag.Property) > 0 {
			// Try to get one from the comparison objects.
			for _, c := range t.comparisons[tag.Class] {
				if c.old != nil {
					return c.old[tag.Property], nil
				}
			}
			// Get one from in-mem db and populate the comparison structure.
			ar := t.db.Find(tag.Class, nil, mqswag.MatchAlways, 5)
			if len(ar) > 0 {
				// NOTE(review): assumes DB entries for this class are maps —
				// the assertion panics otherwise; confirm.
				obj := ar[rand.Intn(len(ar))].(map[string]interface{})
				comp := &Comparison{obj, nil, (*spec.Schema)(t.db.GetSchema(tag.Class))}
				t.comparisons[tag.Class] = append(t.comparisons[tag.Class], comp)
				return obj[tag.Property], nil
			}
		}
	}
	if len(s.Type) != 0 {
		var result interface{}
		var err error
		switch s.Type[0] {
		case gojsonschema.TYPE_BOOLEAN:
			result, err = generateBool(s)
		case gojsonschema.TYPE_INTEGER:
			result, err = generateInt(s)
		case gojsonschema.TYPE_NUMBER:
			result, err = generateFloat(s)
		case gojsonschema.TYPE_STRING:
			result, err = generateString(s, prefix)
		case "file":
			// Swagger "file" type: fabricate digit content as a stand-in.
			result, err = reggen.Generate("[0-9]+", 200)
		}
		if result != nil && err == nil {
			t.AddBasicComparison(tag, paramSpec, result)
			return result, err
		}
	}
	return nil, mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("unrecognized type: %s", s.Type))
}
// RandomTime generate a random time in the range of [t - r, t).
func RandomTime(t time.Time, r time.Duration) time.Time {
return t.Add(-time.Duration(float64(r) * rand.Float64()))
}
// generateString produces a string value for schema s. Date and date-time
// formats yield a random recent timestamp; every other format generates from
// the schema's regex pattern (or "<prefix>\d+" when none is given) and then
// encodes per the format ("byte" -> base64, "binary" -> hex).
// TODO we need to make it context aware. Based on different contexts we should generate different
// date ranges. Prefix is a prefix to use when generating strings. It's only used when there is
// no specified pattern in the swagger.json
func generateString(s *spec.Schema, prefix string) (string, error) {
	switch s.Format {
	case "date-time":
		return RandomTime(time.Now(), time.Hour*24*30).Format(time.RFC3339), nil
	case "date":
		return RandomTime(time.Now(), time.Hour*24*30).Format("2006-01-02"), nil
	}
	// If no pattern is specified, we use the field name + some numbers as pattern
	pattern := prefix + "\\d+"
	length := len(prefix) + 5
	if len(s.Pattern) != 0 {
		pattern = s.Pattern
		length = len(s.Pattern) * 2
	}
	str, err := reggen.Generate(pattern, length)
	if err != nil {
		return "", mqutil.NewError(mqutil.ErrInvalid, err.Error())
	}
	switch s.Format {
	case "", "password":
		return str, nil
	case "byte":
		return base64.StdEncoding.EncodeToString([]byte(str)), nil
	case "binary":
		return hex.EncodeToString([]byte(str)), nil
	}
	return "", mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("Invalid format string: %s", s.Format))
}
// generateBool returns a uniformly random boolean; the schema is unused.
func generateBool(s *spec.Schema) (interface{}, error) {
	coin := rand.Intn(2)
	return coin == 0, nil
}
// generateFloat returns a random float64 within the schema's bounds, nudging
// each bound inward by 0.01 when it is exclusive. With no bounds the range is
// [-1, 1); with a single bound a range of width |bound| is synthesized next
// to it. Conflicting bounds (min > max) are an error.
func generateFloat(s *spec.Schema) (float64, error) {
	var realmin float64
	if s.Minimum != nil {
		realmin = *s.Minimum
		if s.ExclusiveMinimum {
			realmin += 0.01
		}
	}
	var realmax float64
	if s.Maximum != nil {
		realmax = *s.Maximum
		if s.ExclusiveMaximum {
			realmax -= 0.01
		}
	}
	if realmin >= realmax {
		if s.Minimum == nil && s.Maximum == nil {
			realmin = -1.0
			realmax = 1.0
		} else if s.Minimum != nil && s.Maximum != nil && *s.Minimum > *s.Maximum {
			// both are present but conflicting
			// Fix: this error was previously unreachable because the
			// single-bound branches were tested first.
			return 0, mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf("specified min value %v is bigger than max %v",
				*s.Minimum, *s.Maximum))
		} else if s.Minimum != nil {
			realmax = realmin + math.Abs(realmin)
		} else {
			realmin = realmax - math.Abs(realmax)
		}
	}
	return rand.Float64()*(realmax-realmin) + realmin, nil
}
// generateInt returns a random int64 honoring the schema's bounds, deriving
// it from generateFloat. When neither bound is given, a default maximum of
// 10000 is assumed. A value at or below a specified minimum is bumped up so
// the result exceeds the minimum.
func generateInt(s *spec.Schema) (int64, error) {
	// Fix: operate on a shallow copy so the caller's schema is not mutated
	// when the default maximum is installed.
	schema := *s
	// Give a default range if there isn't one
	if schema.Maximum == nil && schema.Minimum == nil {
		maxf := 10000.0
		schema.Maximum = &maxf
	}
	f, err := generateFloat(&schema)
	if err != nil {
		return 0, err
	}
	i := int64(f)
	if schema.Minimum != nil && i <= int64(*schema.Minimum) {
		i++
	}
	return i, nil
}
// generateArray generates a random array for the schema: an item count is
// drawn from [MinItems, MaxItems] (or [0, 9] when unbounded), then each entry
// is produced via GenerateSchema. With UniqueItems set, duplicate entries are
// skipped rather than regenerated.
func (t *Test) generateArray(name string, parentTag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
	var numItems int
	if schema.MaxItems != nil || schema.MinItems != nil {
		var maxItems int
		if schema.MaxItems != nil {
			maxItems = int(*schema.MaxItems)
			if maxItems < 0 {
				maxItems = 0
			}
		}
		var minItems int
		if schema.MinItems != nil {
			minItems = int(*schema.MinItems)
			if minItems < 0 {
				minItems = 0
			}
		}
		maxDiff := maxItems - minItems
		if maxDiff < 0 {
			// MaxItems missing or conflicting; fall back to exactly minItems.
			maxDiff = 0
		}
		// Fix: rand.Intn(maxDiff+1) makes the full inclusive range
		// [minItems, maxItems] reachable; previously maxItems itself could
		// never be generated (off-by-one).
		numItems = rand.Intn(maxDiff+1) + minItems
	} else {
		numItems = rand.Intn(10)
	}
	// The item schema may live in Items.Schemas[0] or Items.Schema.
	var itemSchema *spec.Schema
	if len(schema.Items.Schemas) != 0 {
		itemSchema = &(schema.Items.Schemas[0])
	} else {
		itemSchema = schema.Items.Schema
	}
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	var ar []interface{}
	// Tracks generated entries when uniqueness is required.
	// NOTE(review): non-comparable entries (maps/slices) would panic as map
	// keys — presumably UniqueItems is only used with scalars; confirm.
	var seen map[interface{}]interface{}
	if schema.UniqueItems {
		seen = make(map[interface{}]interface{})
	}
	for i := 0; i < numItems; i++ {
		entry, err := t.GenerateSchema(name, tag, itemSchema, db)
		if err != nil {
			return nil, err
		}
		if seen != nil && seen[entry] != nil {
			continue
		}
		ar = append(ar, entry)
		if seen != nil {
			seen[entry] = 1
		}
	}
	return ar, nil
}
// generateObject generates a map value for an object schema: every property
// is generated recursively, then the result is recorded as an object
// comparison under the schema's tagged class — or, lacking a tag, a class
// inferred by matching the generated shape against known definitions.
func (t *Test) generateObject(name string, parentTag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
	obj := make(map[string]interface{})
	for k, v := range schema.Properties {
		propertyTag := mqswag.GetMeqaTag(v.Description)
		if propertyTag == nil {
			propertyTag = parentTag
		}
		// NOTE(review): &v aliases the range variable; safe only if
		// GenerateSchema doesn't retain the pointer — confirm (pre-Go 1.22).
		o, err := t.GenerateSchema(k+"_", propertyTag, &v, db)
		if err != nil {
			return nil, err
		}
		obj[k] = o
	}
	tag := mqswag.GetMeqaTag(schema.Description)
	if tag == nil {
		tag = parentTag
	}
	var class, method string
	if tag != nil {
		class = tag.Class
	}
	if t.tag != nil && len(t.tag.Operation) > 0 {
		method = t.tag.Operation // At test level the tag indicates the real method
	} else {
		method = t.Method
	}
	if len(class) == 0 {
		// No tag named the class; try to infer it from the generated fields.
		cl, s := db.FindMatchingSchema(obj)
		if s == nil {
			mqutil.Logger.Printf("Can't find a known schema for obj %s", name)
			return obj, nil
		}
		class = cl
	}
	t.AddObjectComparison(class, method, obj, schema)
	return obj, nil
}
func (t *Test) GenerateSchema(name string, tag *mqswag.MeqaTag, schema *spec.Schema, db *mqswag.DB) (interface{}, error) {
swagger := db.Swagger
// Deal with refs.
referenceName, referredSchema, err := swagger.GetReferredSchema((*mqswag.Schema)(schema))
if err != nil {
return nil, err
}
if referredSchema != nil {
var paramTag mqswag.MeqaTag
if tag != nil {
paramTag = mqswag.MeqaTag{referenceName, tag.Property, tag.Operation}
} else {
paramTag = mqswag.MeqaTag{referenceName, "", ""}
}
return t.GenerateSchema(name, ¶mTag, (*spec.Schema)(referredSchema), db)
}
if len(schema.Enum) != 0 {
return generateEnum(schema.Enum)
}
if len(schema.Type) == 0 {
return nil, mqutil.NewError(mqutil.ErrInvalid, "Parameter doesn't have type")
}
if schema.Type[0] == gojsonschema.TYPE_OBJECT {
return t.generateObject(name, tag, schema, db)
}
if schema.Type[0] == gojsonschema.TYPE_ARRAY {
return t.generateArray(name, tag, schema, db)
}
return t.generateByType(schema, name, tag, nil)
}
// generateEnum picks one member of the enum value list uniformly at random.
// Precondition: e is non-empty (rand.Intn panics on 0).
func generateEnum(e []interface{}) (interface{}, error) {
	idx := rand.Intn(len(e))
	return e[idx], nil
}
|
package graphql
import (
"fmt"
"reflect"
"github.com/graphql-go/graphql/language/ast"
"github.com/graphql-go/graphql/language/printer"
)
// TypeKind* are the string values of the __TypeKind introspection enum,
// classifying what kind of type a __Type value describes.
const (
	TypeKindScalar = "SCALAR"
	TypeKindObject = "OBJECT"
	TypeKindInterface = "INTERFACE"
	TypeKindUnion = "UNION"
	TypeKindEnum = "ENUM"
	TypeKindInputObject = "INPUT_OBJECT"
	TypeKindList = "LIST"
	TypeKindNonNull = "NON_NULL"
)
// Introspection meta-schema objects. They are populated in init (below)
// because their field definitions reference each other cyclically.
var __Directive *Object
var __Schema *Object
var __Type *Object
var __Field *Object
var __InputValue *Object
var __EnumValue *Object
var __TypeKind *Enum

// Meta field definitions for the __schema/__type/__typename queries.
// NOTE(review): assigned outside this chunk — confirm where they are set.
var SchemaMetaFieldDef *FieldDefinition
var TypeMetaFieldDef *FieldDefinition
var TypeNameMetaFieldDef *FieldDefinition
func init() {
__TypeKind = NewEnum(EnumConfig{
Name: "__TypeKind",
Description: "An enum describing what kind of type a given `__Type` is",
Values: EnumValueConfigMap{
"SCALAR": &EnumValueConfig{
Value: TypeKindScalar,
Description: "Indicates this type is a scalar.",
},
"OBJECT": &EnumValueConfig{
Value: TypeKindObject,
Description: "Indicates this type is an object. " +
"`fields` and `interfaces` are valid fields.",
},
"INTERFACE": &EnumValueConfig{
Value: TypeKindInterface,
Description: "Indicates this type is an interface. " +
"`fields` and `possibleTypes` are valid fields.",
},
"UNION": &EnumValueConfig{
Value: TypeKindUnion,
Description: "Indicates this type is a union. " +
"`possibleTypes` is a valid field.",
},
"ENUM": &EnumValueConfig{
Value: TypeKindEnum,
Description: "Indicates this type is an enum. " +
"`enumValues` is a valid field.",
},
"INPUT_OBJECT": &EnumValueConfig{
Value: TypeKindInputObject,
Description: "Indicates this type is an input object. " +
"`inputFields` is a valid field.",
},
"LIST": &EnumValueConfig{
Value: TypeKindList,
Description: "Indicates this type is a list. " +
"`ofType` is a valid field.",
},
"NON_NULL": &EnumValueConfig{
Value: TypeKindNonNull,
Description: "Indicates this type is a non-null. " +
"`ofType` is a valid field.",
},
},
})
// Note: some fields (for e.g "fields", "interfaces") are defined later due to cyclic reference
__Type = NewObject(ObjectConfig{
Name: "__Type",
Description: "The fundamental unit of any GraphQL Schema is the type. There are " +
"many kinds of types in GraphQL as represented by the `__TypeKind` enum." +
"\n\nDepending on the kind of a type, certain fields describe " +
"information about that type. Scalar types provide no information " +
"beyond a name and description, while Enum types provide their values. " +
"Object and Interface types provide the fields they describe. Abstract " +
"types, Union and Interface, provide the Object types possible " +
"at runtime. List and NonNull types compose other types.",
Fields: Fields{
"kind": &Field{
Type: NewNonNull(__TypeKind),
Resolve: func(p ResolveParams) (interface{}, error) {
switch p.Source.(type) {
case *Scalar:
return TypeKindScalar, nil
case *Object:
return TypeKindObject, nil
case *Interface:
return TypeKindInterface, nil
case *Union:
return TypeKindUnion, nil
case *Enum:
return TypeKindEnum, nil
case *InputObject:
return TypeKindInputObject, nil
case *List:
return TypeKindList, nil
case *NonNull:
return TypeKindNonNull, nil
}
return nil, fmt.Errorf("Unknown kind of type: %v", p.Source)
},
},
"name": &Field{
Type: String,
},
"description": &Field{
Type: String,
},
"fields": &Field{},
"interfaces": &Field{},
"possibleTypes": &Field{},
"enumValues": &Field{},
"inputFields": &Field{},
"ofType": &Field{},
},
})
__InputValue = NewObject(ObjectConfig{
Name: "__InputValue",
Description: "Arguments provided to Fields or Directives and the input fields of an " +
"InputObject are represented as Input Values which describe their type " +
"and optionally a default value.",
Fields: Fields{
"name": &Field{
Type: NewNonNull(String),
},
"description": &Field{
Type: String,
},
"type": &Field{
Type: NewNonNull(__Type),
},
"defaultValue": &Field{
Type: String,
Description: "A GraphQL-formatted string representing the default value for this " +
"input value.",
Resolve: func(p ResolveParams) (interface{}, error) {
if inputVal, ok := p.Source.(*Argument); ok {
if inputVal.DefaultValue == nil {
return nil, nil
}
astVal := astFromValue(inputVal.DefaultValue, inputVal)
return printer.Print(astVal), nil
}
if inputVal, ok := p.Source.(*InputObjectField); ok {
if inputVal.DefaultValue == nil {
return nil, nil
}
astVal := astFromValue(inputVal.DefaultValue, inputVal)
return printer.Print(astVal), nil
}
return nil, nil
},
},
},
})
__Field = NewObject(ObjectConfig{
Name: "__Field",
Description: "Object and Interface types are described by a list of Fields, each of " +
"which has a name, potentially a list of arguments, and a return type.",
Fields: Fields{
"name": &Field{
Type: NewNonNull(String),
},
"description": &Field{
Type: String,
},
"args": &Field{
Type: NewNonNull(NewList(NewNonNull(__InputValue))),
Resolve: func(p ResolveParams) (interface{}, error) {
if field, ok := p.Source.(*FieldDefinition); ok {
return field.Args, nil
}
return []interface{}{}, nil
},
},
"type": &Field{
Type: NewNonNull(__Type),
},
"isDeprecated": &Field{
Type: NewNonNull(Boolean),
Resolve: func(p ResolveParams) (interface{}, error) {
if field, ok := p.Source.(*FieldDefinition); ok {
return (field.DeprecationReason != ""), nil
}
return false, nil
},
},
"deprecationReason": &Field{
Type: String,
},
},
})
__Directive = NewObject(ObjectConfig{
Name: "__Directive",
Description: "A Directive provides a way to describe alternate runtime execution and " +
"type validation behavior in a GraphQL document. " +
"\n\nIn some cases, you need to provide options to alter GraphQL's " +
"execution behavior in ways field arguments will not suffice, such as " +
"conditionally including or skipping a field. Directives provide this by " +
"describing additional information to the executor.",
Fields: Fields{
"name": &Field{
Type: NewNonNull(String),
},
"description": &Field{
Type: String,
},
"args": &Field{
Type: NewNonNull(NewList(
NewNonNull(__InputValue),
)),
},
"onOperation": &Field{
Type: NewNonNull(Boolean),
},
"onFragment": &Field{
Type: NewNonNull(Boolean),
},
"onField": &Field{
Type: NewNonNull(Boolean),
},
},
})
__Schema = NewObject(ObjectConfig{
Name: "__Schema",
Description: `A GraphQL Schema defines the capabilities of a GraphQL server. ` +
`It exposes all available types and directives on the server, as well as ` +
`the entry points for query, mutation, and subscription operations.`,
Fields: Fields{
"types": &Field{
Description: "A list of all types supported by this server.",
Type: NewNonNull(NewList(
NewNonNull(__Type),
)),
Resolve: func(p ResolveParams) (interface{}, error) {
if schema, ok := p.Source.(Schema); ok {
results := []Type{}
for _, ttype := range schema.TypeMap() {
results = append(results, ttype)
}
return results, nil
}
return []Type{}, nil
},
},
"queryType": &Field{
Description: "The type that query operations will be rooted at.",
Type: NewNonNull(__Type),
Resolve: func(p ResolveParams) (interface{}, error) {
if schema, ok := p.Source.(Schema); ok {
return schema.QueryType(), nil
}
return nil, nil
},
},
"mutationType": &Field{
Description: `If this server supports mutation, the type that ` +
`mutation operations will be rooted at.`,
Type: __Type,
Resolve: func(p ResolveParams) (interface{}, error) {
if schema, ok := p.Source.(Schema); ok {
if schema.MutationType() != nil {
return schema.MutationType(), nil
}
}
return nil, nil
},
},
"subscriptionType": &Field{
Description: `If this server supports subscription, the type that ` +
`subscription operations will be rooted at.`,
Type: __Type,
Resolve: func(p ResolveParams) (interface{}, error) {
if schema, ok := p.Source.(Schema); ok {
if schema.SubscriptionType() != nil {
return schema.SubscriptionType(), nil
}
}
return nil, nil
},
},
"directives": &Field{
Description: `A list of all directives supported by this server.`,
Type: NewNonNull(NewList(
NewNonNull(__Directive),
)),
Resolve: func(p ResolveParams) (interface{}, error) {
if schema, ok := p.Source.(Schema); ok {
return schema.Directives(), nil
}
return nil, nil
},
},
},
})
__EnumValue = NewObject(ObjectConfig{
Name: "__EnumValue",
Description: "One possible value for a given Enum. Enum values are unique values, not " +
"a placeholder for a string or numeric value. However an Enum value is " +
"returned in a JSON response as a string.",
Fields: Fields{
"name": &Field{
Type: NewNonNull(String),
},
"description": &Field{
Type: String,
},
"isDeprecated": &Field{
Type: NewNonNull(Boolean),
Resolve: func(p ResolveParams) (interface{}, error) {
if field, ok := p.Source.(*EnumValueDefinition); ok {
return (field.DeprecationReason != ""), nil
}
return false, nil
},
},
"deprecationReason": &Field{
Type: String,
},
},
})
// Again, adding field configs to __Type that have cyclic reference here
// because golang don't like them too much during init/compile-time
__Type.AddFieldConfig("fields", &Field{
Type: NewList(NewNonNull(__Field)),
Args: FieldConfigArgument{
"includeDeprecated": &ArgumentConfig{
Type: Boolean,
DefaultValue: false,
},
},
Resolve: func(p ResolveParams) (interface{}, error) {
includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
switch ttype := p.Source.(type) {
case *Object:
if ttype == nil {
return nil, nil
}
fields := []*FieldDefinition{}
for _, field := range ttype.Fields() {
if !includeDeprecated && field.DeprecationReason != "" {
continue
}
fields = append(fields, field)
}
return fields, nil
case *Interface:
if ttype == nil {
return nil, nil
}
fields := []*FieldDefinition{}
for _, field := range ttype.Fields() {
if !includeDeprecated && field.DeprecationReason != "" {
continue
}
fields = append(fields, field)
}
return fields, nil
}
return nil, nil
},
})
__Type.AddFieldConfig("interfaces", &Field{
Type: NewList(NewNonNull(__Type)),
Resolve: func(p ResolveParams) (interface{}, error) {
switch ttype := p.Source.(type) {
case *Object:
return ttype.Interfaces(), nil
}
return nil, nil
},
})
__Type.AddFieldConfig("possibleTypes", &Field{
Type: NewList(NewNonNull(__Type)),
Resolve: func(p ResolveParams) (interface{}, error) {
switch ttype := p.Source.(type) {
case *Interface:
return ttype.PossibleTypes(), nil
case *Union:
return ttype.PossibleTypes(), nil
}
return nil, nil
},
})
__Type.AddFieldConfig("enumValues", &Field{
Type: NewList(NewNonNull(__EnumValue)),
Args: FieldConfigArgument{
"includeDeprecated": &ArgumentConfig{
Type: Boolean,
DefaultValue: false,
},
},
Resolve: func(p ResolveParams) (interface{}, error) {
includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
switch ttype := p.Source.(type) {
case *Enum:
if includeDeprecated {
return ttype.Values(), nil
}
values := []*EnumValueDefinition{}
for _, value := range ttype.Values() {
if value.DeprecationReason != "" {
continue
}
values = append(values, value)
}
return values, nil
}
return nil, nil
},
})
__Type.AddFieldConfig("inputFields", &Field{
Type: NewList(NewNonNull(__InputValue)),
Resolve: func(p ResolveParams) (interface{}, error) {
switch ttype := p.Source.(type) {
case *InputObject:
fields := []*InputObjectField{}
for _, field := range ttype.Fields() {
fields = append(fields, field)
}
return fields, nil
}
return nil, nil
},
})
__Type.AddFieldConfig("ofType", &Field{
Type: __Type,
})
/**
* Note that these are FieldDefinition and not FieldConfig,
* so the format for args is different.
*/
SchemaMetaFieldDef = &FieldDefinition{
Name: "__schema",
Type: NewNonNull(__Schema),
Description: "Access the current type schema of this server.",
Args: []*Argument{},
Resolve: func(p ResolveParams) (interface{}, error) {
return p.Info.Schema, nil
},
}
TypeMetaFieldDef = &FieldDefinition{
Name: "__type",
Type: __Type,
Description: "Request the type information of a single type.",
Args: []*Argument{
&Argument{
PrivateName: "name",
Type: NewNonNull(String),
},
},
Resolve: func(p ResolveParams) (interface{}, error) {
name, ok := p.Args["name"].(string)
if !ok {
return nil, nil
}
return p.Info.Schema.Type(name), nil
},
}
TypeNameMetaFieldDef = &FieldDefinition{
Name: "__typename",
Type: NewNonNull(String),
Description: "The name of the current Object type at runtime.",
Args: []*Argument{},
Resolve: func(p ResolveParams) (interface{}, error) {
return p.Info.ParentType.Name(), nil
},
}
}
/**
 * Produces a GraphQL Value AST given a Golang value.
 *
 * Optionally, a GraphQL type may be provided, which will be used to
 * disambiguate between value primitives.
 *
 * | JSON Value    | GraphQL Value        |
 * | ------------- | -------------------- |
 * | Object        | Input Object         |
 * | Array         | List                 |
 * | Boolean       | Boolean              |
 * | String        | String / Enum Value  |
 * | Number        | Int / Float          |
 *
 * Returns nil when the value is nullish or cannot be represented.
 */
func astFromValue(value interface{}, ttype Type) ast.Value {
	if ttype, ok := ttype.(*NonNull); ok {
		// Note: we're not checking that the result is non-null.
		// This function is not responsible for validating the input value.
		return astFromValue(value, ttype.OfType)
	}
	if isNullish(value) {
		return nil
	}
	valueVal := reflect.ValueOf(value)
	if !valueVal.IsValid() {
		return nil
	}
	// Dereference a pointer; a nil pointer yields an invalid Value below.
	if valueVal.Type().Kind() == reflect.Ptr {
		valueVal = valueVal.Elem()
	}
	if !valueVal.IsValid() {
		return nil
	}
	// Convert Golang slice to GraphQL list. If the Type is a list, but
	// the value is not an array, convert the value using the list's item type.
	if ttype, ok := ttype.(*List); ok {
		if valueVal.Type().Kind() != reflect.Slice {
			// Because GraphQL will accept single values as a "list of one" when
			// expecting a list, if there's a non-array value and an expected list type,
			// create an AST using the list's item type.
			return astFromValue(value, ttype.OfType)
		}
		itemType := ttype.OfType
		values := []ast.Value{}
		for i := 0; i < valueVal.Len(); i++ {
			item := valueVal.Index(i).Interface()
			if itemAST := astFromValue(item, itemType); itemAST != nil {
				values = append(values, itemAST)
			}
		}
		return ast.NewListValue(&ast.ListValue{
			Values: values,
		})
	}
	if valueVal.Type().Kind() == reflect.Map {
		// TODO: implement astFromValue from Map to Value
	}
	if value, ok := value.(bool); ok {
		return ast.NewBooleanValue(&ast.BooleanValue{
			Value: value,
		})
	}
	if value, ok := value.(int); ok {
		if ttype == Float {
			// NOTE(review): an int destined for a Float type is emitted as an
			// IntValue node with a ".0" suffix — matches existing behavior;
			// consider a FloatValue node instead.
			return ast.NewIntValue(&ast.IntValue{
				Value: fmt.Sprintf("%v.0", value),
			})
		}
		return ast.NewIntValue(&ast.IntValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float32); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float64); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(string); ok {
		// value is already a string; no fmt round-trip needed.
		if _, ok := ttype.(*Enum); ok {
			return ast.NewEnumValue(&ast.EnumValue{
				Value: value,
			})
		}
		return ast.NewStringValue(&ast.StringValue{
			Value: value,
		})
	}
	// fallback, treat as string
	return ast.NewStringValue(&ast.StringValue{
		Value: fmt.Sprintf("%v", value),
	})
}
Update dependencies and fix newly found lint error
Commit:
30c39ecc13776b7a2ad4488ca38f6b3ea613f181 [30c39ec]
Parents:
ca003e48c9
Author:
Lee Byron <lee@leebyron.com>
Date:
26 November 2015 at 7:38:06 AM SGT
package graphql
import (
"fmt"
"reflect"
"github.com/graphql-go/graphql/language/ast"
"github.com/graphql-go/graphql/language/printer"
)
// TypeKind* are the string constants backing the __TypeKind introspection
// enum — one per kind of type in the GraphQL type system.
const (
	TypeKindScalar      = "SCALAR"
	TypeKindObject      = "OBJECT"
	TypeKindInterface   = "INTERFACE"
	TypeKindUnion       = "UNION"
	TypeKindEnum        = "ENUM"
	TypeKindInputObject = "INPUT_OBJECT"
	TypeKindList        = "LIST"
	TypeKindNonNull     = "NON_NULL"
)

// Introspection types; populated once by init below.
var __Directive *Object
var __Schema *Object
var __Type *Object
var __Field *Object
var __InputValue *Object
var __EnumValue *Object
var __TypeKind *Enum

// Meta field definitions (__schema, __type, __typename); populated by init below.
var SchemaMetaFieldDef *FieldDefinition
var TypeMetaFieldDef *FieldDefinition
var TypeNameMetaFieldDef *FieldDefinition
// init builds the GraphQL introspection schema at package load time: the
// __TypeKind enum, the introspection objects (__Type, __InputValue, __Field,
// __Directive, __Schema, __EnumValue), and the meta field definitions
// (__schema, __type, __typename). Fields of __Type that refer back to other
// introspection types are attached afterwards via AddFieldConfig because the
// references are cyclic.
func init() {
	// Enum of the eight kinds of type defined by the GraphQL specification.
	__TypeKind = NewEnum(EnumConfig{
		Name:        "__TypeKind",
		Description: "An enum describing what kind of type a given `__Type` is",
		Values: EnumValueConfigMap{
			"SCALAR": &EnumValueConfig{
				Value:       TypeKindScalar,
				Description: "Indicates this type is a scalar.",
			},
			"OBJECT": &EnumValueConfig{
				Value: TypeKindObject,
				Description: "Indicates this type is an object. " +
					"`fields` and `interfaces` are valid fields.",
			},
			"INTERFACE": &EnumValueConfig{
				Value: TypeKindInterface,
				Description: "Indicates this type is an interface. " +
					"`fields` and `possibleTypes` are valid fields.",
			},
			"UNION": &EnumValueConfig{
				Value: TypeKindUnion,
				Description: "Indicates this type is a union. " +
					"`possibleTypes` is a valid field.",
			},
			"ENUM": &EnumValueConfig{
				Value: TypeKindEnum,
				Description: "Indicates this type is an enum. " +
					"`enumValues` is a valid field.",
			},
			"INPUT_OBJECT": &EnumValueConfig{
				Value: TypeKindInputObject,
				Description: "Indicates this type is an input object. " +
					"`inputFields` is a valid field.",
			},
			"LIST": &EnumValueConfig{
				Value: TypeKindList,
				Description: "Indicates this type is a list. " +
					"`ofType` is a valid field.",
			},
			"NON_NULL": &EnumValueConfig{
				Value: TypeKindNonNull,
				Description: "Indicates this type is a non-null. " +
					"`ofType` is a valid field.",
			},
		},
	})
	// Note: some fields (for e.g "fields", "interfaces") are defined later due to cyclic reference
	__Type = NewObject(ObjectConfig{
		Name: "__Type",
		Description: "The fundamental unit of any GraphQL Schema is the type. There are " +
			"many kinds of types in GraphQL as represented by the `__TypeKind` enum." +
			"\n\nDepending on the kind of a type, certain fields describe " +
			"information about that type. Scalar types provide no information " +
			"beyond a name and description, while Enum types provide their values. " +
			"Object and Interface types provide the fields they describe. Abstract " +
			"types, Union and Interface, provide the Object types possible " +
			"at runtime. List and NonNull types compose other types.",
		Fields: Fields{
			"kind": &Field{
				Type: NewNonNull(__TypeKind),
				// Map the concrete Go type of the source to its TypeKind constant.
				Resolve: func(p ResolveParams) (interface{}, error) {
					switch p.Source.(type) {
					case *Scalar:
						return TypeKindScalar, nil
					case *Object:
						return TypeKindObject, nil
					case *Interface:
						return TypeKindInterface, nil
					case *Union:
						return TypeKindUnion, nil
					case *Enum:
						return TypeKindEnum, nil
					case *InputObject:
						return TypeKindInputObject, nil
					case *List:
						return TypeKindList, nil
					case *NonNull:
						return TypeKindNonNull, nil
					}
					return nil, fmt.Errorf("Unknown kind of type: %v", p.Source)
				},
			},
			"name": &Field{
				Type: String,
			},
			"description": &Field{
				Type: String,
			},
			// Placeholders; real configs are attached below with AddFieldConfig
			// because they reference introspection types cyclically.
			"fields":        &Field{},
			"interfaces":    &Field{},
			"possibleTypes": &Field{},
			"enumValues":    &Field{},
			"inputFields":   &Field{},
			"ofType":        &Field{},
		},
	})
	__InputValue = NewObject(ObjectConfig{
		Name: "__InputValue",
		Description: "Arguments provided to Fields or Directives and the input fields of an " +
			"InputObject are represented as Input Values which describe their type " +
			"and optionally a default value.",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"defaultValue": &Field{
				Type: String,
				Description: "A GraphQL-formatted string representing the default value for this " +
					"input value.",
				// Render the default value (if any) as a GraphQL literal string.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if inputVal, ok := p.Source.(*Argument); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						if isNullish(inputVal.DefaultValue) {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					if inputVal, ok := p.Source.(*InputObjectField); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					return nil, nil
				},
			},
		},
	})
	__Field = NewObject(ObjectConfig{
		Name: "__Field",
		Description: "Object and Interface types are described by a list of Fields, each of " +
			"which has a name, potentially a list of arguments, and a return type.",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(NewNonNull(__InputValue))),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return field.Args, nil
					}
					return []interface{}{}, nil
				},
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				// A field counts as deprecated iff it carries a deprecation reason.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	__Directive = NewObject(ObjectConfig{
		Name: "__Directive",
		Description: "A Directive provides a way to describe alternate runtime execution and " +
			"type validation behavior in a GraphQL document. " +
			"\n\nIn some cases, you need to provide options to alter GraphQL's " +
			"execution behavior in ways field arguments will not suffice, such as " +
			"conditionally including or skipping a field. Directives provide this by " +
			"describing additional information to the executor.",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(
					NewNonNull(__InputValue),
				)),
			},
			"onOperation": &Field{
				Type: NewNonNull(Boolean),
			},
			"onFragment": &Field{
				Type: NewNonNull(Boolean),
			},
			"onField": &Field{
				Type: NewNonNull(Boolean),
			},
		},
	})
	__Schema = NewObject(ObjectConfig{
		Name: "__Schema",
		Description: `A GraphQL Schema defines the capabilities of a GraphQL server. ` +
			`It exposes all available types and directives on the server, as well as ` +
			`the entry points for query, mutation, and subscription operations.`,
		Fields: Fields{
			"types": &Field{
				Description: "A list of all types supported by this server.",
				Type: NewNonNull(NewList(
					NewNonNull(__Type),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						results := []Type{}
						for _, ttype := range schema.TypeMap() {
							results = append(results, ttype)
						}
						return results, nil
					}
					return []Type{}, nil
				},
			},
			"queryType": &Field{
				Description: "The type that query operations will be rooted at.",
				Type:        NewNonNull(__Type),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.QueryType(), nil
					}
					return nil, nil
				},
			},
			"mutationType": &Field{
				Description: `If this server supports mutation, the type that ` +
					`mutation operations will be rooted at.`,
				Type: __Type,
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						if schema.MutationType() != nil {
							return schema.MutationType(), nil
						}
					}
					return nil, nil
				},
			},
			"subscriptionType": &Field{
				Description: `If this server supports subscription, the type that ` +
					`subscription operations will be rooted at.`,
				Type: __Type,
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						if schema.SubscriptionType() != nil {
							return schema.SubscriptionType(), nil
						}
					}
					return nil, nil
				},
			},
			"directives": &Field{
				Description: `A list of all directives supported by this server.`,
				Type: NewNonNull(NewList(
					NewNonNull(__Directive),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.Directives(), nil
					}
					return nil, nil
				},
			},
		},
	})
	__EnumValue = NewObject(ObjectConfig{
		Name: "__EnumValue",
		Description: "One possible value for a given Enum. Enum values are unique values, not " +
			"a placeholder for a string or numeric value. However an Enum value is " +
			"returned in a JSON response as a string.",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*EnumValueDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	// Again, adding field configs to __Type that have cyclic reference here
	// because golang don't like them too much during init/compile-time
	__Type.AddFieldConfig("fields", &Field{
		Type: NewList(NewNonNull(__Field)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		// Lists the fields of an Object or Interface, optionally filtering
		// out deprecated ones.
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Object:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			case *Interface:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("interfaces", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Object:
				return ttype.Interfaces(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("possibleTypes", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Interface:
				return ttype.PossibleTypes(), nil
			case *Union:
				return ttype.PossibleTypes(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("enumValues", &Field{
		Type: NewList(NewNonNull(__EnumValue)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Enum:
				if includeDeprecated {
					return ttype.Values(), nil
				}
				values := []*EnumValueDefinition{}
				for _, value := range ttype.Values() {
					if value.DeprecationReason != "" {
						continue
					}
					values = append(values, value)
				}
				return values, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("inputFields", &Field{
		Type: NewList(NewNonNull(__InputValue)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *InputObject:
				fields := []*InputObjectField{}
				for _, field := range ttype.Fields() {
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("ofType", &Field{
		Type: __Type,
	})
	/**
	 * Note that these are FieldDefinition and not FieldConfig,
	 * so the format for args is different.
	 */
	SchemaMetaFieldDef = &FieldDefinition{
		Name:        "__schema",
		Type:        NewNonNull(__Schema),
		Description: "Access the current type schema of this server.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.Schema, nil
		},
	}
	TypeMetaFieldDef = &FieldDefinition{
		Name:        "__type",
		Type:        __Type,
		Description: "Request the type information of a single type.",
		Args: []*Argument{
			&Argument{
				PrivateName: "name",
				Type:        NewNonNull(String),
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			name, ok := p.Args["name"].(string)
			if !ok {
				return nil, nil
			}
			return p.Info.Schema.Type(name), nil
		},
	}
	TypeNameMetaFieldDef = &FieldDefinition{
		Name:        "__typename",
		Type:        NewNonNull(String),
		Description: "The name of the current Object type at runtime.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.ParentType.Name(), nil
		},
	}
}
/**
 * Produces a GraphQL Value AST given a Golang value.
 *
 * Optionally, a GraphQL type may be provided, which will be used to
 * disambiguate between value primitives.
 *
 * | JSON Value    | GraphQL Value        |
 * | ------------- | -------------------- |
 * | Object        | Input Object         |
 * | Array         | List                 |
 * | Boolean       | Boolean              |
 * | String        | String / Enum Value  |
 * | Number        | Int / Float          |
 *
 * Returns nil when the value is nullish or cannot be represented.
 */
func astFromValue(value interface{}, ttype Type) ast.Value {
	if ttype, ok := ttype.(*NonNull); ok {
		// Note: we're not checking that the result is non-null.
		// This function is not responsible for validating the input value.
		return astFromValue(value, ttype.OfType)
	}
	if isNullish(value) {
		return nil
	}
	valueVal := reflect.ValueOf(value)
	if !valueVal.IsValid() {
		return nil
	}
	// Dereference a pointer; a nil pointer yields an invalid Value below.
	if valueVal.Type().Kind() == reflect.Ptr {
		valueVal = valueVal.Elem()
	}
	if !valueVal.IsValid() {
		return nil
	}
	// Convert Golang slice to GraphQL list. If the Type is a list, but
	// the value is not an array, convert the value using the list's item type.
	if ttype, ok := ttype.(*List); ok {
		if valueVal.Type().Kind() != reflect.Slice {
			// Because GraphQL will accept single values as a "list of one" when
			// expecting a list, if there's a non-array value and an expected list type,
			// create an AST using the list's item type.
			return astFromValue(value, ttype.OfType)
		}
		itemType := ttype.OfType
		values := []ast.Value{}
		for i := 0; i < valueVal.Len(); i++ {
			item := valueVal.Index(i).Interface()
			if itemAST := astFromValue(item, itemType); itemAST != nil {
				values = append(values, itemAST)
			}
		}
		return ast.NewListValue(&ast.ListValue{
			Values: values,
		})
	}
	if valueVal.Type().Kind() == reflect.Map {
		// TODO: implement astFromValue from Map to Value
	}
	if value, ok := value.(bool); ok {
		return ast.NewBooleanValue(&ast.BooleanValue{
			Value: value,
		})
	}
	if value, ok := value.(int); ok {
		if ttype == Float {
			// NOTE(review): an int destined for a Float type is emitted as an
			// IntValue node with a ".0" suffix — matches existing behavior;
			// consider a FloatValue node instead.
			return ast.NewIntValue(&ast.IntValue{
				Value: fmt.Sprintf("%v.0", value),
			})
		}
		return ast.NewIntValue(&ast.IntValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float32); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float64); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(string); ok {
		// value is already a string; no fmt round-trip needed.
		if _, ok := ttype.(*Enum); ok {
			return ast.NewEnumValue(&ast.EnumValue{
				Value: value,
			})
		}
		return ast.NewStringValue(&ast.StringValue{
			Value: value,
		})
	}
	// fallback, treat as string
	return ast.NewStringValue(&ast.StringValue{
		Value: fmt.Sprintf("%v", value),
	})
}
|
package graphql
import (
"fmt"
"reflect"
"github.com/graphql-go/graphql/language/ast"
"github.com/graphql-go/graphql/language/printer"
)
// TypeKind* are the string constants backing the __TypeKind introspection
// enum — one per kind of type in the GraphQL type system.
const (
	TypeKindScalar      = "SCALAR"
	TypeKindObject      = "OBJECT"
	TypeKindInterface   = "INTERFACE"
	TypeKindUnion       = "UNION"
	TypeKindEnum        = "ENUM"
	TypeKindInputObject = "INPUT_OBJECT"
	TypeKindList        = "LIST"
	TypeKindNonNull     = "NON_NULL"
)

// Introspection types; populated once by init below.
var __Directive *Object
var __Schema *Object
var __Type *Object
var __Field *Object
var __InputValue *Object
var __EnumValue *Object
var __TypeKind *Enum

// Meta field definitions (__schema, __type, __typename); populated by init below.
var SchemaMetaFieldDef *FieldDefinition
var TypeMetaFieldDef *FieldDefinition
var TypeNameMetaFieldDef *FieldDefinition
// init builds the GraphQL introspection schema at package load time: the
// __TypeKind enum, the introspection objects (__Type, __InputValue, __Field,
// __Directive, __Schema, __EnumValue), and the meta field definitions
// (__schema, __type, __typename). Fields of __Type that refer back to other
// introspection types are attached afterwards via AddFieldConfig because the
// references are cyclic.
func init() {
	// Enum of the eight kinds of type defined by the GraphQL specification.
	__TypeKind = NewEnum(EnumConfig{
		Name:        "__TypeKind",
		Description: "An enum describing what kind of type a given __Type is",
		Values: EnumValueConfigMap{
			"SCALAR": &EnumValueConfig{
				Value:       TypeKindScalar,
				Description: "Indicates this type is a scalar.",
			},
			"OBJECT": &EnumValueConfig{
				Value: TypeKindObject,
				Description: "Indicates this type is an object. " +
					"`fields` and `interfaces` are valid fields.",
			},
			"INTERFACE": &EnumValueConfig{
				Value: TypeKindInterface,
				Description: "Indicates this type is an interface. " +
					"`fields` and `possibleTypes` are valid fields.",
			},
			"UNION": &EnumValueConfig{
				Value: TypeKindUnion,
				Description: "Indicates this type is a union. " +
					"`possibleTypes` is a valid field.",
			},
			"ENUM": &EnumValueConfig{
				Value: TypeKindEnum,
				Description: "Indicates this type is an enum. " +
					"`enumValues` is a valid field.",
			},
			"INPUT_OBJECT": &EnumValueConfig{
				Value: TypeKindInputObject,
				Description: "Indicates this type is an input object. " +
					"`inputFields` is a valid field.",
			},
			"LIST": &EnumValueConfig{
				Value: TypeKindList,
				Description: "Indicates this type is a list. " +
					"`ofType` is a valid field.",
			},
			"NON_NULL": &EnumValueConfig{
				Value: TypeKindNonNull,
				Description: "Indicates this type is a non-null. " +
					"`ofType` is a valid field.",
			},
		},
	})
	// Note: some fields (for e.g "fields", "interfaces") are defined later due to cyclic reference
	__Type = NewObject(ObjectConfig{
		Name: "__Type",
		Fields: Fields{
			"kind": &Field{
				Type: NewNonNull(__TypeKind),
				// Map the concrete Go type of the source to its TypeKind constant.
				Resolve: func(p ResolveParams) (interface{}, error) {
					switch p.Source.(type) {
					case *Scalar:
						return TypeKindScalar, nil
					case *Object:
						return TypeKindObject, nil
					case *Interface:
						return TypeKindInterface, nil
					case *Union:
						return TypeKindUnion, nil
					case *Enum:
						return TypeKindEnum, nil
					case *InputObject:
						return TypeKindInputObject, nil
					case *List:
						return TypeKindList, nil
					case *NonNull:
						return TypeKindNonNull, nil
					}
					return nil, fmt.Errorf("Unknown kind of type: %v", p.Source)
				},
			},
			"name": &Field{
				Type: String,
			},
			"description": &Field{
				Type: String,
			},
			// Placeholders; real configs are attached below with AddFieldConfig
			// because they reference introspection types cyclically.
			"fields":        &Field{},
			"interfaces":    &Field{},
			"possibleTypes": &Field{},
			"enumValues":    &Field{},
			"inputFields":   &Field{},
			"ofType":        &Field{},
		},
	})
	__InputValue = NewObject(ObjectConfig{
		Name: "__InputValue",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"defaultValue": &Field{
				Type: String,
				// Render the default value (if any) as a GraphQL literal string.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if inputVal, ok := p.Source.(*Argument); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					if inputVal, ok := p.Source.(*InputObjectField); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					return nil, nil
				},
			},
		},
	})
	__Field = NewObject(ObjectConfig{
		Name: "__Field",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(NewNonNull(__InputValue))),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return field.Args, nil
					}
					return []interface{}{}, nil
				},
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				// A field counts as deprecated iff it carries a deprecation reason.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	__Directive = NewObject(ObjectConfig{
		Name: "__Directive",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(
					NewNonNull(__InputValue),
				)),
			},
			"onOperation": &Field{
				Type: NewNonNull(Boolean),
			},
			"onFragment": &Field{
				Type: NewNonNull(Boolean),
			},
			"onField": &Field{
				Type: NewNonNull(Boolean),
			},
		},
	})
	__Schema = NewObject(ObjectConfig{
		Name: "__Schema",
		Description: `A GraphQL Schema defines the capabilities of a GraphQL
server. It exposes all available types and directives on
the server, as well as the entry points for query and
mutation operations.`,
		Fields: Fields{
			"types": &Field{
				Description: "A list of all types supported by this server.",
				Type: NewNonNull(NewList(
					NewNonNull(__Type),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						results := []Type{}
						for _, ttype := range schema.TypeMap() {
							results = append(results, ttype)
						}
						return results, nil
					}
					return []Type{}, nil
				},
			},
			"queryType": &Field{
				Description: "The type that query operations will be rooted at.",
				Type:        NewNonNull(__Type),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.QueryType(), nil
					}
					return nil, nil
				},
			},
			"mutationType": &Field{
				Description: `If this server supports mutation, the type that ` +
					`mutation operations will be rooted at.`,
				Type: __Type,
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						if schema.MutationType() != nil {
							return schema.MutationType(), nil
						}
					}
					return nil, nil
				},
			},
			"directives": &Field{
				Description: `A list of all directives supported by this server.`,
				Type: NewNonNull(NewList(
					NewNonNull(__Directive),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.Directives(), nil
					}
					return nil, nil
				},
			},
		},
	})
	__EnumValue = NewObject(ObjectConfig{
		Name: "__EnumValue",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*EnumValueDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	// Again, adding field configs to __Type that have cyclic reference here
	// because golang don't like them too much during init/compile-time
	__Type.AddFieldConfig("fields", &Field{
		Type: NewList(NewNonNull(__Field)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		// Lists the fields of an Object or Interface, optionally filtering
		// out deprecated ones.
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Object:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			case *Interface:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("interfaces", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Object:
				return ttype.Interfaces(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("possibleTypes", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Interface:
				return ttype.PossibleTypes(), nil
			case *Union:
				return ttype.PossibleTypes(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("enumValues", &Field{
		Type: NewList(NewNonNull(__EnumValue)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Enum:
				if includeDeprecated {
					return ttype.Values(), nil
				}
				values := []*EnumValueDefinition{}
				for _, value := range ttype.Values() {
					if value.DeprecationReason != "" {
						continue
					}
					values = append(values, value)
				}
				return values, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("inputFields", &Field{
		Type: NewList(NewNonNull(__InputValue)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *InputObject:
				fields := []*InputObjectField{}
				for _, field := range ttype.Fields() {
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("ofType", &Field{
		Type: __Type,
	})
	/**
	 * Note that these are FieldDefinition and not FieldConfig,
	 * so the format for args is different.
	 */
	SchemaMetaFieldDef = &FieldDefinition{
		Name:        "__schema",
		Type:        NewNonNull(__Schema),
		Description: "Access the current type schema of this server.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.Schema, nil
		},
	}
	TypeMetaFieldDef = &FieldDefinition{
		Name:        "__type",
		Type:        __Type,
		Description: "Request the type information of a single type.",
		Args: []*Argument{
			&Argument{
				PrivateName: "name",
				Type:        NewNonNull(String),
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			name, ok := p.Args["name"].(string)
			if !ok {
				return nil, nil
			}
			return p.Info.Schema.Type(name), nil
		},
	}
	TypeNameMetaFieldDef = &FieldDefinition{
		Name:        "__typename",
		Type:        NewNonNull(String),
		Description: "The name of the current Object type at runtime.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.ParentType.Name(), nil
		},
	}
}
// astFromValue produces a GraphQL Value AST given a Go value.
//
// Optionally, a GraphQL type may be provided, which will be used to
// disambiguate between value primitives:
//
//	| Go / JSON Value | GraphQL Value       |
//	| --------------- | ------------------- |
//	| Object          | Input Object        |
//	| Array           | List                |
//	| Boolean         | Boolean             |
//	| String          | String / Enum Value |
//	| Number          | Int / Float         |
//
// It returns nil when the value is nullish or invalid.
func astFromValue(value interface{}, ttype Type) ast.Value {
	if ttype, ok := ttype.(*NonNull); ok {
		// Note: we're not checking that the result is non-null.
		// This function is not responsible for validating the input value.
		return astFromValue(value, ttype.OfType)
	}
	if isNullish(value) {
		return nil
	}
	valueVal := reflect.ValueOf(value)
	if !valueVal.IsValid() {
		return nil
	}
	if valueVal.Type().Kind() == reflect.Ptr {
		valueVal = valueVal.Elem()
		if !valueVal.IsValid() {
			return nil
		}
		// Fix: the type assertions below operate on the interface `value`,
		// so a pointer input (e.g. *int) previously fell through to the
		// string fallback and rendered the pointer address. Unwrap the
		// pointee into `value` so it is matched like a plain value.
		value = valueVal.Interface()
	}
	if !valueVal.IsValid() {
		return nil
	}
	// Convert a Go slice to a GraphQL list. If the type is a list but the
	// value is not a slice, convert the value using the list's item type.
	if ttype, ok := ttype.(*List); ok {
		if valueVal.Type().Kind() == reflect.Slice {
			itemType := ttype.OfType
			values := []ast.Value{}
			for i := 0; i < valueVal.Len(); i++ {
				item := valueVal.Index(i).Interface()
				if itemAST := astFromValue(item, itemType); itemAST != nil {
					values = append(values, itemAST)
				}
			}
			return ast.NewListValue(&ast.ListValue{
				Values: values,
			})
		}
		// Because GraphQL will accept single values as a "list of one" when
		// expecting a list, if there's a non-slice value and an expected list
		// type, create an AST using the list's item type.
		return astFromValue(value, ttype.OfType)
	}
	if valueVal.Type().Kind() == reflect.Map {
		// TODO: implement astFromValue from Map to Value
	}
	if value, ok := value.(bool); ok {
		return ast.NewBooleanValue(&ast.BooleanValue{
			Value: value,
		})
	}
	if value, ok := value.(int); ok {
		// An int coerced against a Float type keeps integer syntax with a
		// ".0" suffix so it prints as a float literal.
		if ttype == Float {
			return ast.NewIntValue(&ast.IntValue{
				Value: fmt.Sprintf("%v.0", value),
			})
		}
		return ast.NewIntValue(&ast.IntValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float32); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float64); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(string); ok {
		// A string against an Enum type is emitted as a bare enum
		// identifier rather than a quoted string.
		if _, ok := ttype.(*Enum); ok {
			return ast.NewEnumValue(&ast.EnumValue{
				Value: fmt.Sprintf("%v", value),
			})
		}
		return ast.NewStringValue(&ast.StringValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	// Fallback: render any other value as a string.
	return ast.NewStringValue(&ast.StringValue{
		Value: fmt.Sprintf("%v", value),
	})
}
Fix introspection: add subscriptionType field to __Schema
package graphql
import (
"fmt"
"reflect"
"github.com/graphql-go/graphql/language/ast"
"github.com/graphql-go/graphql/language/printer"
)
// GraphQL introspection type kinds: the string values carried by the
// __TypeKind enum and returned by the __Type "kind" resolver below.
const (
	TypeKindScalar      = "SCALAR"
	TypeKindObject      = "OBJECT"
	TypeKindInterface   = "INTERFACE"
	TypeKindUnion       = "UNION"
	TypeKindEnum        = "ENUM"
	TypeKindInputObject = "INPUT_OBJECT"
	TypeKindList        = "LIST"
	TypeKindNonNull     = "NON_NULL"
)
// Introspection objects, constructed once in init because they reference
// each other cyclically and cannot be built with plain initializers.
var __Directive *Object
var __Schema *Object
var __Type *Object
var __Field *Object
var __InputValue *Object
var __EnumValue *Object
var __TypeKind *Enum

// Meta field definitions (__schema, __type, __typename), also built in init.
var SchemaMetaFieldDef *FieldDefinition
var TypeMetaFieldDef *FieldDefinition
var TypeNameMetaFieldDef *FieldDefinition
// init wires up the GraphQL introspection schema (__TypeKind, __Type,
// __InputValue, __Field, __Directive, __Schema, __EnumValue) and the three
// meta field definitions (__schema, __type, __typename). It runs at package
// load because these objects reference each other cyclically.
func init() {
	__TypeKind = NewEnum(EnumConfig{
		Name:        "__TypeKind",
		Description: "An enum describing what kind of type a given __Type is",
		Values: EnumValueConfigMap{
			"SCALAR": &EnumValueConfig{
				Value:       TypeKindScalar,
				Description: "Indicates this type is a scalar.",
			},
			"OBJECT": &EnumValueConfig{
				Value: TypeKindObject,
				Description: "Indicates this type is an object. " +
					"`fields` and `interfaces` are valid fields.",
			},
			"INTERFACE": &EnumValueConfig{
				Value: TypeKindInterface,
				Description: "Indicates this type is an interface. " +
					"`fields` and `possibleTypes` are valid fields.",
			},
			"UNION": &EnumValueConfig{
				Value: TypeKindUnion,
				Description: "Indicates this type is a union. " +
					"`possibleTypes` is a valid field.",
			},
			"ENUM": &EnumValueConfig{
				Value: TypeKindEnum,
				Description: "Indicates this type is an enum. " +
					"`enumValues` is a valid field.",
			},
			"INPUT_OBJECT": &EnumValueConfig{
				Value: TypeKindInputObject,
				Description: "Indicates this type is an input object. " +
					"`inputFields` is a valid field.",
			},
			"LIST": &EnumValueConfig{
				Value: TypeKindList,
				Description: "Indicates this type is a list. " +
					"`ofType` is a valid field.",
			},
			"NON_NULL": &EnumValueConfig{
				Value: TypeKindNonNull,
				Description: "Indicates this type is a non-null. " +
					"`ofType` is a valid field.",
			},
		},
	})
	// Note: some fields (e.g. "fields", "interfaces") are attached later via
	// AddFieldConfig because they reference __Type cyclically.
	__Type = NewObject(ObjectConfig{
		Name: "__Type",
		Fields: Fields{
			"kind": &Field{
				Type: NewNonNull(__TypeKind),
				// Maps the concrete Go type of the source to its kind string.
				Resolve: func(p ResolveParams) (interface{}, error) {
					switch p.Source.(type) {
					case *Scalar:
						return TypeKindScalar, nil
					case *Object:
						return TypeKindObject, nil
					case *Interface:
						return TypeKindInterface, nil
					case *Union:
						return TypeKindUnion, nil
					case *Enum:
						return TypeKindEnum, nil
					case *InputObject:
						return TypeKindInputObject, nil
					case *List:
						return TypeKindList, nil
					case *NonNull:
						return TypeKindNonNull, nil
					}
					return nil, fmt.Errorf("Unknown kind of type: %v", p.Source)
				},
			},
			"name": &Field{
				Type: String,
			},
			"description": &Field{
				Type: String,
			},
			// Placeholders only; real definitions are attached below.
			"fields":        &Field{},
			"interfaces":    &Field{},
			"possibleTypes": &Field{},
			"enumValues":    &Field{},
			"inputFields":   &Field{},
			"ofType":        &Field{},
		},
	})
	__InputValue = NewObject(ObjectConfig{
		Name: "__InputValue",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"defaultValue": &Field{
				Type: String,
				// Renders the default value (when present) as a printed
				// GraphQL literal; both argument and input-object-field
				// sources are supported.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if inputVal, ok := p.Source.(*Argument); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					if inputVal, ok := p.Source.(*InputObjectField); ok {
						if inputVal.DefaultValue == nil {
							return nil, nil
						}
						astVal := astFromValue(inputVal.DefaultValue, inputVal)
						return printer.Print(astVal), nil
					}
					return nil, nil
				},
			},
		},
	})
	__Field = NewObject(ObjectConfig{
		Name: "__Field",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(NewNonNull(__InputValue))),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return field.Args, nil
					}
					return []interface{}{}, nil
				},
			},
			"type": &Field{
				Type: NewNonNull(__Type),
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				// A field is deprecated iff a deprecation reason is set.
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*FieldDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	__Directive = NewObject(ObjectConfig{
		Name: "__Directive",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"args": &Field{
				Type: NewNonNull(NewList(
					NewNonNull(__InputValue),
				)),
			},
			"onOperation": &Field{
				Type: NewNonNull(Boolean),
			},
			"onFragment": &Field{
				Type: NewNonNull(Boolean),
			},
			"onField": &Field{
				Type: NewNonNull(Boolean),
			},
		},
	})
	__Schema = NewObject(ObjectConfig{
		Name: "__Schema",
		Description: `A GraphQL Schema defines the capabilities of a GraphQL
server. It exposes all available types and directives on
the server, as well as the entry points for query and
mutation operations.`,
		Fields: Fields{
			"types": &Field{
				Description: "A list of all types supported by this server.",
				Type: NewNonNull(NewList(
					NewNonNull(__Type),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						results := []Type{}
						for _, ttype := range schema.TypeMap() {
							results = append(results, ttype)
						}
						return results, nil
					}
					return []Type{}, nil
				},
			},
			"queryType": &Field{
				Description: "The type that query operations will be rooted at.",
				Type:        NewNonNull(__Type),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.QueryType(), nil
					}
					return nil, nil
				},
			},
			"mutationType": &Field{
				Description: `If this server supports mutation, the type that ` +
					`mutation operations will be rooted at.`,
				Type: __Type,
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						if schema.MutationType() != nil {
							return schema.MutationType(), nil
						}
					}
					return nil, nil
				},
			},
			"subscriptionType": &Field{
				// Fixed: this description was a single raw string literal
				// that embedded stray '…' + quote characters from a botched
				// concatenation; it is now concatenated like mutationType's
				// description above (and "support" -> "supports").
				Description: `If this server supports subscription, the type that ` +
					`subscription operations will be rooted at.`,
				Type: __Type,
				// TODO(review): Schema does not expose a subscription root
				// type here, so this always resolves to nil for now.
				Resolve: func(p ResolveParams) (interface{}, error) {
					return nil, nil
				},
			},
			"directives": &Field{
				Description: `A list of all directives supported by this server.`,
				Type: NewNonNull(NewList(
					NewNonNull(__Directive),
				)),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if schema, ok := p.Source.(Schema); ok {
						return schema.Directives(), nil
					}
					return nil, nil
				},
			},
		},
	})
	__EnumValue = NewObject(ObjectConfig{
		Name: "__EnumValue",
		Fields: Fields{
			"name": &Field{
				Type: NewNonNull(String),
			},
			"description": &Field{
				Type: String,
			},
			"isDeprecated": &Field{
				Type: NewNonNull(Boolean),
				Resolve: func(p ResolveParams) (interface{}, error) {
					if field, ok := p.Source.(*EnumValueDefinition); ok {
						return (field.DeprecationReason != ""), nil
					}
					return false, nil
				},
			},
			"deprecationReason": &Field{
				Type: String,
			},
		},
	})
	// The cyclic fields on __Type are attached here, after all objects they
	// reference exist (Go cannot express the cycle in composite literals).
	__Type.AddFieldConfig("fields", &Field{
		Type: NewList(NewNonNull(__Field)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Object:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			case *Interface:
				if ttype == nil {
					return nil, nil
				}
				fields := []*FieldDefinition{}
				for _, field := range ttype.Fields() {
					if !includeDeprecated && field.DeprecationReason != "" {
						continue
					}
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("interfaces", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Object:
				return ttype.Interfaces(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("possibleTypes", &Field{
		Type: NewList(NewNonNull(__Type)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *Interface:
				return ttype.PossibleTypes(), nil
			case *Union:
				return ttype.PossibleTypes(), nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("enumValues", &Field{
		Type: NewList(NewNonNull(__EnumValue)),
		Args: FieldConfigArgument{
			"includeDeprecated": &ArgumentConfig{
				Type:         Boolean,
				DefaultValue: false,
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			includeDeprecated, _ := p.Args["includeDeprecated"].(bool)
			switch ttype := p.Source.(type) {
			case *Enum:
				if includeDeprecated {
					return ttype.Values(), nil
				}
				values := []*EnumValueDefinition{}
				for _, value := range ttype.Values() {
					if value.DeprecationReason != "" {
						continue
					}
					values = append(values, value)
				}
				return values, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("inputFields", &Field{
		Type: NewList(NewNonNull(__InputValue)),
		Resolve: func(p ResolveParams) (interface{}, error) {
			switch ttype := p.Source.(type) {
			case *InputObject:
				fields := []*InputObjectField{}
				for _, field := range ttype.Fields() {
					fields = append(fields, field)
				}
				return fields, nil
			}
			return nil, nil
		},
	})
	__Type.AddFieldConfig("ofType", &Field{
		Type: __Type,
	})
	// Note that these are FieldDefinition and not FieldConfig,
	// so the format for args is different.
	SchemaMetaFieldDef = &FieldDefinition{
		Name:        "__schema",
		Type:        NewNonNull(__Schema),
		Description: "Access the current type schema of this server.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.Schema, nil
		},
	}
	TypeMetaFieldDef = &FieldDefinition{
		Name:        "__type",
		Type:        __Type,
		Description: "Request the type information of a single type.",
		Args: []*Argument{
			&Argument{
				PrivateName: "name",
				Type:        NewNonNull(String),
			},
		},
		Resolve: func(p ResolveParams) (interface{}, error) {
			name, ok := p.Args["name"].(string)
			if !ok {
				return nil, nil
			}
			return p.Info.Schema.Type(name), nil
		},
	}
	TypeNameMetaFieldDef = &FieldDefinition{
		Name:        "__typename",
		Type:        NewNonNull(String),
		Description: "The name of the current Object type at runtime.",
		Args:        []*Argument{},
		Resolve: func(p ResolveParams) (interface{}, error) {
			return p.Info.ParentType.Name(), nil
		},
	}
}
// astFromValue produces a GraphQL Value AST given a Go value.
//
// Optionally, a GraphQL type may be provided, which will be used to
// disambiguate between value primitives:
//
//	| Go / JSON Value | GraphQL Value       |
//	| --------------- | ------------------- |
//	| Object          | Input Object        |
//	| Array           | List                |
//	| Boolean         | Boolean             |
//	| String          | String / Enum Value |
//	| Number          | Int / Float         |
//
// It returns nil when the value is nullish or invalid.
func astFromValue(value interface{}, ttype Type) ast.Value {
	if ttype, ok := ttype.(*NonNull); ok {
		// Note: we're not checking that the result is non-null.
		// This function is not responsible for validating the input value.
		return astFromValue(value, ttype.OfType)
	}
	if isNullish(value) {
		return nil
	}
	valueVal := reflect.ValueOf(value)
	if !valueVal.IsValid() {
		return nil
	}
	if valueVal.Type().Kind() == reflect.Ptr {
		valueVal = valueVal.Elem()
		if !valueVal.IsValid() {
			return nil
		}
		// Fix: the type assertions below operate on the interface `value`,
		// so a pointer input (e.g. *int) previously fell through to the
		// string fallback and rendered the pointer address. Unwrap the
		// pointee into `value` so it is matched like a plain value.
		value = valueVal.Interface()
	}
	if !valueVal.IsValid() {
		return nil
	}
	// Convert a Go slice to a GraphQL list. If the type is a list but the
	// value is not a slice, convert the value using the list's item type.
	if ttype, ok := ttype.(*List); ok {
		if valueVal.Type().Kind() == reflect.Slice {
			itemType := ttype.OfType
			values := []ast.Value{}
			for i := 0; i < valueVal.Len(); i++ {
				item := valueVal.Index(i).Interface()
				if itemAST := astFromValue(item, itemType); itemAST != nil {
					values = append(values, itemAST)
				}
			}
			return ast.NewListValue(&ast.ListValue{
				Values: values,
			})
		}
		// Because GraphQL will accept single values as a "list of one" when
		// expecting a list, if there's a non-slice value and an expected list
		// type, create an AST using the list's item type.
		return astFromValue(value, ttype.OfType)
	}
	if valueVal.Type().Kind() == reflect.Map {
		// TODO: implement astFromValue from Map to Value
	}
	if value, ok := value.(bool); ok {
		return ast.NewBooleanValue(&ast.BooleanValue{
			Value: value,
		})
	}
	if value, ok := value.(int); ok {
		// An int coerced against a Float type keeps integer syntax with a
		// ".0" suffix so it prints as a float literal.
		if ttype == Float {
			return ast.NewIntValue(&ast.IntValue{
				Value: fmt.Sprintf("%v.0", value),
			})
		}
		return ast.NewIntValue(&ast.IntValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float32); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(float64); ok {
		return ast.NewFloatValue(&ast.FloatValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	if value, ok := value.(string); ok {
		// A string against an Enum type is emitted as a bare enum
		// identifier rather than a quoted string.
		if _, ok := ttype.(*Enum); ok {
			return ast.NewEnumValue(&ast.EnumValue{
				Value: fmt.Sprintf("%v", value),
			})
		}
		return ast.NewStringValue(&ast.StringValue{
			Value: fmt.Sprintf("%v", value),
		})
	}
	// Fallback: render any other value as a string.
	return ast.NewStringValue(&ast.StringValue{
		Value: fmt.Sprintf("%v", value),
	})
}
|
/*
* Copyright 2013 Nan Deng
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package msgcenter
import (
"crypto/rand"
"crypto/rsa"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/uniqush/uniqush-conn/msgcache"
"github.com/uniqush/uniqush-conn/proto"
"github.com/uniqush/uniqush-conn/proto/client"
"io"
"net"
"sync"
"testing"
"time"
)
func getCache() msgcache.Cache {
db := 1
c, _ := redis.Dial("tcp", "localhost:6379")
c.Do("SELECT", db)
c.Do("FLUSHDB")
c.Close()
return msgcache.NewRedisMessageCache("", "", db)
}
type alwaysAllowAuth struct{}
func (self *alwaysAllowAuth) Authenticate(service, user, token, addr string) (bool, error) {
return true, nil
}
type chanReporter struct {
msgChan chan<- *proto.Message
errChan chan<- error
}
func (self *chanReporter) OnMessage(connId string, msg *proto.Message) {
if self.msgChan != nil {
self.msgChan <- msg
}
}
func (self *chanReporter) OnError(service, username, connId, addr string, err error) {
if self.errChan != nil {
self.errChan <- fmt.Errorf("[Service=%v][Username=%v] %v", service, username, err)
}
}
type nolimitServiceConfigReader struct {
msgChan chan<- *proto.Message
errChan chan<- error
}
func (self *nolimitServiceConfigReader) ReadConfig(service string) *ServiceConfig {
config := new(ServiceConfig)
chr := &chanReporter{self.msgChan, self.errChan}
config.ErrorHandler = chr
config.MessageHandler = chr
config.MsgCache = getCache()
return config
}
func getMessageCenter(addr string, msgChan chan<- *proto.Message, errChan chan<- error) (center *MessageCenter, pubkey *rsa.PublicKey, err error) {
ln, err := net.Listen("tcp", addr)
if err != nil {
return
}
privkey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return
}
pubkey = &privkey.PublicKey
authtimeout := 3 * time.Second
creader := &nolimitServiceConfigReader{msgChan, errChan}
chr := &chanReporter{nil, errChan}
center = NewMessageCenter(ln, privkey, chr, authtimeout, &alwaysAllowAuth{}, creader)
return
}
func connectServer(addr, username string, pub *rsa.PublicKey, digestChan chan<- *client.Digest) (conn client.Conn, err error) {
c, err := net.Dial("tcp", addr)
if err != nil {
return
}
conn, err = client.Dial(c, pub, "service", username, "token", 10*time.Second)
return
}
func server2client(center *MessageCenter, clients []client.Conn, errChan chan<- error, msgs ...*proto.Message) {
for _, client := range clients {
if client == nil {
continue
}
for _, msg := range msgs {
center.SendMessage(client.Service(), client.Username(), msg, nil, 0*time.Second)
}
}
}
func testClientReceived(client client.Conn, errChan chan<- error, msgs ...*proto.Message) {
for _, msg := range msgs {
m, err := client.ReadMessage()
if err != nil {
errChan <- err
continue
}
if !m.EqContent(msg) {
errChan <- fmt.Errorf("[client=%v] %v != %v", client.Username(), m, msg)
}
}
}
func reportError(errChan <-chan error, t *testing.T) {
for err := range errChan {
if err != nil {
t.Errorf("Error: %v", err)
}
}
}
func randomMessage() *proto.Message {
msg := new(proto.Message)
msg.Body = make([]byte, 10)
io.ReadFull(rand.Reader, msg.Body)
msg.Header = make(map[string]string, 2)
msg.Header["aaa"] = "hello"
msg.Header["aa"] = "hell"
return msg
}
func TestServerSendToClients(t *testing.T) {
addr := "127.0.0.1:8964"
N := 10
errChan := make(chan error)
go reportError(errChan, t)
defer close(errChan)
center, pubkey, err := getMessageCenter(addr, nil, errChan)
if err != nil {
t.Errorf("Error: %v", err)
return
}
go center.Start()
clients := make([]client.Conn, N)
wg := new(sync.WaitGroup)
msg := randomMessage()
for i, _ := range clients {
username := fmt.Sprintf("user-%v", i)
client, err := connectServer(addr, username, pubkey, nil)
if err != nil {
t.Errorf("Error: %v", err)
return
}
clients[i] = client
wg.Add(1)
go func() {
testClientReceived(client, errChan, msg)
wg.Done()
}()
}
server2client(center, clients, errChan, msg)
wg.Wait()
}
func receiveAndCompareMessages(msgChan <-chan *proto.Message, msgs map[string]*proto.Message, errChan chan<- error) {
for msg := range msgChan {
if m, ok := msgs[msg.Sender]; ok {
if !m.EqContent(msg) {
errChan <- fmt.Errorf("user %v should receive %v; but got %v", msg.Sender, m, msg)
}
} else {
errChan <- fmt.Errorf("Received message from unknown user: %v.", msg.Sender)
}
}
}
func TestClientsSendToServer(t *testing.T) {
addr := "127.0.0.1:8965"
N := 10
errChan := make(chan error)
go reportError(errChan, t)
defer close(errChan)
msgChan := make(chan *proto.Message)
center, pubkey, err := getMessageCenter(addr, msgChan, errChan)
if err != nil {
t.Errorf("Error: %v", err)
return
}
go center.Start()
clients := make([]client.Conn, N)
msgs := make(map[string]*proto.Message, N)
for i, _ := range clients {
username := fmt.Sprintf("user-%v", i)
client, err := connectServer(addr, username, pubkey, nil)
if err != nil {
t.Errorf("Error: %v", err)
return
}
msg := randomMessage()
msgs[username] = msg
clients[i] = client
}
go receiveAndCompareMessages(msgChan, msgs, errChan)
defer close(msgChan)
wg := new(sync.WaitGroup)
wg.Add(N)
for _, client := range clients {
msg := msgs[client.Username()]
go func() {
client.SendMessage(msg)
wg.Done()
}()
}
wg.Wait()
}
Fix race condition detected by `go test -race`
/*
* Copyright 2013 Nan Deng
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package msgcenter
import (
"crypto/rand"
"crypto/rsa"
"fmt"
"github.com/garyburd/redigo/redis"
"github.com/uniqush/uniqush-conn/msgcache"
"github.com/uniqush/uniqush-conn/proto"
"github.com/uniqush/uniqush-conn/proto/client"
"io"
"net"
"sync"
"testing"
"time"
)
// getCache flushes redis test database 1 and returns a message cache backed
// by it. Tests assume a local redis server on the default port.
func getCache() msgcache.Cache {
	db := 1
	// Best-effort flush of the test database before handing out the cache.
	// Previously the Dial error was discarded and a failed connection caused
	// a nil-pointer panic on c.Do; now the flush is skipped when redis is
	// unreachable (the cache itself will surface connection errors later).
	if c, err := redis.Dial("tcp", "localhost:6379"); err == nil {
		c.Do("SELECT", db)
		c.Do("FLUSHDB")
		c.Close()
	}
	return msgcache.NewRedisMessageCache("", "", db)
}
// alwaysAllowAuth is a test authenticator that accepts every connection.
type alwaysAllowAuth struct{}

// Authenticate always reports success, regardless of the credentials.
func (self *alwaysAllowAuth) Authenticate(service, user, token, addr string) (bool, error) {
	return true, nil
}

// chanReporter forwards received messages and errors onto channels so tests
// can observe them. Either channel may be nil, in which case that event
// kind is silently dropped.
type chanReporter struct {
	msgChan chan<- *proto.Message
	errChan chan<- error
}

// OnMessage forwards msg to msgChan when a message channel was provided.
func (self *chanReporter) OnMessage(connId string, msg *proto.Message) {
	if self.msgChan != nil {
		self.msgChan <- msg
	}
}

// OnError forwards err, tagged with service and username, to errChan when an
// error channel was provided.
func (self *chanReporter) OnError(service, username, connId, addr string, err error) {
	if self.errChan != nil {
		self.errChan <- fmt.Errorf("[Service=%v][Username=%v] %v", service, username, err)
	}
}
// nolimitServiceConfigReader builds a ServiceConfig with no limits set: a
// chanReporter as both message and error handler, and a fresh redis-backed
// message cache.
type nolimitServiceConfigReader struct {
	msgChan chan<- *proto.Message
	errChan chan<- error
}

// ReadConfig returns the same permissive config for every service name.
func (self *nolimitServiceConfigReader) ReadConfig(service string) *ServiceConfig {
	config := new(ServiceConfig)
	chr := &chanReporter{self.msgChan, self.errChan}
	config.ErrorHandler = chr
	config.MessageHandler = chr
	config.MsgCache = getCache()
	return config
}
// getMessageCenter listens on addr, generates a fresh 2048-bit RSA key, and
// returns a MessageCenter (not yet started) plus the public key clients must
// use to connect. Listen/keygen failures are returned via the named err.
func getMessageCenter(addr string, msgChan chan<- *proto.Message, errChan chan<- error) (center *MessageCenter, pubkey *rsa.PublicKey, err error) {
	ln, err := net.Listen("tcp", addr)
	if err != nil {
		return
	}
	privkey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return
	}
	pubkey = &privkey.PublicKey
	authtimeout := 3 * time.Second
	creader := &nolimitServiceConfigReader{msgChan, errChan}
	// Only errors are reported at the center level; messages flow through
	// the per-service config's handler instead.
	chr := &chanReporter{nil, errChan}
	center = NewMessageCenter(ln, privkey, chr, authtimeout, &alwaysAllowAuth{}, creader)
	return
}
// connectServer dials addr and performs the client handshake for username on
// service "service", with a fixed token and a 10-second timeout.
// NOTE(review): digestChan is accepted but never used here — confirm whether
// digest subscription should be wired up.
func connectServer(addr, username string, pub *rsa.PublicKey, digestChan chan<- *client.Digest) (conn client.Conn, err error) {
	c, err := net.Dial("tcp", addr)
	if err != nil {
		return
	}
	conn, err = client.Dial(c, pub, "service", username, "token", 10*time.Second)
	return
}
// server2client pushes every message in msgs to every non-nil client through
// the message center (no excluded connection, zero TTL).
// NOTE(review): errChan is accepted but never used — SendMessage errors are
// currently dropped.
func server2client(center *MessageCenter, clients []client.Conn, errChan chan<- error, msgs ...*proto.Message) {
	for _, client := range clients {
		if client == nil {
			continue
		}
		for _, msg := range msgs {
			center.SendMessage(client.Service(), client.Username(), msg, nil, 0*time.Second)
		}
	}
}
// testClientReceived reads len(msgs) messages from client and reports any
// read error or content mismatch (order-sensitive comparison) on errChan.
func testClientReceived(client client.Conn, errChan chan<- error, msgs ...*proto.Message) {
	for _, msg := range msgs {
		m, err := client.ReadMessage()
		if err != nil {
			errChan <- err
			continue
		}
		if !m.EqContent(msg) {
			errChan <- fmt.Errorf("[client=%v] %v != %v", client.Username(), m, msg)
		}
	}
}
// reportError drains errChan, recording each non-nil error as a test
// failure. It returns when errChan is closed; run it in its own goroutine.
func reportError(errChan <-chan error, t *testing.T) {
	for err := range errChan {
		if err != nil {
			t.Errorf("Error: %v", err)
		}
	}
}
// randomMessage builds a test payload: a 10-byte random body plus two fixed
// header entries.
func randomMessage() *proto.Message {
	msg := new(proto.Message)
	msg.Body = make([]byte, 10)
	io.ReadFull(rand.Reader, msg.Body)
	msg.Header = make(map[string]string, 2)
	msg.Header["aaa"] = "hello"
	msg.Header["aa"] = "hell"
	return msg
}
// TestServerSendToClients starts a message center, connects N clients, sends
// one message to every client from the server side, and verifies that each
// client receives it.
func TestServerSendToClients(t *testing.T) {
	addr := "127.0.0.1:8964"
	N := 10
	errChan := make(chan error)
	go reportError(errChan, t)
	defer close(errChan)
	center, pubkey, err := getMessageCenter(addr, nil, errChan)
	if err != nil {
		t.Errorf("Error: %v", err)
		return
	}
	go center.Start()
	clients := make([]client.Conn, N)
	wg := new(sync.WaitGroup)
	msg := randomMessage()
	for i, _ := range clients {
		username := fmt.Sprintf("user-%v", i)
		// client is declared inside the loop body, so the goroutine below
		// captures its own per-iteration variable (no loop-variable hazard).
		client, err := connectServer(addr, username, pubkey, nil)
		if err != nil {
			t.Errorf("Error: %v", err)
			return
		}
		clients[i] = client
		wg.Add(1)
		go func() {
			testClientReceived(client, errChan, msg)
			wg.Done()
		}()
	}
	server2client(center, clients, errChan, msg)
	wg.Wait()
}
// receiveAndCompareMessages consumes msgChan until it is closed, checking
// that each received message matches the expected message for its sender.
// Mismatches and unknown senders are reported on errChan.
func receiveAndCompareMessages(msgChan <-chan *proto.Message, msgs map[string]*proto.Message, errChan chan<- error) {
	for msg := range msgChan {
		if m, ok := msgs[msg.Sender]; ok {
			if !m.EqContent(msg) {
				errChan <- fmt.Errorf("user %v should receive %v; but got %v", msg.Sender, m, msg)
			}
		} else {
			errChan <- fmt.Errorf("Received message from unknown user: %v.", msg.Sender)
		}
	}
}
// TestClientsSendToServer starts a message center, connects N clients, has
// every client send one random message concurrently, and verifies that the
// server receives the expected message from each sender.
func TestClientsSendToServer(t *testing.T) {
	addr := "127.0.0.1:8965"
	N := 10
	errChan := make(chan error)
	go reportError(errChan, t)
	defer close(errChan)
	msgChan := make(chan *proto.Message)
	center, pubkey, err := getMessageCenter(addr, msgChan, errChan)
	if err != nil {
		t.Errorf("Error: %v", err)
		return
	}
	go center.Start()
	clients := make([]client.Conn, N)
	msgs := make(map[string]*proto.Message, N)
	for i := range clients {
		username := fmt.Sprintf("user-%v", i)
		c, err := connectServer(addr, username, pubkey, nil)
		if err != nil {
			t.Errorf("Error: %v", err)
			return
		}
		msgs[username] = randomMessage()
		clients[i] = c
	}
	go receiveAndCompareMessages(msgChan, msgs, errChan)
	defer close(msgChan)
	wg := new(sync.WaitGroup)
	wg.Add(N)
	// Gate all senders on a single start signal so the sends race with each
	// other (the point of the test) but not with the setup loop above.
	start := make(chan bool)
	for _, client := range clients {
		// Fix: shadow the loop variable. The goroutines only run after the
		// loop finishes (they block on <-start until close(start)), so
		// without this per-iteration copy they would all observe the final
		// loop value and every send would go through the last client.
		client := client
		go func() {
			<-start
			client.SendMessage(msgs[client.Username()])
			wg.Done()
		}()
	}
	close(start)
	wg.Wait()
}
|
package ipfix
import (
"errors"
"io"
"sync"
)
type IPFIXDecoder struct {
reader *Reader
}
type MessageHeader struct {
Version uint16 // Version of IPFIX to which this Message conforms
Length uint16 // Total length of the IPFIX Message, measured in octets
ExportTime uint32 // Time at which the IPFIX Message Header leaves the Exporter
SequenceNo uint32 // Incremental sequence counter modulo 2^32
DomainID uint32 // A 32-bit id that is locally unique to the Exporting Process
}
type Message struct {
Header MessageHeader
TemplateSets []TemplateSet
DataSets []DataSet
}
type TemplateSet struct {
}
type DataSet struct {
}
type Session struct {
buff *sync.Pool
}
type SetHeader struct {
SetID uint16
Length uint16
}
var (
errInvalidVersion = errors.New("invalid ipfix version")
)
func NewDecoder(r io.Reader) (*IPFIXDecoder, error) {
data := make([]byte, 1500)
n, err := r.Read(data)
if err != nil {
return nil, err
}
return &IPFIXDecoder{NewReader(data[:n])}, nil
}
func (d *IPFIXDecoder) Decode() error {
var (
msg Message
err error
)
// IPFIX Message Header decoding
if err = msg.Header.unmarshal(d.reader); err != nil {
return err
}
// IPFIX Message Header validation
if err = msg.Header.validate(); err != nil {
return err
}
for d.reader.Len() > 0 {
setHeader := new(SetHeader)
setHeader.unmarshal(d.reader)
if setHeader.Length < 4 {
return io.ErrUnexpectedEOF
}
switch {
case setHeader.SetID == 2:
// Template set
println("Template")
case setHeader.SetID == 3:
println("option")
case setHeader.SetID >= 4 && setHeader.SetID <= 255:
println("silent")
default:
println("data")
}
break
}
return nil
}
// RFC 7011 - part 3.1. Message Header Format
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Version Number | Length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Export Time |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Sequence Number |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Observation Domain ID |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
func (h *MessageHeader) unmarshal(r *Reader) error {
var err error
if h.Version, err = r.Uint16(); err != nil {
return err
}
if h.Length, err = r.Uint16(); err != nil {
return err
}
if h.ExportTime, err = r.Uint32(); err != nil {
return err
}
if h.SequenceNo, err = r.Uint32(); err != nil {
return err
}
if h.DomainID, err = r.Uint32(); err != nil {
return err
}
return nil
}
func (h *MessageHeader) validate() error {
if h.Version != 0x000a {
return errInvalidVersion
}
// TODO: needs more validation
return nil
}
// RFC 7011 - part 3.3.2 Set Header Format
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Set ID | Length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
func (h *SetHeader) unmarshal(r *Reader) error {
var err error
if h.SetID, err = r.Uint16(); err != nil {
return err
}
if h.Length, err = r.Uint16(); err != nil {
return err
}
return nil
}
Add template header decoder
package ipfix
import (
"errors"
"io"
"sync"
)
// IPFIXDecoder decodes IPFIX (NetFlow v10) messages from a received packet.
type IPFIXDecoder struct {
	reader *Reader // buffered view of one received packet
}

// MessageHeader is the 16-octet IPFIX message header (RFC 7011, section 3.1).
type MessageHeader struct {
	Version    uint16 // Version of IPFIX to which this Message conforms
	Length     uint16 // Total length of the IPFIX Message, measured in octets
	ExportTime uint32 // Time at which the IPFIX Message Header leaves the Exporter
	SequenceNo uint32 // Incremental sequence counter modulo 2^32
	DomainID   uint32 // A 32-bit id that is locally unique to the Exporting Process
}

// TemplateHeader is the 4-octet header of a template record.
type TemplateHeader struct {
	TemplateID uint16
	FieldCount uint16
}

// Message is a decoded IPFIX message: header plus its template and data sets.
type Message struct {
	Header       MessageHeader
	TemplateSets []TemplateSet
	DataSets     []DataSet
}

// TemplateSet holds decoded template records. Fields TBD (work in progress).
type TemplateSet struct {
}

// DataSet holds decoded data records. Fields TBD (work in progress).
type DataSet struct {
}

// Session holds per-session scratch buffers.
// NOTE(review): buff is not referenced by the visible code yet.
type Session struct {
	buff *sync.Pool
}

// SetHeader is the 4-octet set header (RFC 7011, section 3.3.2).
type SetHeader struct {
	SetID  uint16
	Length uint16
}

var (
	// errInvalidVersion is returned when the message version is not 0x000a.
	errInvalidVersion = errors.New("invalid ipfix version")
)
// NewDecoder reads a single packet (up to 1500 bytes) from r and returns a
// decoder over the bytes read.
// NOTE(review): assumes one Read returns a complete IPFIX message (UDP
// datagram semantics); a stream transport could deliver a partial message —
// confirm before using over TCP/SCTP.
func NewDecoder(r io.Reader) (*IPFIXDecoder, error) {
	data := make([]byte, 1500)
	n, err := r.Read(data)
	if err != nil {
		return nil, err
	}
	return &IPFIXDecoder{NewReader(data[:n])}, nil
}
// Decode reads one IPFIX message from the underlying buffer: it decodes and
// validates the message header, then dispatches the first set by its Set ID
// (2 = template, 3 = options template, 4-255 = reserved, >=256 = data).
//
// NOTE(review): only the first set is processed (the loop breaks after one
// iteration) and option/reserved/data sets are not decoded yet — this
// decoder is a work in progress.
func (d *IPFIXDecoder) Decode() error {
	var msg Message
	// IPFIX Message Header decoding
	if err := msg.Header.unmarshal(d.reader); err != nil {
		return err
	}
	// IPFIX Message Header validation
	if err := msg.Header.validate(); err != nil {
		return err
	}
	for d.reader.Len() > 0 {
		setHeader := new(SetHeader)
		// Fix: this error was discarded; a truncated packet now fails loudly
		// instead of continuing with a zeroed set header.
		if err := setHeader.unmarshal(d.reader); err != nil {
			return err
		}
		// Per RFC 7011, a set is at least 4 octets (its own header).
		if setHeader.Length < 4 {
			return io.ErrUnexpectedEOF
		}
		switch {
		case setHeader.SetID == 2:
			// Template set
			ts := new(TemplateSet)
			if err := ts.unmarshal(d.reader); err != nil {
				return err
			}
		case setHeader.SetID == 3:
			// Option set
		case setHeader.SetID >= 4 && setHeader.SetID <= 255:
			// Reserved
		default:
			// data
		}
		break // TODO: decode the remaining sets in the message
	}
	return nil
}
// RFC 7011 - part 3.1. Message Header Format
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Version Number | Length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Export Time |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Sequence Number |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Observation Domain ID |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// unmarshal decodes the 16-octet message header fields from r, in wire order
// (version, length, export time, sequence number, observation domain ID).
func (h *MessageHeader) unmarshal(r *Reader) error {
	var err error
	if h.Version, err = r.Uint16(); err != nil {
		return err
	}
	if h.Length, err = r.Uint16(); err != nil {
		return err
	}
	if h.ExportTime, err = r.Uint32(); err != nil {
		return err
	}
	if h.SequenceNo, err = r.Uint32(); err != nil {
		return err
	}
	if h.DomainID, err = r.Uint32(); err != nil {
		return err
	}
	return nil
}
// validate sanity-checks a decoded message header. Currently only the
// version number is verified: anything other than 0x000a (IPFIX) is
// rejected with errInvalidVersion.
func (h *MessageHeader) validate() error {
	if h.Version == 0x000a {
		// TODO: needs more validation
		return nil
	}
	return errInvalidVersion
}
// RFC 7011 - part 3.3.2 Set Header Format
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Set ID | Length |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// unmarshal reads the set ID and the set length from r, returning the
// first read error encountered.
func (h *SetHeader) unmarshal(r *Reader) error {
	var err error
	if h.SetID, err = r.Uint16(); err != nil {
		return err
	}
	if h.Length, err = r.Uint16(); err != nil {
		return err
	}
	return nil
}
// RFC 7011
// 0 1 2 3
// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// | Template ID | Field Count |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// unmarshal reads the template ID and field count from r, returning the
// first read error encountered.
func (t *TemplateHeader) unmarshal(r *Reader) error {
	var err error
	if t.TemplateID, err = r.Uint16(); err != nil {
		return err
	}
	if t.FieldCount, err = r.Uint16(); err != nil {
		return err
	}
	return nil
}
// unmarshal decodes the template record header at the current read
// position. Field specifiers are not decoded yet.
func (t *TemplateSet) unmarshal(r *Reader) error {
	th := new(TemplateHeader)
	// Propagate decode errors (the original ignored this return value).
	if err := th.unmarshal(r); err != nil {
		return err
	}
	// Debug output left from the original implementation; remove once
	// field-specifier decoding lands.
	println(th.TemplateID, th.FieldCount)
	return nil
}
|
package help
import (
"bytes"
"text/template"
)
// Resource describes a CLI resource and the commands it offers.
type Resource struct {
	Name string
	// NOTE(review): Commands is []Argument, not []Command — the resource
	// template only uses Name/Description, which Argument provides, but
	// the element type looks accidental; confirm with callers.
	Commands []Argument
}

// Command is a single CLI command: its brief description lines and the
// arguments it accepts.
type Command struct {
	Brief     []string
	Arguments []Argument
}

// Argument is a named command argument with its description lines.
type Argument struct {
	Name        string
	Description []string
}
// resourceHelpTemplate lists the commands available for a resource,
// each followed by its tab-indented description lines.
var resourceHelpTemplate = `Available {{.Name}} commands:
{{range .Commands}}
{{.Name}}
{{range .Description}}{{ printf "\t\t" }}{{ . }}{{ printf "\n" }}{{end}}
{{end}}`
// commandHelpTemplate renders help for a single command: its brief
// description, its parameters, and the global options and environment
// variables. The PARAMETERS section is emitted only when the command
// actually has arguments; the original printed an empty section header
// for argument-less commands.
var commandHelpTemplate = `{{range .Brief}}{{ . }}{{ printf " " }}{{ end }}
{{if .Arguments}}PARAMETERS:
{{range .Arguments}}
{{.Name}}
{{range .Description}} {{ printf "\t\t" }} {{ . }} {{ printf "\n" }} {{end}}
{{end}}
{{end}}OPTIONS:
--help
Shows general help or help for the given resource or command.
--user
Specifies a user name for the account.
--password
Specifies the password for the given user.
--profile
Specifies a profile to use (one from the config file).
--output
Specifies the output format - either 'json', 'text' or 'table'.
Defaults to 'json'.
--generate-cli-skeleton
If specified, the command is not actually executed. Instead, JSON with all of
its arguments is returned. All the arguments and options specified along with
this option are included in the output.
--from-file
Specifies a JSON file to load command arguments and options from. No other
arguments or options can be specified.
--query
Restricts the fields in the result to only those specified with this option.
Multiple fields can be separated by comma. Nested fields can be queried using dot.
Multiple nested fields are also separated by comma and must be enclosed in curly
braces on the deepest level of nesting. Note, that a shell may treat curly braces
in a special way so put the whole query in quotes to avoid errors.
Aliases may be set for the nested fields using semicolons inside the braces.
An example:
clc server list --query "details.IP-addresses.{I:internal,P:public}"
--filter
Filters out the returned entities that do not match the given conditions. Multiple
conditions are separated via comma. Each condition consists of a field, an
operation and a value. Supported operations are:
= equals, applicable to strings, numbers and booleans
^= (starts with), $= (ends with), ~= (contains) these three are only for strings
<,<=,>,>= comparison operators, can be used with numbers and strings
--trace
If specified, prints out all the HTTP request/response data.
ENVIRONMENT VARIABLES:
CLC_USER Specifies a user name for the account.
CLC_PASSWORD Specifies the password for the given user.
CLC_PROFILE Specifies a profile to use (one from the config file).
CLC_TRACE If specified (any non-empty value fits), prints out all the HTTP request/response data.
`
// ForCommand renders the command help template for cmd. It panics on a
// template parse or execute failure, which indicates a programming
// error in the template rather than bad user input.
func ForCommand(cmd Command) string {
	tmpl, err := template.New("command help").Parse(commandHelpTemplate)
	if err != nil {
		panic(err)
	}
	// buf.String() avoids the extra []byte -> string round trip that
	// string(buf.Bytes()) performed.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, cmd); err != nil {
		panic(err)
	}
	return buf.String()
}
// ForResource renders the resource help template for r. It panics on a
// template parse or execute failure, which indicates a programming
// error in the template rather than bad user input.
func ForResource(r Resource) string {
	tmpl, err := template.New("resource help").Parse(resourceHelpTemplate)
	if err != nil {
		panic(err)
	}
	// buf.String() avoids the extra []byte -> string round trip that
	// string(buf.Bytes()) performed.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, r); err != nil {
		panic(err)
	}
	return buf.String()
}
Make the "parameters" block in command help template conditional
package help
import (
"bytes"
"text/template"
)
// Resource describes a CLI resource and the commands it offers.
type Resource struct {
	Name string
	// NOTE(review): Commands is []Argument, not []Command — the resource
	// template only uses Name/Description, which Argument provides, but
	// the element type looks accidental; confirm with callers.
	Commands []Argument
}

// Command is a single CLI command: its brief description lines and the
// arguments it accepts.
type Command struct {
	Brief     []string
	Arguments []Argument
}

// Argument is a named command argument with its description lines.
type Argument struct {
	Name        string
	Description []string
}
// resourceHelpTemplate lists the commands available for a resource,
// each followed by its tab-indented description lines.
var resourceHelpTemplate = `Available {{.Name}} commands:
{{range .Commands}}
{{.Name}}
{{range .Description}}{{ printf "\t\t" }}{{ . }}{{ printf "\n" }}{{end}}
{{end}}`
var commandHelpTemplate = `{{range .Brief}}{{ . }}{{ printf " " }}{{ end }}
{{if .Arguments}}PARAMETERS:
{{range .Arguments}}
{{.Name}}
{{range .Description}} {{ printf "\t\t" }} {{ . }} {{ printf "\n" }} {{end}}
{{end}}
{{end}}OPTIONS:
--help
Shows general help or help for the given resource or command.
--user
Specifies a user name for the account.
--password
Specifies the password for the given user.
--profile
Specifies a profile to use (one from the config file).
--output
Specifies the output format - either 'json', 'text' or 'table'.
Defaults to 'json'.
--generate-cli-skeleton
If specified, the command is not actually executed. Instead, JSON with all of
its arguments is returned. All the arguments and options specified along with
this option are included in the output.
--from-file
Specifies a JSON file to load command arguments and options from. No other
arguments or options can be specified.
--query
Restricts the fields in the result to only those specified with this option.
Multiple fields can be separated by comma. Nested fields can be queried using dot.
Multiple nested fields are also separated by comma and must be enclosed in curly
braces on the deepest level of nesting. Note, that a shell may treat curly braces
in a special way so put the whole query in quotes to avoid errors.
Aliases may be set for the nested fields using semicolons inside the braces.
An example:
clc server list --query "details.IP-addresses.{I:internal,P:public}"
--filter
Filters out the returned entities that do not match the given conditions. Multiple
conditions are separated via comma. Each condition consists of a field, an
operation and a value. Supported operations are:
= equals, applicable to strings, numbers and booleans
^= (starts with), $= (ends with), ~= (contains) these three are only for strings
<,<=,>,>= comparison operators, can be used with numbers and strings
--trace
If specified, prints out all the HTTP request/response data.
ENVIRONMENT VARIABLES:
CLC_USER Specifies a user name for the account.
CLC_PASSWORD Specifies the password for the given user.
CLC_PROFILE Specifies a profile to use (one from the config file).
CLC_TRACE If specified (any non-empty value fits), prints out all the HTTP request/response data.
`
// ForCommand renders the command help template for cmd. It panics on a
// template parse or execute failure, which indicates a programming
// error in the template rather than bad user input.
func ForCommand(cmd Command) string {
	tmpl, err := template.New("command help").Parse(commandHelpTemplate)
	if err != nil {
		panic(err)
	}
	// buf.String() avoids the extra []byte -> string round trip that
	// string(buf.Bytes()) performed.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, cmd); err != nil {
		panic(err)
	}
	return buf.String()
}
// ForResource renders the resource help template for r. It panics on a
// template parse or execute failure, which indicates a programming
// error in the template rather than bad user input.
func ForResource(r Resource) string {
	tmpl, err := template.New("resource help").Parse(resourceHelpTemplate)
	if err != nil {
		panic(err)
	}
	// buf.String() avoids the extra []byte -> string round trip that
	// string(buf.Bytes()) performed.
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, r); err != nil {
		panic(err)
	}
	return buf.String()
}
|
// Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
package termui
import (
"fmt"
"math"
)
// only 16 possible combinations, why bother
// braillePatterns maps a pair of subcell levels (left point, right
// point, each 0-3 counted from the bottom) to the braille rune that
// draws both points in one cell. The redundant `[2]int` on every key is
// elided (gofmt -s / composite-literal key elision).
var braillePatterns = map[[2]int]rune{
	{0, 0}: '⣀',
	{0, 1}: '⡠',
	{0, 2}: '⡐',
	{0, 3}: '⡈',
	{1, 0}: '⢄',
	{1, 1}: '⠤',
	{1, 2}: '⠔',
	{1, 3}: '⠌',
	{2, 0}: '⢂',
	{2, 1}: '⠢',
	{2, 2}: '⠒',
	{2, 3}: '⠊',
	{3, 0}: '⢁',
	{3, 1}: '⠡',
	{3, 2}: '⠑',
	{3, 3}: '⠉',
}
var lSingleBraille = [4]rune{'\u2840', '⠄', '⠂', '⠁'}
var rSingleBraille = [4]rune{'\u2880', '⠠', '⠐', '⠈'}
// LineChart has two modes: braille(default) and dot. Using braille gives 2x the capacity of dot mode,
// because one braille char can represent two data points.
/*
lc := termui.NewLineChart()
lc.BorderLabel = "braille-mode Line Chart"
lc.Data = [1.2, 1.3, 1.5, 1.7, 1.5, 1.6, 1.8, 2.0]
lc.Width = 50
lc.Height = 12
lc.AxesColor = termui.ColorWhite
lc.LineColor = termui.ColorGreen | termui.AttrBold
// termui.Render(lc)...
*/
type LineChart struct {
Block
Data []float64
DataLabels []string // if unset, the data indices will be used
Mode string // braille | dot
DotStyle rune
LineColor Attribute
scale float64 // data span per cell on y-axis
AxesColor Attribute
drawingX int
drawingY int
axisYHeight int
axisXWidth int
axisYLabelGap int
axisXLabelGap int
topValue float64
bottomValue float64
labelX [][]rune
labelY [][]rune
labelYSpace int
maxY float64
minY float64
autoLabels bool
}
// NewLineChart returns a new LineChart with current theme.
func NewLineChart() *LineChart {
lc := &LineChart{Block: *NewBlock()}
lc.AxesColor = ThemeAttr("linechart.axes.fg")
lc.LineColor = ThemeAttr("linechart.line.fg")
lc.Mode = "braille"
lc.DotStyle = '•'
lc.axisXLabelGap = 2
lc.axisYLabelGap = 1
lc.bottomValue = math.Inf(1)
lc.topValue = math.Inf(-1)
return lc
}
// one cell contains two data points
// so the capacity is 2x that of dot-mode
func (lc *LineChart) renderBraille() Buffer {
buf := NewBuffer()
// return: b -> which cell should the point be in
// m -> in the cell, divided into 4 equal height levels, which subcell?
getPos := func(d float64) (b, m int) {
cnt4 := int((d-lc.bottomValue)/(lc.scale/4) + 0.5)
b = cnt4 / 4
m = cnt4 % 4
return
}
// plot points
for i := 0; 2*i+1 < len(lc.Data) && i < lc.axisXWidth; i++ {
b0, m0 := getPos(lc.Data[2*i])
b1, m1 := getPos(lc.Data[2*i+1])
if b0 == b1 {
c := Cell{
Ch: braillePatterns[[2]int{m0, m1}],
Bg: lc.Bg,
Fg: lc.LineColor,
}
y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
buf.Set(x, y, c)
} else {
c0 := Cell{Ch: lSingleBraille[m0],
Fg: lc.LineColor,
Bg: lc.Bg}
x0 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
y0 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
buf.Set(x0, y0, c0)
c1 := Cell{Ch: rSingleBraille[m1],
Fg: lc.LineColor,
Bg: lc.Bg}
x1 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
y1 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b1
buf.Set(x1, y1, c1)
}
}
return buf
}
func (lc *LineChart) renderDot() Buffer {
buf := NewBuffer()
for i := 0; i < len(lc.Data) && i < lc.axisXWidth; i++ {
c := Cell{
Ch: lc.DotStyle,
Fg: lc.LineColor,
Bg: lc.Bg,
}
x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - int((lc.Data[i]-lc.bottomValue)/lc.scale+0.5)
buf.Set(x, y, c)
}
return buf
}
func (lc *LineChart) calcLabelX() {
lc.labelX = [][]rune{}
for i, l := 0, 0; i < len(lc.DataLabels) && l < lc.axisXWidth; i++ {
if lc.Mode == "dot" {
if l >= len(lc.DataLabels) {
break
}
s := str2runes(lc.DataLabels[l])
w := strWidth(lc.DataLabels[l])
if l+w <= lc.axisXWidth {
lc.labelX = append(lc.labelX, s)
}
l += w + lc.axisXLabelGap
} else { // braille
if 2*l >= len(lc.DataLabels) {
break
}
s := str2runes(lc.DataLabels[2*l])
w := strWidth(lc.DataLabels[2*l])
if l+w <= lc.axisXWidth {
lc.labelX = append(lc.labelX, s)
}
l += w + lc.axisXLabelGap
}
}
}
// shortenFloatVal formats x for a y-axis label: two fixed decimals
// normally, switching to two-decimal scientific notation when the fixed
// form grows past six characters. Negative values always use the fixed
// form, matching the original behavior.
func shortenFloatVal(x float64) string {
	if x < 0 {
		return fmt.Sprintf("%.2f", x)
	}
	fixed := fmt.Sprintf("%.2f", x)
	if len(fixed)-3 > 3 {
		return fmt.Sprintf("%.2e", x)
	}
	return fixed
}
func (lc *LineChart) calcLabelY() {
span := lc.topValue - lc.bottomValue
lc.scale = span / float64(lc.axisYHeight)
n := (1 + lc.axisYHeight) / (lc.axisYLabelGap + 1)
lc.labelY = make([][]rune, n)
maxLen := 0
for i := 0; i < n; i++ {
s := str2runes(shortenFloatVal(lc.bottomValue + float64(i)*span/float64(n)))
if len(s) > maxLen {
maxLen = len(s)
}
lc.labelY[i] = s
}
lc.labelYSpace = maxLen
}
func (lc *LineChart) calcLayout() {
// set datalabels if it is not provided
if (lc.DataLabels == nil || len(lc.DataLabels) == 0) || lc.autoLabels {
lc.autoLabels = true
lc.DataLabels = make([]string, len(lc.Data))
for i := range lc.Data {
lc.DataLabels[i] = fmt.Sprint(i)
}
}
// lazy increase, to avoid y shaking frequently
// update bound Y when drawing is gonna overflow
lc.minY = lc.Data[0]
lc.maxY = lc.Data[0]
// valid visible range
vrange := lc.innerArea.Dx()
if lc.Mode == "braille" {
vrange = 2 * lc.innerArea.Dx()
}
if vrange > len(lc.Data) {
vrange = len(lc.Data)
}
for _, v := range lc.Data[:vrange] {
if v > lc.maxY {
lc.maxY = v
}
if v < lc.minY {
lc.minY = v
}
}
span := lc.maxY - lc.minY
if lc.minY < lc.bottomValue {
lc.bottomValue = lc.minY - 0.2*span
}
if lc.maxY > lc.topValue {
lc.topValue = lc.maxY + 0.2*span
}
lc.axisYHeight = lc.innerArea.Dy() - 2
lc.calcLabelY()
lc.axisXWidth = lc.innerArea.Dx() - 1 - lc.labelYSpace
lc.calcLabelX()
lc.drawingX = lc.innerArea.Min.X + 1 + lc.labelYSpace
lc.drawingY = lc.innerArea.Min.Y
}
func (lc *LineChart) plotAxes() Buffer {
buf := NewBuffer()
origY := lc.innerArea.Min.Y + lc.innerArea.Dy() - 2
origX := lc.innerArea.Min.X + lc.labelYSpace
buf.Set(origX, origY, Cell{Ch: ORIGIN, Fg: lc.AxesColor, Bg: lc.Bg})
for x := origX + 1; x < origX+lc.axisXWidth; x++ {
buf.Set(x, origY, Cell{Ch: HDASH, Fg: lc.AxesColor, Bg: lc.Bg})
}
for dy := 1; dy <= lc.axisYHeight; dy++ {
buf.Set(origX, origY-dy, Cell{Ch: VDASH, Fg: lc.AxesColor, Bg: lc.Bg})
}
// x label
oft := 0
for _, rs := range lc.labelX {
if oft+len(rs) > lc.axisXWidth {
break
}
for j, r := range rs {
c := Cell{
Ch: r,
Fg: lc.AxesColor,
Bg: lc.Bg,
}
x := origX + oft + j
y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 1
buf.Set(x, y, c)
}
oft += len(rs) + lc.axisXLabelGap
}
// y labels
for i, rs := range lc.labelY {
for j, r := range rs {
buf.Set(
lc.innerArea.Min.X+j,
origY-i*(lc.axisYLabelGap+1),
Cell{Ch: r, Fg: lc.AxesColor, Bg: lc.Bg})
}
}
return buf
}
// Buffer implements Bufferer interface.
func (lc *LineChart) Buffer() Buffer {
	buf := lc.Block.Buffer()
	// len of a nil slice is 0, so the explicit nil check was redundant.
	if len(lc.Data) == 0 {
		return buf
	}
	// calcLayout requires non-empty Data (it reads Data[0]).
	lc.calcLayout()
	buf.Merge(lc.plotAxes())
	if lc.Mode == "dot" {
		buf.Merge(lc.renderDot())
	} else {
		buf.Merge(lc.renderBraille())
	}
	return buf
}
spelling: capacity
// Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.
// Use of this source code is governed by a MIT license that can
// be found in the LICENSE file.
package termui
import (
"fmt"
"math"
)
// only 16 possible combinations, why bother
// braillePatterns maps a pair of subcell levels (left point, right
// point, each 0-3 counted from the bottom) to the braille rune that
// draws both points in one cell. The redundant `[2]int` on every key is
// elided (gofmt -s / composite-literal key elision).
var braillePatterns = map[[2]int]rune{
	{0, 0}: '⣀',
	{0, 1}: '⡠',
	{0, 2}: '⡐',
	{0, 3}: '⡈',
	{1, 0}: '⢄',
	{1, 1}: '⠤',
	{1, 2}: '⠔',
	{1, 3}: '⠌',
	{2, 0}: '⢂',
	{2, 1}: '⠢',
	{2, 2}: '⠒',
	{2, 3}: '⠊',
	{3, 0}: '⢁',
	{3, 1}: '⠡',
	{3, 2}: '⠑',
	{3, 3}: '⠉',
}
var lSingleBraille = [4]rune{'\u2840', '⠄', '⠂', '⠁'}
var rSingleBraille = [4]rune{'\u2880', '⠠', '⠐', '⠈'}
// LineChart has two modes: braille(default) and dot. Using braille gives 2x capacity as dot mode,
// because one braille char can represent two data points.
/*
lc := termui.NewLineChart()
lc.BorderLabel = "braille-mode Line Chart"
lc.Data = [1.2, 1.3, 1.5, 1.7, 1.5, 1.6, 1.8, 2.0]
lc.Width = 50
lc.Height = 12
lc.AxesColor = termui.ColorWhite
lc.LineColor = termui.ColorGreen | termui.AttrBold
// termui.Render(lc)...
*/
type LineChart struct {
	Block
	Data       []float64
	DataLabels []string // if unset, the data indices will be used
	Mode       string   // braille | dot
	DotStyle   rune     // rune drawn per point in dot mode
	LineColor  Attribute
	scale      float64 // data span per cell on y-axis
	AxesColor  Attribute
	// Layout state below is recomputed by calcLayout on every Buffer call.
	drawingX      int
	drawingY      int
	axisYHeight   int // rows available for the y axis
	axisXWidth    int // columns available for the x axis
	axisYLabelGap int // blank rows between successive y labels
	axisXLabelGap int // blank columns between successive x labels
	topValue      float64 // upper y bound; only ever widens (lazy increase)
	bottomValue   float64 // lower y bound; only ever widens (lazy increase)
	labelX        [][]rune
	labelY        [][]rune
	labelYSpace   int // width of the widest rendered y label
	maxY          float64
	minY          float64
	autoLabels    bool // set once DataLabels were auto-generated from indices
}
// NewLineChart returns a new LineChart with current theme.
func NewLineChart() *LineChart {
	lc := &LineChart{Block: *NewBlock()}
	lc.AxesColor = ThemeAttr("linechart.axes.fg")
	lc.LineColor = ThemeAttr("linechart.line.fg")
	lc.Mode = "braille"
	lc.DotStyle = '•'
	lc.axisXLabelGap = 2
	lc.axisYLabelGap = 1
	// Seed the y bounds at +Inf/-Inf so the first calcLayout always
	// widens them to fit the data.
	lc.bottomValue = math.Inf(1)
	lc.topValue = math.Inf(-1)
	return lc
}
// one cell contains two data points
// so the capacity is 2x as dot-mode
// renderBraille plots the data with braille characters; each screen
// cell holds two consecutive data points, one in its left dot column
// and one in its right dot column.
func (lc *LineChart) renderBraille() Buffer {
	buf := NewBuffer()
	// return: b -> which cell should the point be in
	// m -> in the cell, divided into 4 equal height levels, which subcell?
	getPos := func(d float64) (b, m int) {
		cnt4 := int((d-lc.bottomValue)/(lc.scale/4) + 0.5)
		b = cnt4 / 4
		m = cnt4 % 4
		return
	}
	// plot points: i is the screen column, covering Data[2i] and Data[2i+1]
	for i := 0; 2*i+1 < len(lc.Data) && i < lc.axisXWidth; i++ {
		b0, m0 := getPos(lc.Data[2*i])
		b1, m1 := getPos(lc.Data[2*i+1])
		if b0 == b1 {
			// Both points land in the same cell: one rune draws both.
			c := Cell{
				Ch: braillePatterns[[2]int{m0, m1}],
				Bg: lc.Bg,
				Fg: lc.LineColor,
			}
			y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
			x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
			buf.Set(x, y, c)
		} else {
			// Points land in different rows: draw a lone left-column dot
			// and a lone right-column dot in their respective cells.
			c0 := Cell{Ch: lSingleBraille[m0],
				Fg: lc.LineColor,
				Bg: lc.Bg}
			x0 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
			y0 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b0
			buf.Set(x0, y0, c0)
			c1 := Cell{Ch: rSingleBraille[m1],
				Fg: lc.LineColor,
				Bg: lc.Bg}
			x1 := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
			y1 := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - b1
			buf.Set(x1, y1, c1)
		}
	}
	return buf
}
// renderDot plots one DotStyle rune per data point — one point per
// screen column, half the density of braille mode.
func (lc *LineChart) renderDot() Buffer {
	buf := NewBuffer()
	for i := 0; i < len(lc.Data) && i < lc.axisXWidth; i++ {
		c := Cell{
			Ch: lc.DotStyle,
			Fg: lc.LineColor,
			Bg: lc.Bg,
		}
		x := lc.innerArea.Min.X + lc.labelYSpace + 1 + i
		// Row = distance above the bottom value in cells, rounded to nearest.
		y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 3 - int((lc.Data[i]-lc.bottomValue)/lc.scale+0.5)
		buf.Set(x, y, c)
	}
	return buf
}
// calcLabelX selects the x-axis labels that fit within axisXWidth.
// l tracks the column offset where the next label would start; in
// braille mode each column covers two data points, so labels are taken
// from every other DataLabels entry.
func (lc *LineChart) calcLabelX() {
	lc.labelX = [][]rune{}
	for i, l := 0, 0; i < len(lc.DataLabels) && l < lc.axisXWidth; i++ {
		if lc.Mode == "dot" {
			// NOTE(review): l is a column offset but is used here as an
			// index into DataLabels — presumably the data index was
			// intended; confirm against upstream before changing.
			if l >= len(lc.DataLabels) {
				break
			}
			s := str2runes(lc.DataLabels[l])
			w := strWidth(lc.DataLabels[l])
			if l+w <= lc.axisXWidth {
				lc.labelX = append(lc.labelX, s)
			}
			l += w + lc.axisXLabelGap
		} else { // braille
			if 2*l >= len(lc.DataLabels) {
				break
			}
			s := str2runes(lc.DataLabels[2*l])
			w := strWidth(lc.DataLabels[2*l])
			if l+w <= lc.axisXWidth {
				lc.labelX = append(lc.labelX, s)
			}
			l += w + lc.axisXLabelGap
		}
	}
}
// shortenFloatVal formats x for a y-axis label: two fixed decimals
// normally, switching to two-decimal scientific notation when the fixed
// form grows past six characters. Negative values always use the fixed
// form, matching the original behavior.
func shortenFloatVal(x float64) string {
	if x < 0 {
		return fmt.Sprintf("%.2f", x)
	}
	fixed := fmt.Sprintf("%.2f", x)
	if len(fixed)-3 > 3 {
		return fmt.Sprintf("%.2e", x)
	}
	return fixed
}
// calcLabelY recomputes the y scale from the current top/bottom bounds
// and renders evenly spaced y-axis labels. labelYSpace is set to the
// widest label so the axis can be drawn past the label column.
func (lc *LineChart) calcLabelY() {
	span := lc.topValue - lc.bottomValue
	lc.scale = span / float64(lc.axisYHeight)
	// One label every axisYLabelGap+1 rows.
	n := (1 + lc.axisYHeight) / (lc.axisYLabelGap + 1)
	lc.labelY = make([][]rune, n)
	maxLen := 0
	for i := 0; i < n; i++ {
		s := str2runes(shortenFloatVal(lc.bottomValue + float64(i)*span/float64(n)))
		if len(s) > maxLen {
			maxLen = len(s)
		}
		lc.labelY[i] = s
	}
	lc.labelYSpace = maxLen
}
// calcLayout prepares everything Buffer needs before plotting: default
// data labels, the lazily widened y bounds, axis dimensions, and the
// rendered axis labels.
// NOTE(review): reads lc.Data[0] unguarded — callers must ensure Data
// is non-empty (Buffer does).
func (lc *LineChart) calcLayout() {
	// set datalabels if it is not provided
	if (lc.DataLabels == nil || len(lc.DataLabels) == 0) || lc.autoLabels {
		lc.autoLabels = true
		lc.DataLabels = make([]string, len(lc.Data))
		for i := range lc.Data {
			lc.DataLabels[i] = fmt.Sprint(i)
		}
	}
	// lazy increase, to avoid y shaking frequently
	// update bound Y when drawing is gonna overflow
	lc.minY = lc.Data[0]
	lc.maxY = lc.Data[0]
	// valid visible range; braille fits two data points per column
	vrange := lc.innerArea.Dx()
	if lc.Mode == "braille" {
		vrange = 2 * lc.innerArea.Dx()
	}
	if vrange > len(lc.Data) {
		vrange = len(lc.Data)
	}
	for _, v := range lc.Data[:vrange] {
		if v > lc.maxY {
			lc.maxY = v
		}
		if v < lc.minY {
			lc.minY = v
		}
	}
	// Pad the bounds by 20% of the span so the curve does not hug the
	// frame; the bounds only ever widen (they start at +/-Inf).
	span := lc.maxY - lc.minY
	if lc.minY < lc.bottomValue {
		lc.bottomValue = lc.minY - 0.2*span
	}
	if lc.maxY > lc.topValue {
		lc.topValue = lc.maxY + 0.2*span
	}
	lc.axisYHeight = lc.innerArea.Dy() - 2
	lc.calcLabelY()
	lc.axisXWidth = lc.innerArea.Dx() - 1 - lc.labelYSpace
	lc.calcLabelX()
	lc.drawingX = lc.innerArea.Min.X + 1 + lc.labelYSpace
	lc.drawingY = lc.innerArea.Min.Y
}
// plotAxes draws the origin rune, the x/y axis lines, and both sets of
// axis labels into a fresh buffer.
func (lc *LineChart) plotAxes() Buffer {
	buf := NewBuffer()
	origY := lc.innerArea.Min.Y + lc.innerArea.Dy() - 2
	origX := lc.innerArea.Min.X + lc.labelYSpace
	buf.Set(origX, origY, Cell{Ch: ORIGIN, Fg: lc.AxesColor, Bg: lc.Bg})
	for x := origX + 1; x < origX+lc.axisXWidth; x++ {
		buf.Set(x, origY, Cell{Ch: HDASH, Fg: lc.AxesColor, Bg: lc.Bg})
	}
	for dy := 1; dy <= lc.axisYHeight; dy++ {
		buf.Set(origX, origY-dy, Cell{Ch: VDASH, Fg: lc.AxesColor, Bg: lc.Bg})
	}
	// x labels: laid out left to right, stopping at the axis edge
	oft := 0
	for _, rs := range lc.labelX {
		if oft+len(rs) > lc.axisXWidth {
			break
		}
		for j, r := range rs {
			c := Cell{
				Ch: r,
				Fg: lc.AxesColor,
				Bg: lc.Bg,
			}
			x := origX + oft + j
			y := lc.innerArea.Min.Y + lc.innerArea.Dy() - 1
			buf.Set(x, y, c)
		}
		oft += len(rs) + lc.axisXLabelGap
	}
	// y labels: bottom-up, one every axisYLabelGap+1 rows
	for i, rs := range lc.labelY {
		for j, r := range rs {
			buf.Set(
				lc.innerArea.Min.X+j,
				origY-i*(lc.axisYLabelGap+1),
				Cell{Ch: r, Fg: lc.AxesColor, Bg: lc.Bg})
		}
	}
	return buf
}
// Buffer implements Bufferer interface.
func (lc *LineChart) Buffer() Buffer {
	buf := lc.Block.Buffer()
	// len of a nil slice is 0, so the explicit nil check was redundant.
	if len(lc.Data) == 0 {
		return buf
	}
	// calcLayout requires non-empty Data (it reads Data[0]).
	lc.calcLayout()
	buf.Merge(lc.plotAxes())
	if lc.Mode == "dot" {
		buf.Merge(lc.renderDot())
	} else {
		buf.Merge(lc.renderBraille())
	}
	return buf
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tenantnamespace
import (
"context"
"fmt"
tenancyv1alpha1 "github.com/kubernetes-sigs/multi-tenancy/tenant/pkg/apis/tenancy/v1alpha1"
tenantutil "github.com/kubernetes-sigs/multi-tenancy/tenant/pkg/util"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("controller")
const (
// TenantAdminNamespaceAnnotation is the key for tenantAdminNamespace annotation
TenantAdminNamespaceAnnotation = "x-k8s.io/tenantAdminNamespace"
)
// Add creates a new TenantNamespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
return &ReconcileTenantNamespace{Client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("tenantnamespace-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to TenantNamespace
	err = c.Watch(&source.Kind{Type: &tenancyv1alpha1.TenantNamespace{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// Watch for changes to namespaces. enqueueTenantNamespace is defined
	// elsewhere in this package; presumably it maps a namespace event back
	// to the owning TenantNamespace request — confirm in its definition.
	err = c.Watch(&source.Kind{Type: &corev1.Namespace{}}, &enqueueTenantNamespace{})
	if err != nil {
		return err
	}
	return nil
}
var _ reconcile.Reconciler = &ReconcileTenantNamespace{}
// ReconcileTenantNamespace reconciles a TenantNamespace object
type ReconcileTenantNamespace struct {
	// Client reads and writes Kubernetes objects on behalf of the controller.
	client.Client
	// scheme maps Go types to GroupVersionKinds; set from the manager in
	// newReconciler.
	scheme *runtime.Scheme
}
// Add a ownerReference and tenant admin namespace annotation to input namespace
func (r *ReconcileTenantNamespace) updateNamespace(ns *corev1.Namespace, tenantAdminNamespaceName *string, ownerRef *metav1.OwnerReference) error {
	// Work on a deep copy so the caller's object is never mutated.
	nsClone := ns.DeepCopy()
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// On retry nsClone has normally been refreshed from the server
		// (below), so this append starts from the server's state.
		// NOTE(review): if that refresh Get fails, the next attempt
		// re-appends to the stale, already-appended copy and could
		// duplicate the owner reference — confirm whether that matters.
		nsClone.OwnerReferences = append(nsClone.OwnerReferences, *ownerRef)
		if nsClone.Annotations == nil {
			nsClone.Annotations = make(map[string]string)
		}
		nsClone.Annotations[TenantAdminNamespaceAnnotation] = *tenantAdminNamespaceName
		updateErr := r.Update(context.TODO(), nsClone)
		if updateErr == nil {
			return nil
		}
		// Refresh nsClone so the next attempt uses the latest
		// resourceVersion; a failed fetch is only logged because the
		// conflict error below drives the retry loop.
		key := types.NamespacedName{
			Name: nsClone.Name,
		}
		if err := r.Get(context.TODO(), key, nsClone); err != nil {
			log.Info("Fail to fetch namespace on update failure", "namespace", nsClone.Name)
		}
		return updateErr
	})
	return err
}
// Reconcile reads that state of the cluster for a TenantNamespace object and makes changes based on the state read
// and what is in the TenantNamespace.Spec
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create
// +kubebuilder:rbac:groups=tenancy.x-k8s.io,resources=tenantnamespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenancy.x-k8s.io,resources=tenantnamespaces/status,verbs=get;update;patch
func (r *ReconcileTenantNamespace) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the TenantNamespace instance
	instance := &tenancyv1alpha1.TenantNamespace{}
	err := r.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Fetch namespace list
	nsList := &corev1.NamespaceList{}
	err = r.List(context.TODO(), &client.ListOptions{}, nsList)
	if err != nil {
		return reconcile.Result{}, err
	}
	// Fetch tenant list
	tenantList := &tenancyv1alpha1.TenantList{}
	err = r.List(context.TODO(), &client.ListOptions{}, tenantList)
	if err != nil {
		return reconcile.Result{}, err
	}
	// Find the tenant of this instance: the tenant whose admin namespace
	// is the namespace this CR lives in.
	requireNamespacePrefix := false
	foundTenant := false
	var tenantName string
	for _, each := range tenantList.Items {
		if each.Spec.TenantAdminNamespaceName == instance.Namespace {
			requireNamespacePrefix = each.Spec.RequireNamespacePrefix
			foundTenant = true
			tenantName = each.Name
			break
		}
	}
	if !foundTenant {
		err = fmt.Errorf("TenantNamespace CR %v does not belong to any tenant", instance)
		return reconcile.Result{}, err
	}
	// In case namespace already exists: verify it is (or becomes) owned
	// by this CR, and fail if another TenantNamespace CR already owns it.
	tenantNsName := tenantutil.GetTenantNamespaceName(requireNamespacePrefix, instance)
	expectedOwnerRef := metav1.OwnerReference{
		APIVersion: tenancyv1alpha1.SchemeGroupVersion.String(),
		Kind:       "TenantNamespace",
		Name:       instance.Name,
		UID:        instance.UID,
	}
	found := false
	for _, each := range nsList.Items {
		if each.Name == tenantNsName {
			found = true
			// Check OwnerReference
			// NOTE(review): struct equality here compares the
			// Controller/BlockOwnerDeletion pointer fields by address;
			// owner refs stored with non-nil flags would never match —
			// confirm against how these refs are written.
			isOwner := false
			for _, ownerRef := range each.OwnerReferences {
				if ownerRef == expectedOwnerRef {
					isOwner = true
					break
				} else if ownerRef.APIVersion == expectedOwnerRef.APIVersion && ownerRef.Kind == expectedOwnerRef.Kind {
					// The namespace is owned by another TenantNamespace CR, fail the reconcile
					err = fmt.Errorf("Namespace %v is owned by another %v TenantNamespace CR", each.Name, ownerRef)
					return reconcile.Result{}, err
				}
			}
			if !isOwner {
				log.Info("Namespace has been created without TenantNamespace owner", "namespace", each.Name)
				// Obtain namespace ownership by setting ownerReference, and add annotation.
				// Taking &each is safe here: updateNamespace deep-copies immediately.
				if err = r.updateNamespace(&each, &instance.Namespace, &expectedOwnerRef); err != nil {
					return reconcile.Result{}, err
				}
			}
			break
		}
	}
	// In case a new namespace needs to be created
	if !found {
		tenantNs := &corev1.Namespace{
			TypeMeta: metav1.TypeMeta{
				APIVersion: corev1.SchemeGroupVersion.String(),
				Kind:       "Namespace",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: tenantNsName,
				Annotations: map[string]string{
					TenantAdminNamespaceAnnotation: instance.Namespace,
				},
				OwnerReferences: []metav1.OwnerReference{expectedOwnerRef},
			},
		}
		if err = r.Client.Create(context.TODO(), tenantNs); err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update status: record the namespace this CR owns, retrying on conflict.
	instanceClone := instance.DeepCopy()
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		instanceClone.Status.OwnedNamespace = tenantNsName
		updateErr := r.Update(context.TODO(), instanceClone)
		if updateErr == nil {
			return nil
		}
		if err := r.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, instanceClone); err != nil {
			log.Info("Fail to fetch tenantNamespace CR on update", "tenantNamespace", instance.Name)
		}
		return updateErr
	})
	if err != nil {
		return reconcile.Result{}, err
	}
	// Update tenant clusterrule to allow tenant admins to access the tenant namespace.
	cr := &rbacv1.ClusterRole{}
	if err = r.Get(context.TODO(), types.NamespacedName{Name: fmt.Sprintf("%s-tenant-admin-role", tenantName)}, cr); err != nil {
		return reconcile.Result{}, err
	}
	cr = cr.DeepCopy()
	// foundNsRule: the role has a rule covering "namespaces".
	// needUpdate: that rule does not yet list tenantNsName.
	foundNsRule := false
	needUpdate := true
	for i, each := range cr.Rules {
		for _, resource := range each.Resources {
			if resource == "namespaces" {
				foundNsRule = true
				break
			}
		}
		if foundNsRule {
			for _, resourceName := range each.ResourceNames {
				if resourceName == tenantNsName {
					needUpdate = false
					break
				}
			}
			if needUpdate {
				cr.Rules[i].ResourceNames = append(cr.Rules[i].ResourceNames, tenantNsName)
			}
			break
		}
	}
	if !foundNsRule {
		err = fmt.Errorf("Cluster Role %s-tenant-admin-role does not have rules for namespaces.", tenantName)
		return reconcile.Result{}, err
	}
	if needUpdate {
		crClone := cr.DeepCopy()
		err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
			// Re-apply the locally modified rules onto the (possibly
			// refreshed) clone before each update attempt.
			crClone.Rules = cr.Rules
			updateErr := r.Update(context.TODO(), crClone)
			if updateErr == nil {
				return nil
			}
			if err := r.Get(context.TODO(), types.NamespacedName{Name: crClone.Name}, crClone); err != nil {
				log.Info("Fail to fetch clusterrole on update", "clusterrole", crClone.Name)
			}
			return updateErr
		})
		if err != nil {
			return reconcile.Result{}, err
		}
	}
	return reconcile.Result{}, nil
}
Remove namespace from tenant clusterrole when tenantnamespace CR is
deleted
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tenantnamespace
import (
"context"
"fmt"
tenancyv1alpha1 "github.com/kubernetes-sigs/multi-tenancy/tenant/pkg/apis/tenancy/v1alpha1"
tenantutil "github.com/kubernetes-sigs/multi-tenancy/tenant/pkg/util"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// log is the package-level logger shared by this controller.
var log = logf.Log.WithName("controller")

const (
	// TenantAdminNamespaceAnnotation is the key for tenantAdminNamespace annotation
	TenantAdminNamespaceAnnotation = "x-k8s.io/tenantAdminNamespace"
	// TenantNamespaceFinalizer guards TenantNamespace CRs so that cleanup
	// (removing the namespace from the tenant clusterrole) runs before the
	// CR is actually deleted.
	TenantNamespaceFinalizer = "tenantnamespace.finalizer.x-k8s.io"
)
// Add creates a new TenantNamespace Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
	r := newReconciler(mgr)
	return add(mgr, r)
}
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	r := &ReconcileTenantNamespace{
		Client: mgr.GetClient(),
		scheme: mgr.GetScheme(),
	}
	return r
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Register the controller with the manager.
	c, err := controller.New("tenantnamespace-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Primary watch: TenantNamespace CRs drive reconciliation directly.
	if err := c.Watch(&source.Kind{Type: &tenancyv1alpha1.TenantNamespace{}}, &handler.EnqueueRequestForObject{}); err != nil {
		return err
	}
	// Secondary watch: namespace events are mapped back to the owning
	// TenantNamespace by enqueueTenantNamespace.
	return c.Watch(&source.Kind{Type: &corev1.Namespace{}}, &enqueueTenantNamespace{})
}
// Compile-time assertion that ReconcileTenantNamespace implements reconcile.Reconciler.
var _ reconcile.Reconciler = &ReconcileTenantNamespace{}

// ReconcileTenantNamespace reconciles a TenantNamespace object
type ReconcileTenantNamespace struct {
	client.Client                 // embedded client used for all API reads/writes
	scheme        *runtime.Scheme // NOTE(review): not referenced in this file — presumably kept for scaffolding parity; confirm before removing
}
// Add a ownerReference and tenant admin namespace annotation to input namespace
//
// updateNamespace stamps ns with an ownerReference pointing at the
// TenantNamespace CR and records the tenant admin namespace in an annotation,
// retrying the write on optimistic-concurrency conflicts.
func (r *ReconcileTenantNamespace) updateNamespace(ns *corev1.Namespace, tenantAdminNamespaceName *string, ownerRef *metav1.OwnerReference) error {
	// Mutate a copy so the (possibly cache-shared) input object is untouched.
	nsClone := ns.DeepCopy()
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-apply the mutation on every attempt: after a conflict nsClone is
		// refreshed from the server below, which discards the previous append.
		// NOTE(review): if Update fails with a non-conflict error the append
		// could run twice on the same clone — confirm duplicate ownerRefs
		// cannot reach the server.
		nsClone.OwnerReferences = append(nsClone.OwnerReferences, *ownerRef)
		if nsClone.Annotations == nil {
			nsClone.Annotations = make(map[string]string)
		}
		nsClone.Annotations[TenantAdminNamespaceAnnotation] = *tenantAdminNamespaceName
		updateErr := r.Update(context.TODO(), nsClone)
		if updateErr == nil {
			return nil
		}
		// Refresh the clone so the next attempt applies to the latest
		// resourceVersion. Namespaces are cluster-scoped, so only Name is set.
		key := types.NamespacedName{
			Name: nsClone.Name,
		}
		if err := r.Get(context.TODO(), key, nsClone); err != nil {
			log.Info("Fail to fetch namespace on update failure", "namespace", nsClone.Name)
		}
		return updateErr
	})
	return err
}
// Reconcile reads that state of the cluster for a TenantNamespace object and makes changes based on the state read
// and what is in the TenantNamespace.Spec
// Automatically generate RBAC rules to allow the Controller to read and write Deployments
// +kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create
// +kubebuilder:rbac:groups=tenancy.x-k8s.io,resources=tenantnamespaces,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=tenancy.x-k8s.io,resources=tenantnamespaces/status,verbs=get;update;patch
func (r *ReconcileTenantNamespace) Reconcile(request reconcile.Request) (reconcile.Result, error) {
	// Fetch the TenantNamespace instance
	instance := &tenancyv1alpha1.TenantNamespace{}
	err := r.Get(context.TODO(), request.NamespacedName, instance)
	if err != nil {
		if errors.IsNotFound(err) {
			// Object not found, return. Created objects are automatically garbage collected.
			// For additional cleanup logic use finalizers.
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		return reconcile.Result{}, err
	}
	// Fetch namespace list
	nsList := &corev1.NamespaceList{}
	err = r.List(context.TODO(), &client.ListOptions{}, nsList)
	if err != nil {
		return reconcile.Result{}, err
	}
	// Fetch tenant list
	tenantList := &tenancyv1alpha1.TenantList{}
	err = r.List(context.TODO(), &client.ListOptions{}, tenantList)
	if err != nil {
		return reconcile.Result{}, err
	}
	// Find the tenant of this instance: the tenant whose admin namespace is
	// the namespace this CR lives in.
	requireNamespacePrefix := false
	var tenantName string
	for _, each := range tenantList.Items {
		if each.Spec.TenantAdminNamespaceName == instance.Namespace {
			requireNamespacePrefix = each.Spec.RequireNamespacePrefix
			tenantName = each.Name
			break
		}
	}
	if tenantName == "" {
		err = fmt.Errorf("TenantNamespace CR %v does not belong to any tenant", instance)
		return reconcile.Result{}, err
	}
	// Handle tenantNamespace CR deletion: prune the namespace from the tenant
	// clusterrole first, then drop our finalizer so deletion can complete.
	if instance.DeletionTimestamp != nil {
		// Remove namespace from tenant clusterrole
		if err = r.updateTenantClusterRole(tenantName, instance.Status.OwnedNamespace, false); err != nil {
			return reconcile.Result{}, err
		} else {
			instanceClone := instance.DeepCopy()
			if containsString(instanceClone.Finalizers, TenantNamespaceFinalizer) {
				instanceClone.Finalizers = removeString(instanceClone.Finalizers, TenantNamespaceFinalizer)
			}
			err = r.Update(context.TODO(), instanceClone)
			return reconcile.Result{}, err
		}
	}
	// In case namespace already exists
	tenantNsName := tenantutil.GetTenantNamespaceName(requireNamespacePrefix, instance)
	expectedOwnerRef := metav1.OwnerReference{
		APIVersion: tenancyv1alpha1.SchemeGroupVersion.String(),
		Kind:       "TenantNamespace",
		Name:       instance.Name,
		UID:        instance.UID,
	}
	found := false
	for _, each := range nsList.Items {
		if each.Name == tenantNsName {
			found = true
			// Check OwnerReference
			isOwner := false
			for _, ownerRef := range each.OwnerReferences {
				if ownerRef == expectedOwnerRef {
					isOwner = true
					break
				} else if ownerRef.APIVersion == expectedOwnerRef.APIVersion && ownerRef.Kind == expectedOwnerRef.Kind {
					// The namespace is owned by another TenantNamespace CR, fail the reconcile
					err = fmt.Errorf("Namespace %v is owned by another %v TenantNamespace CR", each.Name, ownerRef)
					return reconcile.Result{}, err
				}
			}
			if !isOwner {
				log.Info("Namespace has been created without TenantNamespace owner", "namespace", each.Name)
				// Obtain namespace ownership by setting ownerReference, and add annotation
				// NOTE(review): &each takes the address of the loop variable;
				// safe here because the loop breaks right after, but fragile
				// if the break is ever removed.
				if err = r.updateNamespace(&each, &instance.Namespace, &expectedOwnerRef); err != nil {
					return reconcile.Result{}, err
				}
			}
			break
		}
	}
	// In case a new namespace needs to be created
	if !found {
		tenantNs := &corev1.Namespace{
			TypeMeta: metav1.TypeMeta{
				APIVersion: corev1.SchemeGroupVersion.String(),
				Kind:       "Namespace",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name: tenantNsName,
				Annotations: map[string]string{
					TenantAdminNamespaceAnnotation: instance.Namespace,
				},
				OwnerReferences: []metav1.OwnerReference{expectedOwnerRef},
			},
		}
		if err = r.Client.Create(context.TODO(), tenantNs); err != nil {
			return reconcile.Result{}, err
		}
	}
	// Update status: record the owned namespace and ensure our finalizer is
	// present, retrying on conflicts.
	instanceClone := instance.DeepCopy()
	err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		if !containsString(instanceClone.Finalizers, TenantNamespaceFinalizer) {
			instanceClone.Finalizers = append(instanceClone.Finalizers, TenantNamespaceFinalizer)
		}
		instanceClone.Status.OwnedNamespace = tenantNsName
		updateErr := r.Update(context.TODO(), instanceClone)
		if updateErr == nil {
			return nil
		}
		// Refresh the clone so the next attempt applies to the latest version.
		if err := r.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, instanceClone); err != nil {
			log.Info("Fail to fetch tenantNamespace CR on update", "tenantNamespace", instance.Name)
		}
		return updateErr
	})
	if err != nil {
		return reconcile.Result{}, err
	}
	// Add namespace to tenant clusterrule to allow tenant admins to access it.
	err = r.updateTenantClusterRole(tenantName, tenantNsName, true)
	return reconcile.Result{}, err
}
// updateTenantClusterRole adds (add == true) or removes (add == false) the
// tenant namespace from the resourceNames of the "namespaces" rule in the
// tenant admin clusterrole, retrying on optimistic-concurrency conflicts.
// It returns an error if the clusterrole has no rule for namespaces.
func (r *ReconcileTenantNamespace) updateTenantClusterRole(tenantName, tenantNsName string, add bool) error {
	cr := &rbacv1.ClusterRole{}
	crName := fmt.Sprintf("%s-tenant-admin-role", tenantName)
	if err := r.Get(context.TODO(), types.NamespacedName{Name: crName}, cr); err != nil {
		return err
	}
	// Mutate a copy so the (possibly cache-shared) object is never touched.
	cr = cr.DeepCopy()
	foundNsRule := false
	// needUpdate starts as `add`: adding a missing name requires an update,
	// removing a missing name does not. It flips when the name is found.
	needUpdate := add
	for i, each := range cr.Rules {
		for _, resource := range each.Resources {
			if resource == "namespaces" {
				foundNsRule = true
				break
			}
		}
		if !foundNsRule {
			continue
		}
		// Locate tenantNsName within the rule's resourceNames.
		idx := 0
		for ; idx < len(each.ResourceNames); idx++ {
			if each.ResourceNames[idx] == tenantNsName {
				needUpdate = !add
				break
			}
		}
		if needUpdate {
			if add {
				cr.Rules[i].ResourceNames = append(cr.Rules[i].ResourceNames, tenantNsName)
			} else {
				cr.Rules[i].ResourceNames = append(cr.Rules[i].ResourceNames[:idx], cr.Rules[i].ResourceNames[idx+1:]...)
			}
		}
		// Only the first "namespaces" rule is maintained.
		break
	}
	if !foundNsRule {
		// Error strings are lowercase without trailing punctuation per Go
		// convention (the original message violated both).
		return fmt.Errorf("cluster role %s does not have rules for namespaces", crName)
	}
	if !needUpdate {
		// Desired state already present; avoid a pointless write.
		return nil
	}
	crClone := cr.DeepCopy()
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-apply the mutated rules on every attempt; after a conflict
		// crClone is refreshed from the server below.
		crClone.Rules = cr.Rules
		updateErr := r.Update(context.TODO(), crClone)
		if updateErr == nil {
			return nil
		}
		if err := r.Get(context.TODO(), types.NamespacedName{Name: crClone.Name}, crClone); err != nil {
			log.Info("Fail to fetch clusterrole on update", "clusterrole", crClone.Name)
		}
		return updateErr
	})
}
// Helper functions to check and remove string from a slice of strings.
//
// containsString reports whether s is an element of slice.
func containsString(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}
// removeString returns a copy of slice with every occurrence of s dropped.
// It returns nil when nothing remains (mirroring append-on-nil semantics).
func removeString(slice []string, s string) (result []string) {
	for _, v := range slice {
		if v != s {
			result = append(result, v)
		}
	}
	return result
}
|
package ircmsg
import (
"strings"
"log"
)
// IRCMessage is a parsed IRC server line.
type IRCMessage struct {
	Source   string   // message prefix without the leading ':' (nick!user@host or server name)
	Target   string   // first parameter of prefixed messages (channel or nick)
	Command  string   // IRC command or numeric reply
	Args     []string // middle parameters plus the trailing parameter, if any
	Complete string   // the original, unmodified line
}

// ParseServerLine parses a raw server line into an IRCMessage. Malformed or
// truncated lines are logged and returned partially filled rather than
// panicking. Fixes in this revision: the original indexed parts[1]/parts[2]
// without bounds checks (panic on short prefixed lines), and when no ':'
// trailing marker was present it appended the whole line as a duplicate
// final argument (strings.Index returning -1 made line[0:] the "trailing").
func ParseServerLine(line string) *IRCMessage {
	im := &IRCMessage{"", "", "", make([]string, 0), line}
	if strings.TrimSpace(line) == "" {
		log.Println("ParseIrcLine: empty line")
		return im
	}
	rest := line
	if rest[0] == ':' {
		// ":source command target [rest...]"
		parts := strings.SplitN(rest[1:], " ", 4)
		if len(parts) < 3 {
			log.Println("ParseIrcLine: truncated prefixed line:", line)
			return im
		}
		im.Source = parts[0]
		im.Command = parts[1]
		im.Target = parts[2]
		if len(parts) > 3 {
			rest = parts[3]
		} else {
			rest = ""
		}
	} else {
		// "command [rest...]"
		parts := strings.SplitN(rest, " ", 2)
		im.Command = parts[0]
		if len(parts) > 1 {
			rest = parts[1]
		} else {
			rest = ""
		}
	}
	// Everything after the first ':' is a single trailing argument; the
	// words before it are individual middle arguments.
	args := strings.SplitN(rest, ":", 2)
	for _, a := range strings.Fields(args[0]) {
		im.Args = append(im.Args, a)
	}
	if len(args) > 1 {
		im.Args = append(im.Args, args[1])
	}
	log.Printf("im: %#v\n", im)
	return im
}
ircmsg: fixed parsing, should be a little safer now.
package ircmsg
import (
"strings"
"log"
)
// IRCMessage is a parsed IRC server line.
type IRCMessage struct {
	Source   string   // message prefix without the leading ':' (nick!user@host or server name)
	Target   string   // first parameter of prefixed messages (channel or nick)
	Command  string   // IRC command or numeric reply
	Args     []string // middle parameters plus the trailing parameter, if any
	Complete string   // the original, unmodified line
}

// ParseServerLine parses a raw server line into an IRCMessage. This revision
// adds the remaining missing bounds check: a prefixed line with fewer than
// three fields (e.g. ":src CMD") previously panicked on parts[2].
func ParseServerLine(line string) *IRCMessage {
	im := &IRCMessage{"", "", "", make([]string, 0), line}
	if len(line) == 0 || strings.Trim(line, " \t\n\r") == "" {
		log.Println("ParseIrcLine: empty line")
		return im
	}
	// source and target
	if line[0] == ':' {
		parts := strings.SplitN(line[1:], " ", 4) // 4: src cmd target rest
		if len(parts) < 3 {
			log.Println("ParseIrcLine: truncated prefixed line:", im.Complete)
			return im
		}
		im.Source = parts[0]
		im.Command = parts[1]
		im.Target = parts[2]
		// cut them off
		if len(parts) > 3 {
			line = parts[3]
		} else {
			line = ""
		}
	} else {
		parts := strings.SplitN(line, " ", 2) // cmd, rest
		im.Command = parts[0]
		if len(parts) > 1 {
			line = parts[1]
		} else {
			line = ""
		}
	}
	// Words before the first ':' are middle args; everything after it is one
	// trailing arg.
	args := strings.SplitN(line, ":", 2)
	for _, a := range strings.Split(args[0], " ") {
		if a != "" {
			im.Args = append(im.Args, a)
		}
	}
	if len(args) > 1 {
		im.Args = append(im.Args, args[1])
	}
	log.Printf("im: %#v\n", im)
	return im
}
|
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Ben Darnell
package multiraft
import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/cockroachdb/cockroach/util"
	"github.com/cockroachdb/cockroach/util/leaktest"
	"github.com/cockroachdb/cockroach/util/log"
	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
	"golang.org/x/net/context"
)
// testRand is a seeded pseudo-random source shared by the test helpers.
var testRand, _ = util.NewPseudoRand()

// makeCommandID returns a random command ID of the fixed commandIDLen length.
func makeCommandID() string {
	return string(util.RandBytes(testRand, commandIDLen))
}
// testCluster bundles the per-node fixtures for an in-process multiraft
// cluster: the node states plus their manual clocks, blockable storages,
// and demultiplexed event streams, all sharing one transport.
type testCluster struct {
	t         *testing.T
	nodes     []*state            // one multiraft state per node
	tickers   []*manualTicker     // clocks advanced explicitly by tests
	events    []*eventDemux       // per-node demultiplexed event streams
	storages  []*BlockableStorage // storages tests can block to simulate slow disks
	transport Transport
}
// newTestCluster builds and starts a cluster of size nodes wired together
// over transport (a fresh local RPC transport when nil). Each node gets its
// own manual ticker, blockable in-memory storage, and event demux.
func newTestCluster(transport Transport, size int, stopper *util.Stopper, t *testing.T) *testCluster {
	if transport == nil {
		transport = NewLocalRPCTransport()
	}
	stopper.AddCloser(transport)
	cluster := &testCluster{
		t:         t,
		transport: transport,
	}
	for i := 0; i < size; i++ {
		ticker := newManualTicker()
		storage := &BlockableStorage{storage: NewMemoryStorage()}
		config := &Config{
			Transport:              transport,
			Storage:                storage,
			Ticker:                 ticker,
			ElectionTimeoutTicks:   2,
			HeartbeatIntervalTicks: 1,
			TickInterval:           time.Hour, // not in use
			Strict:                 true,
		}
		// Node IDs are 1-based.
		mr, err := NewMultiRaft(NodeID(i+1), config)
		if err != nil {
			t.Fatal(err)
		}
		state := newState(mr)
		demux := newEventDemux(state.Events)
		demux.start(stopper)
		cluster.nodes = append(cluster.nodes, state)
		cluster.tickers = append(cluster.tickers, ticker)
		cluster.events = append(cluster.events, demux)
		cluster.storages = append(cluster.storages, storage)
	}
	// Start only after every node is constructed (see start).
	cluster.start(stopper)
	return cluster
}
// start launches every node's state loop.
func (c *testCluster) start(stopper *util.Stopper) {
	// Let all the states listen before starting any.
	for i := range c.nodes {
		c.nodes[i].start(stopper)
	}
}
// createGroup replicates a group consisting of numReplicas members,
// the first being the node at index firstNode.
func (c *testCluster) createGroup(groupID uint64, firstNode, numReplicas int) {
	var replicaIDs []uint64
	for i := 0; i < numReplicas; i++ {
		replicaIDs = append(replicaIDs, uint64(c.nodes[firstNode+i].nodeID))
	}
	for i := 0; i < numReplicas; i++ {
		gs := c.storages[firstNode+i].GroupStorage(groupID)
		memStorage := gs.(*blockableGroupStorage).s.(*raft.MemoryStorage)
		// Seed each member's storage with a hard state and a snapshot so the
		// group starts with an established membership at index 10, term 5.
		if err := memStorage.SetHardState(raftpb.HardState{
			Commit: 10,
			Term:   5,
		}); err != nil {
			c.t.Fatal(err)
		}
		if err := memStorage.ApplySnapshot(raftpb.Snapshot{
			Metadata: raftpb.SnapshotMetadata{
				ConfState: raftpb.ConfState{
					Nodes: replicaIDs,
				},
				Index: 10,
				Term:  5,
			},
		}); err != nil {
			c.t.Fatal(err)
		}
		node := c.nodes[firstNode+i]
		err := node.CreateGroup(groupID)
		if err != nil {
			c.t.Fatal(err)
		}
	}
}
// triggerElection forces the node at nodeIndex to campaign for groupID.
func (c *testCluster) triggerElection(nodeIndex int, groupID uint64) {
	err := c.nodes[nodeIndex].multiNode.Campaign(context.Background(), groupID)
	if err != nil {
		c.t.Fatal(err)
	}
}
// Trigger an election on node i and wait for it to complete.
// TODO(bdarnell): once we have better leader discovery and forwarding/queuing, remove this.
func (c *testCluster) waitForElection(i int) *EventLeaderElection {
	for {
		event := <-c.events[i].LeaderElection
		if event == nil {
			panic("got nil LeaderElection event, channel likely closed")
		}
		if event.NodeID == 0 {
			// NodeID 0 marks an election still in progress; keep waiting.
			continue
		}
		return event
	}
}
// TestInitialLeaderElection verifies that whichever node campaigns first
// wins the election in a fresh three-node group.
func TestInitialLeaderElection(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Run the test three times, each time triggering a different node's election clock.
	// The node that requests an election first should win.
	for leaderIndex := 0; leaderIndex < 3; leaderIndex++ {
		log.Infof("testing leader election for node %v", leaderIndex)
		stopper := util.NewStopper()
		cluster := newTestCluster(nil, 3, stopper, t)
		groupID := uint64(1)
		cluster.createGroup(groupID, 0, 3)
		cluster.triggerElection(leaderIndex, groupID)
		event := cluster.waitForElection(leaderIndex)
		if event.GroupID != groupID {
			t.Fatalf("election event had incorrect group id %v", event.GroupID)
		}
		if event.NodeID != cluster.nodes[leaderIndex].nodeID {
			t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID,
				event.NodeID)
		}
		stopper.Stop()
	}
}
// TestProposeBadGroup ensures that unknown group IDs are an error, not a panic.
func TestProposeBadGroup(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	// Group 7 was never created; the submit must fail cleanly.
	err := <-cluster.nodes[1].SubmitCommand(7, "asdf", []byte{})
	if err == nil {
		t.Fatal("did not get expected error")
	}
}
// TestLeaderElectionEvent checks that a leader-election event is emitted only
// once the new leader commits an entry, not when votes are first solicited.
func TestLeaderElectionEvent(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Leader election events are fired when the leader commits an entry, not when it
	// issues a call for votes.
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	// Process a Ready with a new leader but no new commits.
	// This happens while an election is in progress.
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			SoftState: &raft.SoftState{
				Lead: 3,
			},
		})
	// No events are sent.
	select {
	case e := <-cluster.events[1].LeaderElection:
		t.Fatalf("got unexpected event %v", e)
	case <-time.After(time.Millisecond):
	}
	// Now there are new committed entries. A new leader always commits an entry
	// to conclude the election.
	entry := raftpb.Entry{
		Index: 42,
		Term:  42,
	}
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			Entries:          []raftpb.Entry{entry},
			CommittedEntries: []raftpb.Entry{entry},
		})
	// Now we get an event.
	select {
	case e := <-cluster.events[1].LeaderElection:
		if !reflect.DeepEqual(e, &EventLeaderElection{
			GroupID: groupID,
			NodeID:  3,
			Term:    42,
		}) {
			t.Errorf("election event did not match expectations: %+v", e)
		}
	case <-time.After(time.Millisecond):
		t.Fatal("didn't get expected event")
	}
}
// TestCommand submits one command to the elected leader and verifies it is
// committed on every node of a three-node group.
func TestCommand(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)
	// Submit a command to the leader
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
	// The command will be committed on each node.
	for i, events := range cluster.events {
		log.Infof("waiting for event to be committed on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}
}
// TestSlowStorage blocks one node's storage, checks that the other two still
// commit, and then verifies the blocked node catches up once unblocked.
func TestSlowStorage(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)
	// Block the storage on the last node.
	// TODO(bdarnell): there appear to still be issues if the storage is blocked during
	// the election.
	cluster.storages[2].Block()
	// Submit a command to the leader
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
	// Even with the third node blocked, the other nodes can make progress.
	for i := 0; i < 2; i++ {
		events := cluster.events[i]
		log.Infof("waiting for event to be commited on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}
	// Ensure that node 2 is in fact blocked.
	time.Sleep(time.Millisecond)
	select {
	case commit := <-cluster.events[2].CommandCommitted:
		t.Errorf("didn't expect commits on node 2 but got %v", commit)
	default:
	}
	// After unblocking the third node, it will catch up.
	cluster.storages[2].Unblock()
	cluster.tickers[0].Tick()
	log.Infof("waiting for event to be commited on node 2")
	commit := <-cluster.events[2].CommandCommitted
	if string(commit.Command) != "command" {
		t.Errorf("unexpected value in committed command: %v", commit.Command)
	}
}
// TestMembershipChange grows a single-node group to four members one node at
// a time, applying each membership-change event as it is committed.
func TestMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 4, stopper, t)
	defer stopper.Stop()
	// Create a group with a single member, cluster.nodes[0].
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 1)
	// An automatic election is triggered since this is a single-node Raft group.
	cluster.waitForElection(0)
	// Consume and apply the membership change events.
	for i := 0; i < 4; i++ {
		go func(i int) {
			for {
				e, ok := <-cluster.events[i].MembershipChangeCommitted
				if !ok {
					return
				}
				// Acknowledge the change so the proposal can complete.
				e.Callback(nil)
			}
		}(i)
	}
	// Add each of the other three nodes to the cluster.
	for i := 1; i < 4; i++ {
		ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
			raftpb.ConfChangeAddNode,
			cluster.nodes[i].nodeID, nil)
		<-ch
	}
	// TODO(bdarnell): verify that the channel events are sent out correctly.
	/*
		for i := 0; i < 10; i++ {
			log.Infof("tick %d", i)
			cluster.tickers[0].Tick()
			time.Sleep(5 * time.Millisecond)
		}
		// Each node is notified of each other node's joining.
		for i := 0; i < 4; i++ {
			for j := 1; j < 4; j++ {
				select {
				case e := <-cluster.events[i].MembershipChangeCommitted:
					if e.NodeID != cluster.nodes[j].nodeID {
						t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
					}
				default:
					t.Errorf("node %d did not get expected event for %d", i, j)
				}
			}
		}*/
}
// TestRapidMembershipChange hammers a single-node cluster with concurrent
// proposers that create the group, submit a command, and remove the group
// in a loop until numCommit commands have been observed.
//
// Race fix: the proposer goroutines previously outlived the test function —
// after close(teardown) the test returned while goroutines could still be
// using t and the cluster. A WaitGroup now guarantees they have all exited
// before the test returns.
func TestRapidMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	defer stopper.Stop()
	var wg sync.WaitGroup
	proposers := 5
	numCommit := int32(200)
	cluster := newTestCluster(nil, 1, stopper, t)
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 1 /* replicas */)
	cmdID := int32(0) // updated atomically from now on
	cmdIDFormat := "%0" + fmt.Sprintf("%d", commandIDLen) + "d"
	teardown := make(chan struct{})
	proposerFn := func(i int) {
		defer wg.Done()
		var seq int32
		for {
			seq = atomic.AddInt32(&cmdID, 1)
			if seq > numCommit {
				break
			}
			cmdID := fmt.Sprintf(cmdIDFormat, seq)
		retry:
			for {
				// Recreate the group in case a sibling proposer removed it.
				if err := cluster.nodes[0].CreateGroup(groupID); err != nil {
					// NOTE(review): t.Fatal from a non-test goroutine is
					// itself discouraged; kept for parity with the original.
					t.Fatal(err)
				}
				if log.V(1) {
					log.Infof("%-3d: try %s", i, cmdID)
				}
				select {
				case err := <-cluster.nodes[0].SubmitCommand(groupID,
					cmdID, []byte("command")):
					if err == nil {
						log.Infof("%-3d: ok %s", i, cmdID)
						break retry
					}
					log.Infof("%-3d: err %s %s", i, cmdID, err)
				case <-teardown:
					return
				}
			}
			if err := cluster.nodes[0].RemoveGroup(groupID); err != nil {
				t.Fatal(err)
			}
		}
	}
	for i := 0; i < proposers; i++ {
		wg.Add(1)
		go proposerFn(i)
	}
	for e := range cluster.events[0].CommandCommitted {
		if log.V(1) {
			log.Infof("   : recv %s", e.CommandID)
		}
		if fmt.Sprintf(cmdIDFormat, numCommit) == e.CommandID {
			log.Infof("received everything we asked for, ending test")
			break
		}
	}
	close(teardown)
	// Wait for every proposer to exit before returning, so no goroutine can
	// race with the test framework's use of t after the test ends.
	wg.Wait()
}
Fix the data race?
// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Ben Darnell
package multiraft
import (
"fmt"
"reflect"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/leaktest"
"github.com/cockroachdb/cockroach/util/log"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"golang.org/x/net/context"
)
// testRand is a seeded pseudo-random source shared by the test helpers.
var testRand, _ = util.NewPseudoRand()

// makeCommandID returns a random command ID of the fixed commandIDLen length.
func makeCommandID() string {
	return string(util.RandBytes(testRand, commandIDLen))
}
// testCluster bundles the per-node fixtures for an in-process multiraft
// cluster: the node states plus their manual clocks, blockable storages,
// and demultiplexed event streams, all sharing one transport.
type testCluster struct {
	t         *testing.T
	nodes     []*state            // one multiraft state per node
	tickers   []*manualTicker     // clocks advanced explicitly by tests
	events    []*eventDemux       // per-node demultiplexed event streams
	storages  []*BlockableStorage // storages tests can block to simulate slow disks
	transport Transport
}
// newTestCluster builds and starts a cluster of size nodes wired together
// over transport (a fresh local RPC transport when nil). Each node gets its
// own manual ticker, blockable in-memory storage, and event demux.
func newTestCluster(transport Transport, size int, stopper *util.Stopper, t *testing.T) *testCluster {
	if transport == nil {
		transport = NewLocalRPCTransport()
	}
	stopper.AddCloser(transport)
	cluster := &testCluster{
		t:         t,
		transport: transport,
	}
	for i := 0; i < size; i++ {
		ticker := newManualTicker()
		storage := &BlockableStorage{storage: NewMemoryStorage()}
		config := &Config{
			Transport:              transport,
			Storage:                storage,
			Ticker:                 ticker,
			ElectionTimeoutTicks:   2,
			HeartbeatIntervalTicks: 1,
			TickInterval:           time.Hour, // not in use
			Strict:                 true,
		}
		// Node IDs are 1-based.
		mr, err := NewMultiRaft(NodeID(i+1), config)
		if err != nil {
			t.Fatal(err)
		}
		state := newState(mr)
		demux := newEventDemux(state.Events)
		demux.start(stopper)
		cluster.nodes = append(cluster.nodes, state)
		cluster.tickers = append(cluster.tickers, ticker)
		cluster.events = append(cluster.events, demux)
		cluster.storages = append(cluster.storages, storage)
	}
	// Start only after every node is constructed (see start).
	cluster.start(stopper)
	return cluster
}
// start launches every node's state loop.
func (c *testCluster) start(stopper *util.Stopper) {
	// Let all the states listen before starting any.
	for i := range c.nodes {
		c.nodes[i].start(stopper)
	}
}
// createGroup replicates a group consisting of numReplicas members,
// the first being the node at index firstNode.
func (c *testCluster) createGroup(groupID uint64, firstNode, numReplicas int) {
	var replicaIDs []uint64
	for i := 0; i < numReplicas; i++ {
		replicaIDs = append(replicaIDs, uint64(c.nodes[firstNode+i].nodeID))
	}
	for i := 0; i < numReplicas; i++ {
		gs := c.storages[firstNode+i].GroupStorage(groupID)
		memStorage := gs.(*blockableGroupStorage).s.(*raft.MemoryStorage)
		// Seed each member's storage with a hard state and a snapshot so the
		// group starts with an established membership at index 10, term 5.
		if err := memStorage.SetHardState(raftpb.HardState{
			Commit: 10,
			Term:   5,
		}); err != nil {
			c.t.Fatal(err)
		}
		if err := memStorage.ApplySnapshot(raftpb.Snapshot{
			Metadata: raftpb.SnapshotMetadata{
				ConfState: raftpb.ConfState{
					Nodes: replicaIDs,
				},
				Index: 10,
				Term:  5,
			},
		}); err != nil {
			c.t.Fatal(err)
		}
		node := c.nodes[firstNode+i]
		err := node.CreateGroup(groupID)
		if err != nil {
			c.t.Fatal(err)
		}
	}
}
// triggerElection forces the node at nodeIndex to campaign for groupID.
func (c *testCluster) triggerElection(nodeIndex int, groupID uint64) {
	err := c.nodes[nodeIndex].multiNode.Campaign(context.Background(), groupID)
	if err != nil {
		c.t.Fatal(err)
	}
}
// Trigger an election on node i and wait for it to complete.
// TODO(bdarnell): once we have better leader discovery and forwarding/queuing, remove this.
func (c *testCluster) waitForElection(i int) *EventLeaderElection {
	for {
		event := <-c.events[i].LeaderElection
		if event == nil {
			panic("got nil LeaderElection event, channel likely closed")
		}
		if event.NodeID == 0 {
			// NodeID 0 marks an election still in progress; keep waiting.
			continue
		}
		return event
	}
}
// TestInitialLeaderElection verifies that whichever node campaigns first
// wins the election in a fresh three-node group.
func TestInitialLeaderElection(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Run the test three times, each time triggering a different node's election clock.
	// The node that requests an election first should win.
	for leaderIndex := 0; leaderIndex < 3; leaderIndex++ {
		log.Infof("testing leader election for node %v", leaderIndex)
		stopper := util.NewStopper()
		cluster := newTestCluster(nil, 3, stopper, t)
		groupID := uint64(1)
		cluster.createGroup(groupID, 0, 3)
		cluster.triggerElection(leaderIndex, groupID)
		event := cluster.waitForElection(leaderIndex)
		if event.GroupID != groupID {
			t.Fatalf("election event had incorrect group id %v", event.GroupID)
		}
		if event.NodeID != cluster.nodes[leaderIndex].nodeID {
			t.Fatalf("expected %v to win election, but was %v", cluster.nodes[leaderIndex].nodeID,
				event.NodeID)
		}
		stopper.Stop()
	}
}
// TestProposeBadGroup ensures that unknown group IDs are an error, not a panic.
func TestProposeBadGroup(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	// Group 7 was never created; the submit must fail cleanly.
	err := <-cluster.nodes[1].SubmitCommand(7, "asdf", []byte{})
	if err == nil {
		t.Fatal("did not get expected error")
	}
}
// TestLeaderElectionEvent checks that a leader-election event is emitted only
// once the new leader commits an entry, not when votes are first solicited.
func TestLeaderElectionEvent(t *testing.T) {
	defer leaktest.AfterTest(t)
	// Leader election events are fired when the leader commits an entry, not when it
	// issues a call for votes.
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	// Process a Ready with a new leader but no new commits.
	// This happens while an election is in progress.
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			SoftState: &raft.SoftState{
				Lead: 3,
			},
		})
	// No events are sent.
	select {
	case e := <-cluster.events[1].LeaderElection:
		t.Fatalf("got unexpected event %v", e)
	case <-time.After(time.Millisecond):
	}
	// Now there are new committed entries. A new leader always commits an entry
	// to conclude the election.
	entry := raftpb.Entry{
		Index: 42,
		Term:  42,
	}
	cluster.nodes[1].maybeSendLeaderEvent(groupID, cluster.nodes[1].groups[groupID],
		&raft.Ready{
			Entries:          []raftpb.Entry{entry},
			CommittedEntries: []raftpb.Entry{entry},
		})
	// Now we get an event.
	select {
	case e := <-cluster.events[1].LeaderElection:
		if !reflect.DeepEqual(e, &EventLeaderElection{
			GroupID: groupID,
			NodeID:  3,
			Term:    42,
		}) {
			t.Errorf("election event did not match expectations: %+v", e)
		}
	case <-time.After(time.Millisecond):
		t.Fatal("didn't get expected event")
	}
}
// TestCommand submits one command to the elected leader and verifies it is
// committed on every node of a three-node group.
func TestCommand(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)
	// Submit a command to the leader
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
	// The command will be committed on each node.
	for i, events := range cluster.events {
		log.Infof("waiting for event to be committed on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}
}
// TestSlowStorage blocks one node's storage, checks that the other two still
// commit, and then verifies the blocked node catches up once unblocked.
func TestSlowStorage(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 3, stopper, t)
	defer stopper.Stop()
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 3)
	cluster.triggerElection(0, groupID)
	cluster.waitForElection(0)
	// Block the storage on the last node.
	// TODO(bdarnell): there appear to still be issues if the storage is blocked during
	// the election.
	cluster.storages[2].Block()
	// Submit a command to the leader
	cluster.nodes[0].SubmitCommand(groupID, makeCommandID(), []byte("command"))
	// Even with the third node blocked, the other nodes can make progress.
	for i := 0; i < 2; i++ {
		events := cluster.events[i]
		log.Infof("waiting for event to be commited on node %v", i)
		commit := <-events.CommandCommitted
		if string(commit.Command) != "command" {
			t.Errorf("unexpected value in committed command: %v", commit.Command)
		}
	}
	// Ensure that node 2 is in fact blocked.
	time.Sleep(time.Millisecond)
	select {
	case commit := <-cluster.events[2].CommandCommitted:
		t.Errorf("didn't expect commits on node 2 but got %v", commit)
	default:
	}
	// After unblocking the third node, it will catch up.
	cluster.storages[2].Unblock()
	cluster.tickers[0].Tick()
	log.Infof("waiting for event to be commited on node 2")
	commit := <-cluster.events[2].CommandCommitted
	if string(commit.Command) != "command" {
		t.Errorf("unexpected value in committed command: %v", commit.Command)
	}
}
// TestMembershipChange grows a single-node group to four members one node at
// a time, applying each membership-change event as it is committed.
func TestMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	cluster := newTestCluster(nil, 4, stopper, t)
	defer stopper.Stop()
	// Create a group with a single member, cluster.nodes[0].
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 1)
	// An automatic election is triggered since this is a single-node Raft group.
	cluster.waitForElection(0)
	// Consume and apply the membership change events.
	for i := 0; i < 4; i++ {
		go func(i int) {
			for {
				e, ok := <-cluster.events[i].MembershipChangeCommitted
				if !ok {
					return
				}
				// Acknowledge the change so the proposal can complete.
				e.Callback(nil)
			}
		}(i)
	}
	// Add each of the other three nodes to the cluster.
	for i := 1; i < 4; i++ {
		ch := cluster.nodes[0].ChangeGroupMembership(groupID, makeCommandID(),
			raftpb.ConfChangeAddNode,
			cluster.nodes[i].nodeID, nil)
		<-ch
	}
	// TODO(bdarnell): verify that the channel events are sent out correctly.
	/*
		for i := 0; i < 10; i++ {
			log.Infof("tick %d", i)
			cluster.tickers[0].Tick()
			time.Sleep(5 * time.Millisecond)
		}
		// Each node is notified of each other node's joining.
		for i := 0; i < 4; i++ {
			for j := 1; j < 4; j++ {
				select {
				case e := <-cluster.events[i].MembershipChangeCommitted:
					if e.NodeID != cluster.nodes[j].nodeID {
						t.Errorf("node %d expected event for %d, got %d", i, j, e.NodeID)
					}
				default:
					t.Errorf("node %d did not get expected event for %d", i, j)
				}
			}
		}*/
}
// TestRapidMembershipChange races several proposer goroutines against each
// other on a single-node cluster: each repeatedly (re-)creates the group,
// submits one command, and removes the group again, until numCommit commands
// have been claimed in total.
func TestRapidMembershipChange(t *testing.T) {
	defer leaktest.AfterTest(t)
	stopper := util.NewStopper()
	defer stopper.Stop()
	var wg sync.WaitGroup
	proposers := 5
	numCommit := int32(200)
	cluster := newTestCluster(nil, 1, stopper, t)
	groupID := uint64(1)
	cluster.createGroup(groupID, 0, 1 /* replicas */)
	startSeq := int32(0) // updated atomically from now on
	// Zero-padded command IDs of exactly commandIDLen digits.
	cmdIDFormat := "%0" + fmt.Sprintf("%d", commandIDLen) + "d"
	teardown := make(chan struct{})
	proposerFn := func(i int) {
		defer wg.Done()
		var seq int32
		for {
			// Claim the next sequence number; quit once the quota is used up.
			seq = atomic.AddInt32(&startSeq, 1)
			if seq > numCommit {
				break
			}
			cmdID := fmt.Sprintf(cmdIDFormat, seq)
		retry:
			for {
				// The group may have been removed by a concurrent proposer;
				// (re-)create it before every submission attempt.
				if err := cluster.nodes[0].CreateGroup(groupID); err != nil {
					t.Fatal(err)
				}
				if log.V(1) {
					log.Infof("%-3d: try %s", i, cmdID)
				}
				select {
				case err := <-cluster.nodes[0].SubmitCommand(groupID,
					cmdID, []byte("command")):
					if err == nil {
						log.Infof("%-3d: ok %s", i, cmdID)
						break retry
					}
					// Submission failed (e.g. group removed underneath us);
					// loop and try again.
					log.Infof("%-3d: err %s %s", i, cmdID, err)
				case <-teardown:
					return
				}
			}
			if err := cluster.nodes[0].RemoveGroup(groupID); err != nil {
				t.Fatal(err)
			}
		}
	}
	for i := 0; i < proposers; i++ {
		wg.Add(1)
		go proposerFn(i)
	}
	// Drain committed commands until the highest-numbered one arrives.
	for e := range cluster.events[0].CommandCommitted {
		if log.V(1) {
			log.Infof("  : recv %s", e.CommandID)
		}
		if fmt.Sprintf(cmdIDFormat, numCommit) == e.CommandID {
			log.Infof("received everything we asked for, ending test")
			break
		}
	}
	close(teardown)
	// Because ending the test case is racy with the test itself, we wait until
	// all our goroutines have finished their work before we allow the test to
	// forcible terminate. This solves a race condition on `t`, which is
	// otherwise subject to concurrent access from our goroutine and the go
	// testing machinery.
	wg.Wait()
}
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungers
import (
"path"
"strings"
"k8s.io/contrib/mungegithub/features"
"k8s.io/contrib/mungegithub/github"
"k8s.io/contrib/mungegithub/mungers/mungerutil"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/golang/glog"
"github.com/spf13/cobra"
)
// maxDepth is how many leading directory levels of a changed file's path
// are consulted for approvers.
const maxDepth = 3
// ApprovalHandler will try to add the "approved" label once
// all files of the change have been approved by approvers.
type ApprovalHandler struct {
	features *features.Features // shared feature set: repo owners plus alias expansion
}
// init registers the munger at program start so it can be enabled via
// --pr-mungers.
func init() {
	h := &ApprovalHandler{}
	RegisterMungerOrDie(h)
}
// Name is the name usable in --pr-mungers
func (*ApprovalHandler) Name() string { return "approval-handler" }
// RequiredFeatures is a slice of 'features' that must be provided
func (*ApprovalHandler) RequiredFeatures() []string {
	return []string{features.RepoFeatureName, features.AliasesFeature}
}
// Initialize will initialize the munger; it keeps the shared feature set
// for later owner lookups.
func (h *ApprovalHandler) Initialize(config *github.Config, features *features.Features) error {
	h.features = features
	return nil
}
// EachLoop is called at the start of every munge loop; nothing to do here.
func (*ApprovalHandler) EachLoop() error { return nil }
// AddFlags will add any request flags to the cobra `cmd`; none are needed.
func (*ApprovalHandler) AddFlags(cmd *cobra.Command, config *github.Config) {}
// Munge is the workhorse the will actually make updates to the PR
// The algorithm goes as:
// - Initially, we set up approverSet
// - Go through all comments after latest commit. If any approver said "/approve", add him to approverSet.
// - For each file, we see if any approver of this file is in approverSet.
// - An approver of a file is defined as:
// - It's known that each dir has a list of approvers. (This might not hold true. For usability, current situation is enough.)
// - Approver of a dir is also the approver of child dirs.
// - We look at top N (default 3) level dir approvers. For example, for file "/a/b/c/d/e", we might search for approver from
// "/", "/a/", "/a/b/"
// - Iff all files has been approved, the bot will add "approved" label.
func (h *ApprovalHandler) Munge(obj *github.MungeObject) {
	if !obj.IsPR() {
		return
	}
	files, err := obj.ListFiles()
	if err != nil {
		glog.Errorf("failed to list files in this PR: %v", err)
		return
	}
	comments, err := getCommentsAfterLastModified(obj)
	if err != nil {
		glog.Errorf("failed to get comments in this PR: %v", err)
		return
	}
	approverSet := sets.String{}
	// Replay comments from oldest to latest so that a later
	// "/approve cancel" overrides an earlier "/approve".
	for i := len(comments) - 1; i >= 0; i-- {
		c := comments[i]
		if !mungerutil.IsValidUser(c.User) {
			continue
		}
		fields := strings.Fields(strings.TrimSpace(*c.Body))
		if len(fields) == 1 && strings.ToLower(fields[0]) == "/approve" {
			approverSet.Insert(*c.User.Login)
			continue
		}
		if len(fields) == 2 && strings.ToLower(fields[0]) == "/approve" && strings.ToLower(fields[1]) == "cancel" {
			approverSet.Delete(*c.User.Login)
		}
	}
	// Every changed file must have an approver in the set; bail on the first
	// one that does not (no label is added in that case).
	for _, file := range files {
		if !h.hasApproval(*file.Filename, approverSet, maxDepth) {
			return
		}
	}
	obj.AddLabel(approvedLabel)
}
// hasApproval reports whether any approver of filename is in approverSet.
// It consults the owners of the first `depth` directory levels of the path
// (for "a/b/c/d" with depth 3: "", "a", "a/b"), since the approvers of a
// directory also approve everything beneath it.
func (h *ApprovalHandler) hasApproval(filename string, approverSet sets.String, depth int) bool {
	paths := strings.Split(filename, "/")
	p := ""
	for i := 0; i < len(paths) && i < depth; i++ {
		fileOwners := h.features.Repos.LeafAssignees(p)
		// Compute the next level up front: previously `continue` skipped the
		// path.Join, so an ownerless directory was re-checked on every
		// iteration and deeper levels were never consulted.
		next := path.Join(p, paths[i])
		if fileOwners.Len() == 0 {
			glog.Warningf("Couldn't find an owner for path (%s)", p)
			p = next
			continue
		}
		if h.features.Aliases != nil && h.features.Aliases.IsEnabled {
			fileOwners = h.features.Aliases.Expand(fileOwners)
		}
		for _, owner := range fileOwners.List() {
			if approverSet.Has(owner) {
				return true
			}
		}
		p = next
	}
	return false
}
Un-delete getCommentsAfterLastModified.
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungers
import (
"path"
"strings"
"k8s.io/contrib/mungegithub/features"
"k8s.io/contrib/mungegithub/github"
"k8s.io/contrib/mungegithub/mungers/mungerutil"
"k8s.io/kubernetes/pkg/util/sets"
"github.com/golang/glog"
"github.com/spf13/cobra"
)
// maxDepth is how many leading directory levels of a changed file's path
// are consulted for approvers.
const maxDepth = 3
// ApprovalHandler will try to add the "approved" label once
// all files of the change have been approved by approvers.
type ApprovalHandler struct {
	features *features.Features // shared feature set: repo owners plus alias expansion
}
// init registers the munger at program start so it can be enabled via
// --pr-mungers.
func init() {
	h := &ApprovalHandler{}
	RegisterMungerOrDie(h)
}
// Name is the name usable in --pr-mungers
func (*ApprovalHandler) Name() string { return "approval-handler" }
// RequiredFeatures is a slice of 'features' that must be provided
func (*ApprovalHandler) RequiredFeatures() []string {
	return []string{features.RepoFeatureName, features.AliasesFeature}
}
// Initialize will initialize the munger; it keeps the shared feature set
// for later owner lookups.
func (h *ApprovalHandler) Initialize(config *github.Config, features *features.Features) error {
	h.features = features
	return nil
}
// EachLoop is called at the start of every munge loop; nothing to do here.
func (*ApprovalHandler) EachLoop() error { return nil }
// AddFlags will add any request flags to the cobra `cmd`; none are needed.
func (*ApprovalHandler) AddFlags(cmd *cobra.Command, config *github.Config) {}
// Munge is the workhorse the will actually make updates to the PR
// The algorithm goes as:
// - Initially, we set up approverSet
// - Go through all comments after latest commit. If any approver said "/approve", add him to approverSet.
// - For each file, we see if any approver of this file is in approverSet.
// - An approver of a file is defined as:
// - It's known that each dir has a list of approvers. (This might not hold true. For usability, current situation is enough.)
// - Approver of a dir is also the approver of child dirs.
// - We look at top N (default 3) level dir approvers. For example, for file "/a/b/c/d/e", we might search for approver from
// "/", "/a/", "/a/b/"
// - Iff all files has been approved, the bot will add "approved" label.
func (h *ApprovalHandler) Munge(obj *github.MungeObject) {
	if !obj.IsPR() {
		return
	}
	files, err := obj.ListFiles()
	if err != nil {
		glog.Errorf("failed to list files in this PR: %v", err)
		return
	}
	comments, err := getCommentsAfterLastModified(obj)
	if err != nil {
		glog.Errorf("failed to get comments in this PR: %v", err)
		return
	}
	approverSet := sets.String{}
	// Replay comments from oldest to latest so that a later
	// "/approve cancel" overrides an earlier "/approve".
	for i := len(comments) - 1; i >= 0; i-- {
		c := comments[i]
		if !mungerutil.IsValidUser(c.User) {
			continue
		}
		fields := strings.Fields(strings.TrimSpace(*c.Body))
		if len(fields) == 1 && strings.ToLower(fields[0]) == "/approve" {
			approverSet.Insert(*c.User.Login)
			continue
		}
		if len(fields) == 2 && strings.ToLower(fields[0]) == "/approve" && strings.ToLower(fields[1]) == "cancel" {
			approverSet.Delete(*c.User.Login)
		}
	}
	// Every changed file must have an approver in the set; bail on the first
	// one that does not (no label is added in that case).
	for _, file := range files {
		if !h.hasApproval(*file.Filename, approverSet, maxDepth) {
			return
		}
	}
	obj.AddLabel(approvedLabel)
}
// hasApproval reports whether any approver of filename is in approverSet.
// It consults the owners of the first `depth` directory levels of the path
// (for "a/b/c/d" with depth 3: "", "a", "a/b"), since the approvers of a
// directory also approve everything beneath it.
func (h *ApprovalHandler) hasApproval(filename string, approverSet sets.String, depth int) bool {
	paths := strings.Split(filename, "/")
	p := ""
	for i := 0; i < len(paths) && i < depth; i++ {
		fileOwners := h.features.Repos.LeafAssignees(p)
		// Compute the next level up front: previously `continue` skipped the
		// path.Join, so an ownerless directory was re-checked on every
		// iteration and deeper levels were never consulted.
		next := path.Join(p, paths[i])
		if fileOwners.Len() == 0 {
			glog.Warningf("Couldn't find an owner for path (%s)", p)
			p = next
			continue
		}
		if h.features.Aliases != nil && h.features.Aliases.IsEnabled {
			fileOwners = h.features.Aliases.Expand(fileOwners)
		}
		for _, owner := range fileOwners.List() {
			if approverSet.Has(owner) {
				return true
			}
		}
		p = next
	}
	return false
}
// getCommentsAfterLastModified lists the PR's issue comments created or
// updated at/after the PR's last-modified time, with the filtering done
// server-side via the Since option.
func getCommentsAfterLastModified(obj *github.MungeObject) ([]*githubapi.IssueComment, error) {
	afterLastModified := func(opt *githubapi.IssueListCommentsOptions) *githubapi.IssueListCommentsOptions {
		// Only comments updated at or after this time are returned.
		// One possible case is that reviewer might "/lgtm" first, contributor updated PR, and reviewer updated "/lgtm".
		// This is still valid. We don't recommend user to update it.
		// NOTE(review): LastModifiedTime is dereferenced unconditionally —
		// assumed to never return nil; confirm.
		lastModified := *obj.LastModifiedTime()
		opt.Since = lastModified
		return opt
	}
	return obj.ListComments(afterLastModified)
}
|
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungers
import (
"time"
"github.com/golang/glog"
"github.com/google/go-github/github"
"github.com/spf13/cobra"
"k8s.io/contrib/mungegithub/features"
mgh "k8s.io/contrib/mungegithub/github"
c "k8s.io/contrib/mungegithub/mungers/matchers/comment"
"k8s.io/contrib/mungegithub/mungers/mungerutil"
)
const (
	// flakeNagNotifName identifies this munger's ping comments.
	flakeNagNotifName = "FLAKE-PING"
	// defaultTimePeriod is priority/P1 (to get a human to prioritize)
	defaultTimePeriod = 4 * 24 * time.Hour
)
var (
	// pinger builds the periodic "please look at this" notifications.
	pinger = c.NewPinger(flakeNagNotifName).
		SetDescription("This flaky-test issue would love to have more attention.")
	// Only include priorities that you care about. Others won't be pinged
	timePeriods = map[string]time.Duration{
		"priority/P0": 2 * 24 * time.Hour,
		"priority/P1": 4 * 24 * time.Hour,
		"priority/P2": 2 * 30 * 24 * time.Hour,
		// priority/P3 uses the maximum Duration, i.e. effectively never pings.
		"priority/P3": time.Duration(1<<63 - 1),
	}
)
// NagFlakeIssues pings assignees on flaky-test issues
type NagFlakeIssues struct{}
// Compile-time check that NagFlakeIssues satisfies the Munger interface.
var _ Munger = &NagFlakeIssues{}
// init registers the munger (and its stale-comment cleaner) at program start.
func init() {
	n := &NagFlakeIssues{}
	RegisterMungerOrDie(n)
	RegisterStaleComments(n)
}
// Name is the name usable in --pr-mungers
func (NagFlakeIssues) Name() string { return "nag-flake-issues" }
// RequiredFeatures is a slice of 'features' that must be provided; none here.
func (NagFlakeIssues) RequiredFeatures() []string { return []string{} }
// Initialize will initialize the munger; nothing to set up.
func (NagFlakeIssues) Initialize(config *mgh.Config, features *features.Features) error {
	return nil
}
// EachLoop is called at the start of every munge loop; nothing to do here.
func (NagFlakeIssues) EachLoop() error { return nil }
// AddFlags will add any request flags to the cobra `cmd`; none are needed.
func (NagFlakeIssues) AddFlags(cmd *cobra.Command, config *mgh.Config) {
}
// findTimePeriod maps an issue's priority label to a ping interval,
// falling back to defaultTimePeriod when no (known) priority label is set.
func findTimePeriod(labels []github.Label) time.Duration {
	priorities := mgh.GetLabelsWithPrefix(labels, "priority/")
	if len(priorities) == 0 {
		return defaultTimePeriod
	}
	// Multiple priority labels shouldn't happen; the first one wins.
	if period, ok := timePeriods[priorities[0]]; ok {
		return period
	}
	return defaultTimePeriod
}
// Munge is the workhorse that will actually make updates: it pings the
// assignees of open flaky-test issues (label "kind/flake", not PRs) when
// they have been quiet for the priority-dependent time period.
func (NagFlakeIssues) Munge(obj *mgh.MungeObject) {
	if obj.IsPR() || !obj.HasLabel("kind/flake") {
		return
	}
	comments, err := obj.ListComments()
	if err != nil {
		glog.Error(err)
		return
	}
	// Use the pinger to notify assignees:
	// - Set time period based on configuration (at the top of this file)
	// - Mention list of assignees as an argument
	// - Start the ping timer after the last HumanActor comment
	// How often should we ping
	period := findTimePeriod(obj.Issue.Labels)
	// Who are we pinging; with nobody assigned there is nothing to do.
	who := mungerutil.GetIssueUsers(obj.Issue).Assignees.Mention().Join()
	if who == "" {
		return
	}
	// When does the pinger start
	startDate := c.LastComment(comments, c.HumanActor(), obj.Issue.CreatedAt)
	// Get a notification if it's time to ping; nil means not yet due.
	notif := pinger.SetTimePeriod(period).PingNotification(
		comments,
		who,
		startDate,
	)
	if notif != nil {
		obj.WriteComment(notif.String())
	}
}
// StaleComments returns this munger's own ping notifications that were
// written before the last human comment, so they can be cleaned up.
func (NagFlakeIssues) StaleComments(obj *mgh.MungeObject, comments []*github.IssueComment) []*github.IssueComment {
	// Remove all pings written before the last human actor comment.
	// NOTE(review): LastComment is dereferenced unconditionally — assumed to
	// never return nil (the &time.Time{} fallback covers no-comment issues);
	// confirm.
	return c.FilterComments(comments, c.And([]c.Matcher{
		c.MungerNotificationName(flakeNagNotifName),
		c.CreatedBefore(*c.LastComment(comments, c.HumanActor(), &time.Time{})),
	}))
}
mungegithub: s/NotifName/NotifyName/
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mungers
import (
"time"
"github.com/golang/glog"
"github.com/google/go-github/github"
"github.com/spf13/cobra"
"k8s.io/contrib/mungegithub/features"
mgh "k8s.io/contrib/mungegithub/github"
c "k8s.io/contrib/mungegithub/mungers/matchers/comment"
"k8s.io/contrib/mungegithub/mungers/mungerutil"
)
const (
	// flakeNagNotifyName identifies this munger's ping comments.
	flakeNagNotifyName = "FLAKE-PING"
	// defaultTimePeriod is priority/P1 (to get a human to prioritize)
	defaultTimePeriod = 4 * 24 * time.Hour
)
var (
	// pinger builds the periodic "please look at this" notifications.
	pinger = c.NewPinger(flakeNagNotifyName).
		SetDescription("This flaky-test issue would love to have more attention.")
	// Only include priorities that you care about. Others won't be pinged
	timePeriods = map[string]time.Duration{
		"priority/P0": 2 * 24 * time.Hour,
		"priority/P1": 4 * 24 * time.Hour,
		"priority/P2": 2 * 30 * 24 * time.Hour,
		// priority/P3 uses the maximum Duration, i.e. effectively never pings.
		"priority/P3": time.Duration(1<<63 - 1),
	}
)
// NagFlakeIssues pings assignees on flaky-test issues
type NagFlakeIssues struct{}
// Compile-time check that NagFlakeIssues satisfies the Munger interface.
var _ Munger = &NagFlakeIssues{}
// init registers the munger (and its stale-comment cleaner) at program start.
func init() {
	n := &NagFlakeIssues{}
	RegisterMungerOrDie(n)
	RegisterStaleComments(n)
}
// Name is the name usable in --pr-mungers
func (NagFlakeIssues) Name() string { return "nag-flake-issues" }
// RequiredFeatures is a slice of 'features' that must be provided; none here.
func (NagFlakeIssues) RequiredFeatures() []string { return []string{} }
// Initialize will initialize the munger; nothing to set up.
func (NagFlakeIssues) Initialize(config *mgh.Config, features *features.Features) error {
	return nil
}
// EachLoop is called at the start of every munge loop; nothing to do here.
func (NagFlakeIssues) EachLoop() error { return nil }
// AddFlags will add any request flags to the cobra `cmd`; none are needed.
func (NagFlakeIssues) AddFlags(cmd *cobra.Command, config *mgh.Config) {
}
// findTimePeriod maps an issue's priority label to a ping interval,
// falling back to defaultTimePeriod when no (known) priority label is set.
func findTimePeriod(labels []github.Label) time.Duration {
	priorities := mgh.GetLabelsWithPrefix(labels, "priority/")
	if len(priorities) == 0 {
		return defaultTimePeriod
	}
	// Multiple priority labels shouldn't happen; the first one wins.
	if period, ok := timePeriods[priorities[0]]; ok {
		return period
	}
	return defaultTimePeriod
}
// Munge is the workhorse that will actually make updates: it pings the
// assignees of open flaky-test issues (label "kind/flake", not PRs) when
// they have been quiet for the priority-dependent time period.
func (NagFlakeIssues) Munge(obj *mgh.MungeObject) {
	if obj.IsPR() || !obj.HasLabel("kind/flake") {
		return
	}
	comments, err := obj.ListComments()
	if err != nil {
		glog.Error(err)
		return
	}
	// Use the pinger to notify assignees:
	// - Set time period based on configuration (at the top of this file)
	// - Mention list of assignees as an argument
	// - Start the ping timer after the last HumanActor comment
	// How often should we ping
	period := findTimePeriod(obj.Issue.Labels)
	// Who are we pinging; with nobody assigned there is nothing to do.
	who := mungerutil.GetIssueUsers(obj.Issue).Assignees.Mention().Join()
	if who == "" {
		return
	}
	// When does the pinger start
	startDate := c.LastComment(comments, c.HumanActor(), obj.Issue.CreatedAt)
	// Get a notification if it's time to ping; nil means not yet due.
	notif := pinger.SetTimePeriod(period).PingNotification(
		comments,
		who,
		startDate,
	)
	if notif != nil {
		obj.WriteComment(notif.String())
	}
}
// StaleComments returns this munger's own ping notifications that were
// written before the last human comment, so they can be cleaned up.
func (NagFlakeIssues) StaleComments(obj *mgh.MungeObject, comments []*github.IssueComment) []*github.IssueComment {
	// Remove all pings written before the last human actor comment.
	// NOTE(review): LastComment is dereferenced unconditionally — assumed to
	// never return nil (the &time.Time{} fallback covers no-comment issues);
	// confirm.
	return c.FilterComments(comments, c.And([]c.Matcher{
		c.MungerNotificationName(flakeNagNotifyName),
		c.CreatedBefore(*c.LastComment(comments, c.HumanActor(), &time.Time{})),
	}))
}
|
package hsts
import (
"net/http"
"reflect"
"testing"
)
// TestTransport_RoundTrip drives the HSTS transport through an ordered
// script: plain HTTP, an HTTP STS header that must be ignored, an HTTPS
// response that sets Strict-Transport-Security (so a later HTTP request is
// redirected), removal via max-age=0, and a final max-age bookkeeping check.
// The cases are order-dependent because they share one Transport/MemStorage.
//
// Composite-literal element types simplified per `gofmt -s` (the redundant
// `[]string` in each map value is implied by the map type).
func TestTransport_RoundTrip(t *testing.T) {
	memStorage := &MemStorage{}
	hstsTransport := &Transport{
		Storage: memStorage,
	}
	// Expected result for the case where HSTS upgrades an HTTP request.
	expRedirect, _ := redirect("https://example.com/index.html", newRequest("http://example.com/index.html"))
	tests := []struct {
		name     string
		server   func(req *http.Request) (*http.Response, error)
		req      *http.Request
		expected *http.Response
	}{
		{
			name: "normal",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header under http",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme":                    {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme":                    {"http"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
		{
			name: "ensure http STS header was ignored",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme":                    {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme":                    {"https"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
		{
			name: "verify hsts header was set",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req:      newRequest("http://example.com/index.html"),
			expected: expRedirect,
		},
		{
			name: "remove hsts",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme":                    {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=0"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme":                    {"https"},
					"Strict-Transport-Security": {"max-age=0"},
				},
			},
		},
		{
			name: "verify hsts header is no longer set",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header for max-age verification",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme":                    {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme":                    {"https"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
	}
	for _, test := range tests {
		// inject the stub transport
		hstsTransport.Transport = &stubTrip{f: test.server}
		// perform the call
		out, err := hstsTransport.RoundTrip(test.req)
		if err != nil {
			t.Logf("got err:%v", err)
			t.Fatalf("test case failed: %s", test.name)
		}
		if !equalResponses(out, test.expected) {
			t.Logf("got:%v", out)
			t.Logf("want:%v", test.expected)
			t.Fatalf("test case failed: %s", test.name)
		}
	}
	if memStorage.domains["example.com"].MaxAge != 10000 {
		t.Fatalf("max-age not set properly")
	}
}
// newRequest builds a GET request for url, ignoring the (impossible for the
// fixed inputs used here) construction error.
func newRequest(url string) *http.Request {
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	return req
}
// stubTrip is a test double for http.RoundTripper that delegates each call
// to the provided function.
type stubTrip struct {
	f func(req *http.Request) (*http.Response, error)
}
// RoundTrip implements http.RoundTripper by invoking the stub function.
func (s *stubTrip) RoundTrip(req *http.Request) (*http.Response, error) {
	return s.f(req)
}
// equalResponses is a custom comparator for http.Response since the Body
// parameter does not compare well with reflect.DeepEqual
func equalResponses(a, b *http.Response) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
if a.Status != b.Status {
return false
}
if a.Proto != b.Proto {
return false
}
if !reflect.DeepEqual(a.Header, b.Header) {
return false
}
if !reflect.DeepEqual(a.Request, b.Request) {
return false
}
return true
}
gofmt hsts test
package hsts
import (
"net/http"
"reflect"
"testing"
)
// TestTransport_RoundTrip drives the HSTS transport through an ordered
// script: plain HTTP, an HTTP STS header that must be ignored, an HTTPS
// response that sets Strict-Transport-Security (so a later HTTP request is
// redirected), removal via max-age=0, and a final max-age bookkeeping check.
// The cases are order-dependent because they share one Transport/MemStorage.
func TestTransport_RoundTrip(t *testing.T) {
	memStorage := &MemStorage{}
	hstsTransport := &Transport{
		Storage: memStorage,
	}
	// Expected result for the case where HSTS upgrades an HTTP request.
	expRedirect, _ := redirect("https://example.com/index.html", newRequest("http://example.com/index.html"))
	tests := []struct {
		name string
		server func(req *http.Request) (*http.Response, error)
		req *http.Request
		expected *http.Response
	}{
		{
			name: "normal",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header under http",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
		{
			name: "ensure http STS header was ignored",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"https"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
		{
			name: "verify hsts header was set",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: expRedirect,
		},
		{
			name: "remove hsts",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=0"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"https"},
					"Strict-Transport-Security": {"max-age=0"},
				},
			},
		},
		{
			name: "verify hsts header is no longer set",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
					},
				}, nil
			},
			req: newRequest("http://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"http"},
				},
			},
		},
		{
			name: "set hsts header for max-age verification",
			server: func(req *http.Request) (*http.Response, error) {
				return &http.Response{
					StatusCode: 200,
					Header: map[string][]string{
						"scheme": {req.URL.Scheme},
						"Strict-Transport-Security": {"max-age=10000"},
					},
				}, nil
			},
			req: newRequest("https://example.com/index.html"),
			expected: &http.Response{
				StatusCode: 200,
				Header: map[string][]string{
					"scheme": {"https"},
					"Strict-Transport-Security": {"max-age=10000"},
				},
			},
		},
	}
	for _, test := range tests {
		// inject the stub transport
		hstsTransport.Transport = &stubTrip{f: test.server}
		// perform the call
		out, err := hstsTransport.RoundTrip(test.req)
		if err != nil {
			t.Logf("got err:%v", err)
			t.Fatalf("test case failed: %s", test.name)
		}
		if !equalResponses(out, test.expected) {
			t.Logf("got:%v", out)
			t.Logf("want:%v", test.expected)
			t.Fatalf("test case failed: %s", test.name)
		}
	}
	// The last case re-set max-age=10000; verify the stored entry agrees.
	if memStorage.domains["example.com"].MaxAge != 10000 {
		t.Fatalf("max-age not set properly")
	}
}
// newRequest builds a GET request for url, ignoring the (impossible for the
// fixed inputs used here) construction error.
func newRequest(url string) *http.Request {
	req, _ := http.NewRequest(http.MethodGet, url, nil)
	return req
}
// stubTrip is a test double for http.RoundTripper that delegates each call
// to the provided function.
type stubTrip struct {
	f func(req *http.Request) (*http.Response, error)
}
// RoundTrip implements http.RoundTripper by invoking the stub function.
func (s *stubTrip) RoundTrip(req *http.Request) (*http.Response, error) {
	return s.f(req)
}
// equalResponses is a custom comparator for http.Response since the Body
// parameter does not compare well with reflect.DeepEqual
func equalResponses(a, b *http.Response) bool {
if a == nil && b == nil {
return true
}
if a == nil || b == nil {
return false
}
if a.Status != b.Status {
return false
}
if a.Proto != b.Proto {
return false
}
if !reflect.DeepEqual(a.Header, b.Header) {
return false
}
if !reflect.DeepEqual(a.Request, b.Request) {
return false
}
return true
}
|
package main
import (
"flag"
"log"
"net/http"
"github.com/antongulenko/http-isolation-proxy/proxy"
"github.com/antongulenko/http-isolation-proxy/services"
"github.com/go-ini/ini"
"github.com/kardianos/osext"
)
const (
	// stats_path is the HTTP path serving proxy statistics.
	stats_path = "/stats"
	// runtime_path is the HTTP path serving Go runtime statistics.
	runtime_path = "/runtime"
	// open_files is the file-descriptor limit requested at startup.
	open_files = 40000
)
// check aborts the process with the error when err is non-nil.
func check(err error) {
	if err == nil {
		return
	}
	log.Fatalln(err)
}
// loadServiceRegistry builds the proxy's endpoint registry from the
// [backends] config section: each key is a service name, each value a
// comma-separated list of endpoint addresses. Every endpoint is probed once
// (TestActive) before being registered. A missing [backends] section aborts
// the process via check.
func loadServiceRegistry(confIni *ini.File) proxy.LocalRegistry {
	reg := make(proxy.LocalRegistry)
	addService := func(name string, endpoints ...string) {
		for _, addr := range endpoints {
			endpoint := &proxy.Endpoint{
				Service: name,
				Host: addr,
			}
			endpoint.TestActive()
			reg.Add(name, endpoint)
		}
	}
	confSection, err := confIni.GetSection("backends")
	check(err)
	for _, service := range confSection.Keys() {
		addService(service.Name(), service.Strings(",")...)
	}
	return reg
}
// handleServices starts one proxying goroutine per entry of the [services]
// config section (key = service name, value = listen address). Handle errors
// abort the process via check.
func handleServices(confIni *ini.File, p *proxy.IsolationProxy) {
	confSection, err := confIni.GetSection("services")
	check(err)
	for _, service := range confSection.Keys() {
		// service is passed as an argument so each goroutine captures its
		// own loop value (pre-Go 1.22 loop-variable semantics).
		go func(service *ini.Key) {
			check(p.Handle(service.Name(), service.String()))
		}(service)
	}
}
// main wires up the isolation proxy: it loads backend endpoints from the
// config file (next to the executable by default), starts proxying each
// configured service, and blocks serving stats over HTTP on -stats.
func main() {
	execFolder, err := osext.ExecutableFolder()
	check(err)
	configFile := flag.String("conf", execFolder+"/isolator.ini", "Config containing isolated external services")
	statsAddr := flag.String("stats", ":7777", "Address to serve statistics (HTTP+JSON on "+stats_path+" and "+runtime_path+")")
	flag.Parse()
	// Raise the fd limit before opening many proxy connections.
	check(services.SetOpenFilesLimit(open_files))
	confIni, err := ini.Load(*configFile)
	check(err)
	p := &proxy.IsolationProxy{
		Registry: loadServiceRegistry(confIni),
	}
	services.EnableResponseLogging()
	p.ServeStats(stats_path)
	proxy.ServeRuntimeStats(runtime_path)
	handleServices(confIni, p)
	// Blocks; serves both the stats endpoints and the default mux.
	check(http.ListenAndServe(*statsAddr, nil))
}
added exception in isolator for locally running services.
package main
import (
"flag"
"log"
"net/http"
"github.com/antongulenko/http-isolation-proxy/proxy"
"github.com/antongulenko/http-isolation-proxy/services"
"github.com/go-ini/ini"
"github.com/kardianos/osext"
)
const (
	// stats_path is the HTTP path serving proxy statistics.
	stats_path = "/stats"
	// runtime_path is the HTTP path serving Go runtime statistics.
	runtime_path = "/runtime"
	// open_files is the file-descriptor limit requested at startup.
	open_files = 40000
)
// check aborts the process with the error when err is non-nil.
func check(err error) {
	if err == nil {
		return
	}
	log.Fatalln(err)
}
// loadServiceRegistry builds the proxy's endpoint registry from the
// [backends] config section: each key is a service name, each value a
// comma-separated list of endpoint addresses. Every endpoint is probed once
// (TestActive) before being registered. A missing [backends] section aborts
// the process via check.
func loadServiceRegistry(confIni *ini.File) proxy.LocalRegistry {
	reg := make(proxy.LocalRegistry)
	addService := func(name string, endpoints ...string) {
		for _, addr := range endpoints {
			endpoint := &proxy.Endpoint{
				Service: name,
				Host: addr,
			}
			endpoint.TestActive()
			reg.Add(name, endpoint)
		}
	}
	confSection, err := confIni.GetSection("backends")
	check(err)
	for _, service := range confSection.Keys() {
		addService(service.Name(), service.Strings(",")...)
	}
	return reg
}
// isRunningLocally reports whether serviceEndpoint is already registered as
// one of the endpoints for service, i.e. the service appears to be served
// from this address and must not be proxied onto itself.
func isRunningLocally(service string, serviceEndpoint string, reg proxy.Registry) bool {
	endpoints, err := reg.Endpoints(service)
	if err != nil {
		return false
	}
	for _, ep := range endpoints {
		// TODO should compare IP/host and port
		if ep.Host == serviceEndpoint {
			return true
		}
	}
	return false
}
// handleServices starts one proxying goroutine per entry of the [services]
// config section (key = service name, value = listen address), skipping any
// service whose configured address is already a registered backend endpoint
// (it would proxy onto itself). Handle errors abort the process via check.
func handleServices(confIni *ini.File, p *proxy.IsolationProxy) {
	confSection, err := confIni.GetSection("services")
	check(err)
	for _, service := range confSection.Keys() {
		if !isRunningLocally(service.Name(), service.String(), p.Registry) {
			// service is passed as an argument so each goroutine captures
			// its own loop value (pre-Go 1.22 loop-variable semantics).
			go func(service *ini.Key) {
				check(p.Handle(service.Name(), service.String()))
			}(service)
		} else {
			// If the service should be running locally on the same port, don't proxy it
			services.L.Warnf("Not handling %s on %s: should be running locally", service.Name(), service.String())
		}
	}
}
// main wires up the isolation proxy: parses flags, raises the fd limit, loads
// the INI config, registers backend endpoints, starts per-service handlers,
// and finally serves statistics over HTTP (blocking for the process lifetime).
func main() {
	execFolder, err := osext.ExecutableFolder()
	check(err)
	configFile := flag.String("conf", execFolder+"/isolator.ini", "Config containing isolated external services")
	statsAddr := flag.String("stats", ":7777", "Address to serve statistics (HTTP+JSON on "+stats_path+" and "+runtime_path+")")
	flag.Parse()
	// Raise the open-file limit up front: the proxy may hold many sockets.
	check(services.SetOpenFilesLimit(open_files))
	confIni, err := ini.Load(*configFile)
	check(err)
	p := &proxy.IsolationProxy{
		Registry: loadServiceRegistry(confIni),
	}
	services.EnableResponseLogging()
	p.ServeStats(stats_path)
	proxy.ServeRuntimeStats(runtime_path)
	handleServices(confIni, p)
	// Blocks until the HTTP server fails.
	check(http.ListenAndServe(*statsAddr, nil))
}
|
package main
import (
"errors"
"testing"
)
// TestGetNewSet verifies the basic New/Set/Get round trip for an owner key.
func TestGetNewSet(t *testing.T) {
	g := NewGateKeeper()
	g.owners["key"] = true
	err := g.New("name", "value", "key")
	if err != nil {
		t.Error(err)
	}
	err = g.Set("name", "value2", "key")
	if err != nil {
		t.Error(err)
	}
	// Fix: the error returned by Get was silently discarded; a failing Get
	// previously surfaced only as a confusing value mismatch.
	v, err := g.Get("name", "key")
	if err != nil {
		t.Error(err)
	}
	if v != "value2" {
		t.Error(errors.New("key value is not value2"))
	}
}
gatekeeper: Add permission tests
package main
import (
"errors"
"testing"
)
// TestGetNewSet verifies the basic New/Set/Get round trip for an owner key.
func TestGetNewSet(t *testing.T) {
	g := NewGateKeeper()
	g.owners["key"] = true
	err := g.New("name", "value", "key")
	if err != nil {
		t.Error(err)
	}
	err = g.Set("name", "value2", "key")
	if err != nil {
		t.Error(err)
	}
	// Fix: the error returned by Get was silently discarded; a failing Get
	// previously surfaced only as a confusing value mismatch.
	v, err := g.Get("name", "key")
	if err != nil {
		t.Error(err)
	}
	if v != "value2" {
		t.Error(errors.New("key value is not value2"))
	}
}
// TestPermission verifies that AddAccess grants a second key read access to a
// value and that RemoveAccess revokes it again.
func TestPermission(t *testing.T) {
	g := NewGateKeeper()
	g.owners["key"] = true
	err := g.New("name", "value", "key")
	if err != nil {
		t.Error(err)
	}
	// Grant "keyother" access via the owning key.
	err = g.AddAccess("name", "key", "keyother")
	if err != nil {
		t.Error(err)
	}
	v, err := g.Get("name", "keyother")
	if err != nil {
		t.Error(err)
	}
	if v != "value" {
		t.Error(errors.New("key value is not value"))
	}
	err = g.RemoveAccess("name", "key", "keyother")
	if err != nil {
		t.Error(err)
	}
	// After revocation the same Get must fail with an error.
	v, err = g.Get("name", "keyother")
	if err == nil {
		t.Error(errors.New("No permission denied thrown"))
	}
}
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Job E2E Test", func() {
It("Schedule Job", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
_, pg := createJob(context, &jobSpec{
name: "qj-1",
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: 2,
rep: rep,
},
},
})
err := waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Schedule Multiple Jobs", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
job := &jobSpec{
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: 2,
rep: rep,
},
},
}
job.name = "mqj-1"
_, pg1 := createJob(context, job)
job.name = "mqj-2"
_, pg2 := createJob(context, job)
job.name = "mqj-3"
_, pg3 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
err = waitPodGroupReady(context, pg2)
checkError(context, err)
err = waitPodGroupReady(context, pg3)
checkError(context, err)
})
It("Gang scheduling", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)/2 + 1
replicaset := createReplicaSet(context, "rs-1", rep, "nginx", oneCPU)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
job := &jobSpec{
name: "gang-qj",
namespace: "test",
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: rep,
rep: rep,
},
},
}
_, pg := createJob(context, job)
err = waitPodGroupPending(context, pg)
checkError(context, err)
err = waitPodGroupUnschedulable(context, pg)
checkError(context, err)
err = deleteReplicaSet(context, replicaset.Name)
checkError(context, err)
err = waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Gang scheduling: Full Occupied", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
job := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep,
rep: rep,
},
},
}
job.name = "gang-fq-qj1"
_, pg1 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
job.name = "gang-fq-qj2"
_, pg2 := createJob(context, job)
err = waitPodGroupPending(context, pg2)
checkError(context, err)
err = waitPodGroupReady(context, pg1)
checkError(context, err)
})
// Fix: both jobs ran at the same (default) priority, so the scheduler had no
// reason to evict the preemptee's tasks and the preemption waits could never
// succeed. The preemptee now runs at workerPriority and the preemptors at
// masterPriority. (This matches the upstream "fix e2e preemption test" change.)
It("Preemption", func() {
	context := initTestContext()
	defer cleanupTestContext(context)
	slot := oneCPU
	rep := clusterSize(context, slot)
	job := &jobSpec{
		tasks: []taskSpec{
			{
				img: "nginx",
				req: slot,
				min: 1,
				rep: rep,
			},
		},
	}
	job.name = "preemptee-qj"
	job.pri = workerPriority
	_, pg1 := createJob(context, job)
	err := waitTasksReady(context, pg1, int(rep))
	checkError(context, err)
	job.name = "preemptor-qj"
	job.pri = masterPriority
	_, pg2 := createJob(context, job)
	err = waitTasksReady(context, pg1, int(rep)/2)
	checkError(context, err)
	err = waitTasksReady(context, pg2, int(rep)/2)
	checkError(context, err)
})
It("Multiple Preemption", func() {
	context := initTestContext()
	defer cleanupTestContext(context)
	slot := oneCPU
	rep := clusterSize(context, slot)
	job := &jobSpec{
		tasks: []taskSpec{
			{
				img: "nginx",
				req: slot,
				min: 1,
				rep: rep,
			},
		},
	}
	job.name = "preemptee-qj"
	job.pri = workerPriority
	_, pg1 := createJob(context, job)
	err := waitTasksReady(context, pg1, int(rep))
	checkError(context, err)
	job.name = "preemptor-qj1"
	job.pri = masterPriority
	_, pg2 := createJob(context, job)
	checkError(context, err)
	job.name = "preemptor-qj2"
	job.pri = masterPriority
	_, pg3 := createJob(context, job)
	checkError(context, err)
	err = waitTasksReady(context, pg1, int(rep)/3)
	checkError(context, err)
	err = waitTasksReady(context, pg2, int(rep)/3)
	checkError(context, err)
	err = waitTasksReady(context, pg3, int(rep)/3)
	checkError(context, err)
})
It("Schedule BestEffort Job", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
name: "test",
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: 2,
rep: rep,
},
{
img: "nginx",
min: 2,
rep: rep / 2,
},
},
}
_, pg := createJob(context, job)
err := waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Statement", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: rep,
rep: rep,
},
},
}
job.name = "st-qj-1"
_, pg1 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
now := time.Now()
job.name = "st-qj-2"
_, pg2 := createJob(context, job)
err = waitPodGroupUnschedulable(context, pg2)
checkError(context, err)
// No preemption event
evicted, err := podGroupEvicted(context, pg1, now)()
checkError(context, err)
Expect(evicted).NotTo(BeTrue())
})
// TaskPriority: with the cluster half-occupied, a job whose high-priority
// master task and low-priority worker tasks compete for the remaining slots
// should schedule the master first.
It("TaskPriority", func() {
	context := initTestContext()
	defer cleanupTestContext(context)
	slot := oneCPU
	rep := clusterSize(context, slot)
	replicaset := createReplicaSet(context, "rs-1", rep/2, "nginx", slot)
	err := waitReplicaSetReady(context, replicaset.Name)
	checkError(context, err)
	_, pg := createJob(context, &jobSpec{
		name: "multi-pod-job",
		tasks: []taskSpec{
			{
				img: "nginx",
				pri: workerPriority,
				min: rep/2 - 1,
				rep: rep,
				req: slot,
			},
			{
				img: "nginx",
				pri: masterPriority,
				min: 1,
				rep: 1,
				req: slot,
			},
		},
	})
	// Fixed local-variable typo: "expteced" -> "expected".
	expected := map[string]int{
		masterPriority: 1,
		workerPriority: int(rep/2) - 1,
	}
	err = waitTasksReadyEx(context, pg, expected)
	checkError(context, err)
})
It("Try to fit unassigned task with different resource requests in one loop", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
minMemberOverride := int32(1)
replicaset := createReplicaSet(context, "rs-1", rep-1, "nginx", slot)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
_, pg := createJob(context, &jobSpec{
name: "multi-task-diff-resource-job",
tasks: []taskSpec{
{
img: "nginx",
pri: masterPriority,
min: 1,
rep: 1,
req: oneAndHalfCPU,
},
{
img: "nginx",
pri: workerPriority,
min: 1,
rep: 1,
req: halfCPU,
},
},
minMember: &minMemberOverride,
})
err = waitPodGroupPending(context, pg)
checkError(context, err)
// task_1 has been scheduled
err = waitTasksReady(context, pg, int(minMemberOverride))
checkError(context, err)
})
It("Job Priority", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
replicaset := createReplicaSet(context, "rs-1", rep, "nginx", slot)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
job1 := &jobSpec{
name: "pri-job-1",
pri: workerPriority,
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep/2 + 1,
rep: rep,
},
},
}
job2 := &jobSpec{
name: "pri-job-2",
pri: masterPriority,
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep/2 + 1,
rep: rep,
},
},
}
createJob(context, job1)
_, pg2 := createJob(context, job2)
// Delete ReplicaSet
err = deleteReplicaSet(context, replicaset.Name)
checkError(context, err)
err = waitPodGroupReady(context, pg2)
checkError(context, err)
})
It("Proportion", func() {
context := initTestContext()
defer cleanupTestContext(context)
createQueues(context)
defer deleteQueues(context)
cpuSlot := halfCPU
cpuRep := clusterSize(context, cpuSlot)
memSlot := oneGigaByteMem
memRep := clusterSize(context, memSlot)
spec2 := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: cpuSlot,
min: 1,
rep: 1,
},
},
}
spec2.name = "q2-job-1"
spec2.queue = "q2"
_, pg2 := createJob(context, spec2)
err := waitPodGroupReady(context, pg2)
checkError(context, err)
spec := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: cpuSlot,
min: cpuRep - 2,
rep: cpuRep - 2,
},
{
img: "nginx",
req: memSlot,
min: memRep/2 - 1,
rep: memRep/2 - 1,
},
},
}
spec.name = "q1-job-1"
spec.queue = "q1"
_, pg1 := createJob(context, spec)
err = waitPodGroupReady(context, pg1)
checkError(context, err)
spec2.name = "q1-job-2"
spec2.queue = "q1"
_, pg3 := createJob(context, spec2)
err = waitPodGroupReady(context, pg3)
checkError(context, err)
})
})
Fix the e2e preemption tests by assigning distinct priorities (workerPriority to the preemptee, masterPriority to the preemptors) so preemption can actually occur.
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Job E2E Test", func() {
It("Schedule Job", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
_, pg := createJob(context, &jobSpec{
name: "qj-1",
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: 2,
rep: rep,
},
},
})
err := waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Schedule Multiple Jobs", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
job := &jobSpec{
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: 2,
rep: rep,
},
},
}
job.name = "mqj-1"
_, pg1 := createJob(context, job)
job.name = "mqj-2"
_, pg2 := createJob(context, job)
job.name = "mqj-3"
_, pg3 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
err = waitPodGroupReady(context, pg2)
checkError(context, err)
err = waitPodGroupReady(context, pg3)
checkError(context, err)
})
It("Gang scheduling", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)/2 + 1
replicaset := createReplicaSet(context, "rs-1", rep, "nginx", oneCPU)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
job := &jobSpec{
name: "gang-qj",
namespace: "test",
tasks: []taskSpec{
{
img: "busybox",
req: oneCPU,
min: rep,
rep: rep,
},
},
}
_, pg := createJob(context, job)
err = waitPodGroupPending(context, pg)
checkError(context, err)
err = waitPodGroupUnschedulable(context, pg)
checkError(context, err)
err = deleteReplicaSet(context, replicaset.Name)
checkError(context, err)
err = waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Gang scheduling: Full Occupied", func() {
context := initTestContext()
defer cleanupTestContext(context)
rep := clusterSize(context, oneCPU)
job := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep,
rep: rep,
},
},
}
job.name = "gang-fq-qj1"
_, pg1 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
job.name = "gang-fq-qj2"
_, pg2 := createJob(context, job)
err = waitPodGroupPending(context, pg2)
checkError(context, err)
err = waitPodGroupReady(context, pg1)
checkError(context, err)
})
It("Preemption", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: 1,
rep: rep,
},
},
}
job.name = "preemptee-qj"
job.pri = workerPriority
_, pg1 := createJob(context, job)
err := waitTasksReady(context, pg1, int(rep))
checkError(context, err)
job.name = "preemptor-qj"
job.pri = masterPriority
_, pg2 := createJob(context, job)
err = waitTasksReady(context, pg1, int(rep)/2)
checkError(context, err)
err = waitTasksReady(context, pg2, int(rep)/2)
checkError(context, err)
})
It("Multiple Preemption", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: 1,
rep: rep,
},
},
}
job.name = "preemptee-qj"
job.pri = workerPriority
_, pg1 := createJob(context, job)
err := waitTasksReady(context, pg1, int(rep))
checkError(context, err)
job.name = "preemptor-qj1"
job.pri = masterPriority
_, pg2 := createJob(context, job)
checkError(context, err)
job.name = "preemptor-qj2"
job.pri = masterPriority
_, pg3 := createJob(context, job)
checkError(context, err)
err = waitTasksReady(context, pg1, int(rep)/3)
checkError(context, err)
err = waitTasksReady(context, pg2, int(rep)/3)
checkError(context, err)
err = waitTasksReady(context, pg3, int(rep)/3)
checkError(context, err)
})
It("Schedule BestEffort Job", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
name: "test",
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: 2,
rep: rep,
},
{
img: "nginx",
min: 2,
rep: rep / 2,
},
},
}
_, pg := createJob(context, job)
err := waitPodGroupReady(context, pg)
checkError(context, err)
})
It("Statement", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
job := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: slot,
min: rep,
rep: rep,
},
},
}
job.name = "st-qj-1"
_, pg1 := createJob(context, job)
err := waitPodGroupReady(context, pg1)
checkError(context, err)
now := time.Now()
job.name = "st-qj-2"
_, pg2 := createJob(context, job)
err = waitPodGroupUnschedulable(context, pg2)
checkError(context, err)
// No preemption event
evicted, err := podGroupEvicted(context, pg1, now)()
checkError(context, err)
Expect(evicted).NotTo(BeTrue())
})
// TaskPriority: with the cluster half-occupied, a job whose high-priority
// master task and low-priority worker tasks compete for the remaining slots
// should schedule the master first.
It("TaskPriority", func() {
	context := initTestContext()
	defer cleanupTestContext(context)
	slot := oneCPU
	rep := clusterSize(context, slot)
	replicaset := createReplicaSet(context, "rs-1", rep/2, "nginx", slot)
	err := waitReplicaSetReady(context, replicaset.Name)
	checkError(context, err)
	_, pg := createJob(context, &jobSpec{
		name: "multi-pod-job",
		tasks: []taskSpec{
			{
				img: "nginx",
				pri: workerPriority,
				min: rep/2 - 1,
				rep: rep,
				req: slot,
			},
			{
				img: "nginx",
				pri: masterPriority,
				min: 1,
				rep: 1,
				req: slot,
			},
		},
	})
	// Fixed local-variable typo: "expteced" -> "expected".
	expected := map[string]int{
		masterPriority: 1,
		workerPriority: int(rep/2) - 1,
	}
	err = waitTasksReadyEx(context, pg, expected)
	checkError(context, err)
})
It("Try to fit unassigned task with different resource requests in one loop", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
minMemberOverride := int32(1)
replicaset := createReplicaSet(context, "rs-1", rep-1, "nginx", slot)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
_, pg := createJob(context, &jobSpec{
name: "multi-task-diff-resource-job",
tasks: []taskSpec{
{
img: "nginx",
pri: masterPriority,
min: 1,
rep: 1,
req: oneAndHalfCPU,
},
{
img: "nginx",
pri: workerPriority,
min: 1,
rep: 1,
req: halfCPU,
},
},
minMember: &minMemberOverride,
})
err = waitPodGroupPending(context, pg)
checkError(context, err)
// task_1 has been scheduled
err = waitTasksReady(context, pg, int(minMemberOverride))
checkError(context, err)
})
It("Job Priority", func() {
context := initTestContext()
defer cleanupTestContext(context)
slot := oneCPU
rep := clusterSize(context, slot)
replicaset := createReplicaSet(context, "rs-1", rep, "nginx", slot)
err := waitReplicaSetReady(context, replicaset.Name)
checkError(context, err)
job1 := &jobSpec{
name: "pri-job-1",
pri: workerPriority,
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep/2 + 1,
rep: rep,
},
},
}
job2 := &jobSpec{
name: "pri-job-2",
pri: masterPriority,
tasks: []taskSpec{
{
img: "nginx",
req: oneCPU,
min: rep/2 + 1,
rep: rep,
},
},
}
createJob(context, job1)
_, pg2 := createJob(context, job2)
// Delete ReplicaSet
err = deleteReplicaSet(context, replicaset.Name)
checkError(context, err)
err = waitPodGroupReady(context, pg2)
checkError(context, err)
})
It("Proportion", func() {
context := initTestContext()
defer cleanupTestContext(context)
createQueues(context)
defer deleteQueues(context)
cpuSlot := halfCPU
cpuRep := clusterSize(context, cpuSlot)
memSlot := oneGigaByteMem
memRep := clusterSize(context, memSlot)
spec2 := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: cpuSlot,
min: 1,
rep: 1,
},
},
}
spec2.name = "q2-job-1"
spec2.queue = "q2"
_, pg2 := createJob(context, spec2)
err := waitPodGroupReady(context, pg2)
checkError(context, err)
spec := &jobSpec{
namespace: "test",
tasks: []taskSpec{
{
img: "nginx",
req: cpuSlot,
min: cpuRep - 2,
rep: cpuRep - 2,
},
{
img: "nginx",
req: memSlot,
min: memRep/2 - 1,
rep: memRep/2 - 1,
},
},
}
spec.name = "q1-job-1"
spec.queue = "q1"
_, pg1 := createJob(context, spec)
err = waitPodGroupReady(context, pg1)
checkError(context, err)
spec2.name = "q1-job-2"
spec2.queue = "q1"
_, pg3 := createJob(context, spec2)
err = waitPodGroupReady(context, pg3)
checkError(context, err)
})
})
|
package main
import (
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
)
// initiateCluster initiates a single-member MongoDB replica set ("rs0") on
// the local mongod, registering host as member 0.
// Fix: the mgo session was never closed, leaking a connection per call.
func initiateCluster(host string) error {
	cfg := bson.M{
		"_id": "rs0",
		"members": []bson.M{
			bson.M{"_id": 0, "host": host},
		},
	}
	dbSession, err := mgo.DialWithInfo(
		&mgo.DialInfo{
			Addrs:    []string{"localhost"},
			Direct:   true,
			FailFast: true,
			Database: "admin",
		})
	if err != nil {
		return err
	}
	defer dbSession.Close()
	dbSession.SetMode(mgo.Monotonic, true)
	result := bson.D{}
	log.Print(bson.D{{"replSetInitiate", cfg}})
	err = dbSession.Run(bson.D{{"replSetInitiate", cfg}}, &result)
	return err
}
// getNodeInfo returns the "members" array of the local replica-set
// configuration document (local.system.replset).
// Fixes: the session was never closed (connection leak), the query error was
// ignored before indexing, and result[0] panicked when no config exists.
func getNodeInfo() (interface{}, error) {
	dbSession, err := mgo.DialWithInfo(
		&mgo.DialInfo{
			Addrs:    []string{"localhost"},
			Direct:   true,
			FailFast: true,
			Database: "local",
		})
	if err != nil {
		return nil, err
	}
	defer dbSession.Close()
	var result []bson.M
	err = dbSession.DB("local").C("system.replset").Find(bson.M{}).All(&result)
	if err != nil {
		return nil, err
	}
	log.Println(result)
	if len(result) == 0 {
		// No replica-set configuration document yet.
		return nil, mgo.ErrNotFound
	}
	return result[0]["members"], nil
}
Add rabbity metadata tags (rabbity_port) to the MongoDB replica-set member configuration.
package main
import (
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"log"
)
// initiateCluster initiates a single-member MongoDB replica set ("rs0") on
// the local mongod, registering host as member 0 tagged with its rabbity port.
// Fix: the mgo session was never closed, leaking a connection per call.
func initiateCluster(host string) error {
	cfg := bson.M{
		"_id": "rs0",
		"members": []bson.M{
			bson.M{"_id": 0, "host": host, "tags": bson.M{"rabbity_port": "8080"}},
		},
	}
	dbSession, err := mgo.DialWithInfo(
		&mgo.DialInfo{
			Addrs:    []string{"localhost"},
			Direct:   true,
			FailFast: true,
			Database: "admin",
		})
	if err != nil {
		return err
	}
	defer dbSession.Close()
	dbSession.SetMode(mgo.Monotonic, true)
	result := bson.D{}
	log.Print(bson.D{{"replSetInitiate", cfg}})
	err = dbSession.Run(bson.D{{"replSetInitiate", cfg}}, &result)
	return err
}
// getNodeInfo returns the "members" array of the local replica-set
// configuration document (local.system.replset).
// Fixes: the session was never closed (connection leak), the query error was
// ignored before indexing, and result[0] panicked when no config exists.
func getNodeInfo() (interface{}, error) {
	dbSession, err := mgo.DialWithInfo(
		&mgo.DialInfo{
			Addrs:    []string{"localhost"},
			Direct:   true,
			FailFast: true,
			Database: "local",
		})
	if err != nil {
		return nil, err
	}
	defer dbSession.Close()
	var result []bson.M
	err = dbSession.DB("local").C("system.replset").Find(bson.M{}).All(&result)
	if err != nil {
		return nil, err
	}
	log.Println(result)
	if len(result) == 0 {
		// No replica-set configuration document yet.
		return nil, mgo.ErrNotFound
	}
	return result[0]["members"], nil
}
|
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package node
import (
"time"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/version"
)
var (
Version = version.NewDefaultVersion(constants.PlatformName, 1, 4, 2)
MinimumCompatibleVersion = version.NewDefaultVersion(constants.PlatformName, 1, 4, 0)
PrevMinimumCompatibleVersion = version.NewDefaultVersion(constants.PlatformName, 1, 3, 0)
MinimumUnmaskedVersion = version.NewDefaultVersion(constants.PlatformName, 1, 1, 0)
PrevMinimumUnmaskedVersion = version.NewDefaultVersion(constants.PlatformName, 1, 0, 0)
VersionParser = version.NewDefaultParser()
ApricotPhase0Times = map[uint32]time.Time{
constants.MainnetID: time.Date(2020, time.December, 8, 3, 0, 0, 0, time.UTC),
constants.FujiID: time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC),
}
ApricotPhase0DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
ApricotPhase1Times = map[uint32]time.Time{
constants.MainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC),
constants.FujiID: time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC),
}
ApricotPhase1DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
ApricotPhase2Times = map[uint32]time.Time{
constants.MainnetID: time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC),
constants.FujiID: time.Date(2021, time.May, 5, 14, 0, 0, 0, time.UTC),
}
ApricotPhase2DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
)
// GetApricotPhase0Time returns the Apricot Phase 0 upgrade time for
// networkID, or ApricotPhase0DefaultTime when the network has no entry.
func GetApricotPhase0Time(networkID uint32) time.Time {
	if upgradeTime, exists := ApricotPhase0Times[networkID]; exists {
		return upgradeTime
	}
	return ApricotPhase0DefaultTime
}
// GetApricotPhase1Time returns the Apricot Phase 1 upgrade time for
// networkID, or ApricotPhase1DefaultTime when the network has no entry.
func GetApricotPhase1Time(networkID uint32) time.Time {
	if upgradeTime, exists := ApricotPhase1Times[networkID]; exists {
		return upgradeTime
	}
	return ApricotPhase1DefaultTime
}
// GetApricotPhase2Time returns the Apricot Phase 2 upgrade time for
// networkID, or ApricotPhase2DefaultTime when the network has no entry.
func GetApricotPhase2Time(networkID uint32) time.Time {
	if upgradeTime, exists := ApricotPhase2Times[networkID]; exists {
		return upgradeTime
	}
	return ApricotPhase2DefaultTime
}
Bumped node version to 1.4.3.
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package node
import (
"time"
"github.com/ava-labs/avalanchego/utils/constants"
"github.com/ava-labs/avalanchego/version"
)
var (
	// Version is the version of this node build.
	Version = version.NewDefaultVersion(constants.PlatformName, 1, 4, 3)
	// Minimum peer versions accepted, with the previous values kept for the
	// transition window. NOTE(review): exact masking/compat semantics live in
	// the networking layer — confirm there before relying on these.
	MinimumCompatibleVersion     = version.NewDefaultVersion(constants.PlatformName, 1, 4, 0)
	PrevMinimumCompatibleVersion = version.NewDefaultVersion(constants.PlatformName, 1, 3, 0)
	MinimumUnmaskedVersion       = version.NewDefaultVersion(constants.PlatformName, 1, 1, 0)
	PrevMinimumUnmaskedVersion   = version.NewDefaultVersion(constants.PlatformName, 1, 0, 0)
	// VersionParser parses version strings in the default format.
	VersionParser = version.NewDefaultParser()
	// Per-network activation times for each Apricot upgrade phase; networks
	// without an entry fall back to the corresponding default below (see the
	// GetApricotPhaseNTime accessors).
	ApricotPhase0Times = map[uint32]time.Time{
		constants.MainnetID: time.Date(2020, time.December, 8, 3, 0, 0, 0, time.UTC),
		constants.FujiID:    time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC),
	}
	ApricotPhase0DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
	ApricotPhase1Times       = map[uint32]time.Time{
		constants.MainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC),
		constants.FujiID:    time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC),
	}
	ApricotPhase1DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
	ApricotPhase2Times       = map[uint32]time.Time{
		constants.MainnetID: time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC),
		constants.FujiID:    time.Date(2021, time.May, 5, 14, 0, 0, 0, time.UTC),
	}
	ApricotPhase2DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC)
)
// GetApricotPhase0Time returns the Apricot Phase 0 activation time for the
// given network, falling back to the default when the network has no entry.
func GetApricotPhase0Time(networkID uint32) time.Time {
	upgradeTime, ok := ApricotPhase0Times[networkID]
	if !ok {
		return ApricotPhase0DefaultTime
	}
	return upgradeTime
}
// GetApricotPhase1Time returns the Apricot Phase 1 activation time for the
// given network, falling back to the default when the network has no entry.
func GetApricotPhase1Time(networkID uint32) time.Time {
	upgradeTime, ok := ApricotPhase1Times[networkID]
	if !ok {
		return ApricotPhase1DefaultTime
	}
	return upgradeTime
}
// GetApricotPhase2Time returns the Apricot Phase 2 activation time for the
// given network, falling back to the default when the network has no entry.
func GetApricotPhase2Time(networkID uint32) time.Time {
	upgradeTime, ok := ApricotPhase2Times[networkID]
	if !ok {
		return ApricotPhase2DefaultTime
	}
	return upgradeTime
}
|
package main
import (
"crypto/md5"
"database/sql"
"fmt"
"html/template"
"strings"
"time"
"github.com/russross/blackfriday"
)
// Data abstracts all persistence operations used by the blog: loading posts,
// titles, comments and authors, and mutating them inside an explicit
// transaction delimited by begin/commit/rollback.
type Data interface {
	hiddenPosts(flag bool)
	post(url string) *Entry
	postID(url string) (id int64, err error)
	posts(limit, offset int) []*Entry
	titles(limit int) []*EntryLink
	titlesByTag(tag string) []*EntryLink
	allComments() []*CommentWithPostTitle
	numPosts() int
	author(username string) (*Author, error)
	deleteComment(id string) bool
	deletePost(url string) bool
	updateComment(id, text string) bool
	commenter(name, email, website, ip string) (id int64, err error)
	insertCommenter(name, email, website, ip string) (id int64, err error)
	insertComment(commenterID, postID int64, body string) (id int64, err error)
	insertPost(author int64, e *Entry) (id int64, err error)
	updatePost(id int64, e *Entry) bool
	updateTags(tags []*Tag, postID int64)
	begin() bool
	commit()
	rollback()
}
// DbData implements the Data interface on top of an SQL database. At most one
// transaction (tx) is active at a time; mutating methods require it.
type DbData struct {
	db *sql.DB
	tx *sql.Tx
	// includeHidden controls whether queries also return hidden posts.
	includeHidden bool
}
// hiddenPosts toggles whether subsequent queries include hidden posts.
func (dd *DbData) hiddenPosts(flag bool) {
	dd.includeHidden = flag
}
// begin opens a new transaction and reports success. Calling it while a
// transaction is already active is an error and leaves the state unchanged.
func (dd *DbData) begin() bool {
	if dd.tx != nil {
		logger.Println("Error! DbData.begin() called within transaction!")
		return false
	}
	tx, err := dd.db.Begin()
	if err != nil {
		logger.Println(err.Error())
		return false
	}
	dd.tx = tx
	return true
}
// commit commits the active transaction and clears it. Calling it without an
// active transaction only logs an error.
// Fix: the error returned by Commit() was silently discarded; a failed commit
// (i.e. lost writes) is now at least logged.
func (dd *DbData) commit() {
	if dd.tx == nil {
		logger.Println("Error! DbData.commit() called outside of transaction!")
		return
	}
	if err := dd.tx.Commit(); err != nil {
		logger.Println(err.Error())
	}
	dd.tx = nil
}
// rollback aborts the active transaction and clears it. Calling it without an
// active transaction only logs an error.
// Fix: the error returned by Rollback() was silently discarded; it is now logged.
func (dd *DbData) rollback() {
	if dd.tx == nil {
		logger.Println("Error! DbData.rollback() called outside of transaction!")
		return
	}
	if err := dd.tx.Rollback(); err != nil {
		logger.Println(err.Error())
	}
	dd.tx = nil
}
// post loads the single entry stored under url, honoring includeHidden.
// Returns nil (and logs) if the lookup does not yield exactly one post.
func (dd *DbData) post(url string) *Entry {
	// -1/-1 limit/offset: loadPosts is expected to ignore paging here —
	// TODO confirm against loadPosts.
	posts := loadPosts(dd.db, -1, -1, url, dd.includeHidden)
	if len(posts) != 1 {
		msg := "Error! DbData.post(%q) should return 1 post, but returned %d\n"
		logger.Println(fmt.Sprintf(msg, url, len(posts)))
		return nil
	}
	return posts[0]
}
// postID looks up the numeric id of the post stored under the given URL.
func (dd *DbData) postID(url string) (id int64, err error) {
	stmt, err := dd.db.Prepare("select id from post where url = $1")
	if err != nil {
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(url).Scan(&id)
	return
}
// posts returns entries page-wise; limit and offset are passed straight
// through to loadPosts, honoring the includeHidden flag.
func (dd *DbData) posts(limit, offset int) []*Entry {
	return loadPosts(dd.db, limit, offset, "", dd.includeHidden)
}
// numPosts returns the number of posts, excluding hidden ones unless
// includeHidden is set. Returns 0 (and logs) on query errors.
func (dd *DbData) numPosts() int {
	selectSql := "select count(*) from post as p"
	if !dd.includeHidden {
		selectSql = selectSql + " where p.hidden=FALSE"
	}
	rows, err := dd.db.Query(selectSql)
	if err != nil {
		logger.Println(err.Error())
		return 0
	}
	defer rows.Close()
	num := 0
	if rows.Next() {
		// NOTE(review): Scan error is ignored; the count stays 0 on failure.
		rows.Scan(&num)
	}
	return num
}
// titles returns up to limit (title, url, hidden) links of the newest posts;
// a non-positive limit returns all. Errors are logged and yield a nil slice.
func (dd *DbData) titles(limit int) (links []*EntryLink) {
	selectSql := `select p.title, p.url, p.hidden
from post as p`
	if !dd.includeHidden {
		selectSql = selectSql + " where p.hidden=FALSE"
	}
	selectSql = selectSql + " order by p.date desc"
	if limit > 0 {
		selectSql = selectSql + " limit $1"
	}
	stmt, err := dd.db.Prepare(selectSql)
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer stmt.Close()
	var rows *sql.Rows
	// The limit placeholder only exists in the SQL when limit > 0, so the
	// query must be issued with a matching argument count.
	if limit > 0 {
		rows, err = stmt.Query(limit)
	} else {
		rows, err = stmt.Query()
	}
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer rows.Close()
	return scanEntryLinks(rows)
}
// titlesByTag returns the (title, url, hidden) links of all posts carrying
// the tag with the given tag URL, newest first. Errors are logged and yield
// a nil slice.
func (dd *DbData) titlesByTag(tag string) (links []*EntryLink) {
	selectSql := `select p.title, p.url, p.hidden
from post as p
where p.id in (select tm.post_id from tagmap as tm
inner join tag as t
on tm.tag_id = t.id and t.url=$1)`
	if !dd.includeHidden {
		selectSql = selectSql + " and p.hidden=FALSE"
	}
	selectSql = selectSql + " order by p.date desc"
	stmt, err := dd.db.Prepare(selectSql)
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer stmt.Close()
	rows, err := stmt.Query(tag)
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer rows.Close()
	return scanEntryLinks(rows)
}
// scanEntryLinks reads (title, url, hidden) rows into EntryLink values.
// Rows that fail to scan are logged and skipped; a final rows.Err() is
// logged as well.
func scanEntryLinks(rows *sql.Rows) (links []*EntryLink) {
	for rows.Next() {
		link := new(EntryLink)
		if err := rows.Scan(&link.Title, &link.URL, &link.Hidden); err != nil {
			logger.Println(err.Error())
			continue
		}
		links = append(links, link)
	}
	if err := rows.Err(); err != nil {
		logger.Println(err.Error())
	}
	return
}
// allComments returns every comment together with its commenter details and
// the title/URL of the post it belongs to, newest first. The raw Markdown
// body is rendered to HTML and the timestamp formatted for display.
func (dd *DbData) allComments() []*CommentWithPostTitle {
	stmt, err := dd.db.Prepare(`select a.name, a.email, a.www, a.ip,
c.id, c.timestamp, c.body,
p.title, p.url
from commenter as a, comment as c, post as p
where a.id = c.commenter_id
and c.post_id = p.id
order by c.timestamp desc`)
	if err != nil {
		logger.Println(err.Error())
		return nil
	}
	defer stmt.Close()
	data, err := stmt.Query()
	if err != nil {
		logger.Println(err.Error())
		return nil
	}
	defer data.Close()
	var comments []*CommentWithPostTitle
	for data.Next() {
		comment := new(CommentWithPostTitle)
		var unixDate int64
		err = data.Scan(&comment.Name, &comment.Email, &comment.Website, &comment.IP,
			&comment.CommentID, &unixDate, &comment.RawBody,
			&comment.Title, &comment.URL)
		if err != nil {
			logger.Printf("error scanning comment row: %s\n", err.Error())
		}
		// MD5 of the lowercased email — presumably a Gravatar hash; confirm
		// against the template that consumes EmailHash.
		hash := md5.New()
		hash.Write([]byte(strings.ToLower(comment.Email)))
		comment.EmailHash = fmt.Sprintf("%x", hash.Sum(nil))
		comment.Time = time.Unix(unixDate, 0).Format("2006-01-02 15:04")
		comment.Body = template.HTML(blackfriday.MarkdownCommon([]byte(comment.RawBody)))
		comments = append(comments, comment)
	}
	err = data.Err()
	if err != nil {
		logger.Printf("error scanning comment row: %s\n", err.Error())
	}
	return comments
}
// commenter looks up the id of an existing commenter matching name, email and
// website; id stays -1 and err is set when no such row exists. The ip
// argument is accepted for interface symmetry but not used in the lookup.
func (dd *DbData) commenter(name, email, website, ip string) (id int64, err error) {
	id = -1
	query, err := dd.db.Prepare(`select c.id from commenter as c
where c.name = $1
and c.email = $2
and c.www = $3`)
	if err != nil {
		logger.Println("err: " + err.Error())
		return
	}
	defer query.Close()
	err = query.QueryRow(name, email, website).Scan(&id)
	if err != nil {
		logger.Println("err: " + err.Error())
	}
	return
}
// insertCommenter inserts a new commenter row and returns its id. Must be
// called inside a transaction.
// Fix: the Prepare error was discarded with `_`, so a failed Prepare left a
// nil statement and the subsequent Close/QueryRow would panic. The error is
// now checked, matching sibling insertComment.
func (dd *DbData) insertCommenter(name, email, website, ip string) (id int64, err error) {
	if dd.tx == nil {
		return -1, fmt.Errorf("DbData.insertCommenter() can only be called within xaction!")
	}
	stmt, err := dd.tx.Prepare(`insert into commenter
(name, email, www, ip)
values ($1, $2, $3, $4)
returning id`)
	if err != nil {
		logger.Println("Failed to prepare insert commenter stmt: " + err.Error())
		return
	}
	defer stmt.Close()
	err = stmt.QueryRow(name, email, website, ip).Scan(&id)
	if err != nil {
		logger.Println("Failed to insert commenter: " + err.Error())
	}
	return
}
// insertComment stores a new comment for postID authored by commenterID,
// timestamped with the current Unix time, and returns the new row id.
// Must be called within an open transaction.
func (dd *DbData) insertComment(commenterID, postID int64, body string) (id int64, err error) {
	if dd.tx == nil {
		return -1, fmt.Errorf("DbData.insertComment() can only be called within xaction!")
	}
	stmt, err := dd.tx.Prepare(`insert into comment
		(commenter_id, post_id, timestamp, body)
		values ($1, $2, $3, $4)
		returning id`)
	if err != nil {
		logger.Println("Failed to prepare insert comment stmt: " + err.Error())
		return
	}
	defer stmt.Close()
	// The comment's timestamp is assigned server-side-of-Go, not by the DB.
	err = stmt.QueryRow(commenterID, postID, time.Now().Unix(), body).Scan(&id)
	if err != nil {
		logger.Println("Failed to insert comment: " + err.Error())
		return
	}
	return
}
// insertPost stores a new post by the given author id, stamped with the
// current Unix time, and returns the new row id.
// Must be called within an open transaction.
func (dd *DbData) insertPost(author int64, e *Entry) (id int64, err error) {
	if dd.tx == nil {
		return -1, fmt.Errorf("DbData.insertPost() can only be called within xaction!")
	}
	insertPostSql, err := dd.tx.Prepare(`insert into post
		(author_id, title, date, url, body, hidden)
		values ($1, $2, $3, $4, $5, $6)
		returning id`)
	if err != nil {
		// The Prepare error used to be discarded with _, which made
		// the deferred Close and QueryRow below panic on a nil stmt.
		logger.Println("Failed to prepare insert post stmt: " + err.Error())
		return -1, err
	}
	defer insertPostSql.Close()
	date := time.Now().Unix()
	err = insertPostSql.QueryRow(author, e.Title, date, e.URL,
		string(e.Body), e.Hidden).Scan(&id)
	if err != nil {
		logger.Println("Failed to insert post: " + err.Error())
		return
	}
	return
}
// updatePost rewrites the title, URL, body and hidden flag of the post
// with the given id, returning true on success.
// Must be called within an open transaction.
func (dd *DbData) updatePost(id int64, e *Entry) bool {
	if dd.tx == nil {
		// Guard like the other mutating methods; dd.tx.Prepare on a
		// nil tx would panic.
		logger.Println("Error! DbData.updatePost() called outside of transaction!")
		return false
	}
	updateStmt, err := dd.tx.Prepare(`update post
		set title=$1, url=$2, body=$3, hidden=$4
		where id=$5`)
	if err != nil {
		// Previously discarded with _, leaving updateStmt nil.
		logger.Println(err.Error())
		return false
	}
	defer updateStmt.Close()
	_, err = updateStmt.Exec(e.Title, e.URL, string(e.Body), e.Hidden, id)
	if err != nil {
		logger.Println(err.Error())
		return false
	}
	return true
}
// updateTags replaces the tag associations of postID with the given tags:
// it clears the old tagmap rows, then re-links each tag (creating tag
// rows as needed). Must be called within an open transaction.
func (dd *DbData) updateTags(tags []*Tag, postID int64) {
	delStmt, err := dd.tx.Prepare("delete from tagmap where post_id=$1")
	if err != nil {
		// Previously discarded with _, so a failed Prepare panicked
		// on the deferred Close and the Exec below.
		logger.Println(err.Error())
		return
	}
	defer delStmt.Close()
	if _, err := delStmt.Exec(postID); err != nil {
		// Don't re-insert mappings if the old ones couldn't be removed,
		// or we'd create duplicates.
		logger.Println(err.Error())
		return
	}
	for _, t := range tags {
		tagID, err := insertOrGetTagID(dd.tx, t)
		if err == nil {
			updateTagMap(dd.tx, postID, tagID)
		}
	}
}
// author fetches the stored password hash and profile fields for the
// author with the given display name. err is sql.ErrNoRows when no
// such author exists.
func (dd *DbData) author(username string) (*Author, error) {
	author := Author{UserName: username}
	err := dd.db.QueryRow(`select passwd, full_name, email, www
		from author where disp_name=$1`, username).
		Scan(&author.Passwd, &author.FullName, &author.Email, &author.Www)
	return &author, err
}
func (dd *DbData) deleteComment(id string) bool {
_, err := dd.db.Exec("delete from comment where id=$1", id)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func (dd *DbData) deletePost(url string) bool {
_, err := dd.db.Exec("delete from post where url=$1", url)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func (dd *DbData) updateComment(id, text string) bool {
_, err := dd.db.Exec("update comment set body=$1 where id=$2", text, id)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func loadPosts(db *sql.DB, limit, offset int, url string, includeHidden bool) []*Entry {
if db == nil {
return nil
}
data, err := queryPosts(db, limit, offset, url, includeHidden)
if err != nil {
logger.Println(err.Error())
return nil
}
return data
}
// queryPosts loads posts (optionally restricted to one URL, optionally
// excluding hidden posts) together with their tags and comments, newest
// first. limit < 0 means no limit; offset <= 0 means no offset.
func queryPosts(db *sql.DB, limit, offset int,
	url string, includeHidden bool) (entries []*Entry, err error) {
	var args []interface{}
	postURLWhereClause := ""
	if url != "" {
		// Bind the URL as $1 instead of splicing it into the SQL
		// text: the old fmt.Sprintf("and p.url='%s'", url) form was
		// an SQL-injection hole for attacker-controlled URLs.
		args = append(args, url)
		postURLWhereClause = "and p.url=$1"
	}
	postHiddenWhereClause := ""
	if !includeHidden {
		postHiddenWhereClause = "and p.hidden=FALSE"
	}
	// limit/offset are ints formatted by us, so Sprintf is safe here.
	limitClause := ""
	if limit >= 0 {
		limitClause = fmt.Sprintf("limit %d", limit)
	}
	offsetClause := ""
	if offset > 0 {
		offsetClause = fmt.Sprintf("offset %d", offset)
	}
	queryFmt := `select a.disp_name, p.id, p.title, p.date, p.body,
		p.url, p.hidden
		from author as a, post as p
		where a.id=p.author_id
		%s %s
		order by p.date desc
		%s %s`
	query := fmt.Sprintf(queryFmt, postURLWhereClause, postHiddenWhereClause,
		limitClause, offsetClause)
	rows, err := db.Query(query, args...)
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer rows.Close()
	for rows.Next() {
		entry := new(Entry)
		var id int64
		var unixDate int64
		err = rows.Scan(&entry.Author, &id, &entry.Title, &unixDate,
			&entry.RawBody, &entry.URL, &entry.Hidden)
		if err != nil {
			logger.Println(err.Error())
			continue
		}
		entry.Body = template.HTML(blackfriday.MarkdownCommon([]byte(entry.RawBody)))
		entry.Date = time.Unix(unixDate, 0).Format("2006-01-02")
		entry.Tags = queryTags(db, id)
		entry.Comments = queryComments(db, id)
		entries = append(entries, entry)
	}
	err = rows.Err()
	if err != nil {
		logger.Printf("error scanning post row: %s\n", err.Error())
	}
	return
}
func queryTags(db *sql.DB, postID int64) []*Tag {
stmt, err := db.Prepare(`select t.name, t.url
from tag as t, tagmap as tm
where t.id = tm.tag_id
and tm.post_id = $1`)
if err != nil {
logger.Println(err.Error())
return nil
}
defer stmt.Close()
rows, err := stmt.Query(postID)
if err != nil {
logger.Println(err.Error())
return nil
}
defer rows.Close()
var tags []*Tag
for rows.Next() {
tag := new(Tag)
err = rows.Scan(&tag.TagName, &tag.TagURL)
if err != nil {
logger.Println(err.Error())
continue
}
tags = append(tags, tag)
}
err = rows.Err()
if err != nil {
logger.Printf("error scanning tag row: %s\n", err.Error())
}
return tags
}
func queryComments(db *sql.DB, postID int64) []*Comment {
stmt, err := db.Prepare(`select a.name, a.email, a.www, a.ip,
c.id, c.timestamp, c.body
from commenter as a, comment as c
where a.id = c.commenter_id
and c.post_id = $1
order by c.timestamp asc`)
if err != nil {
logger.Println(err.Error())
return nil
}
defer stmt.Close()
data, err := stmt.Query(postID)
if err != nil {
logger.Println(err.Error())
return nil
}
defer data.Close()
var comments []*Comment
for data.Next() {
comment := new(Comment)
var unixDate int64
err = data.Scan(&comment.Name, &comment.Email, &comment.Website, &comment.IP,
&comment.CommentID, &unixDate, &comment.RawBody)
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
hash := md5.New()
hash.Write([]byte(strings.ToLower(comment.Email)))
comment.EmailHash = fmt.Sprintf("%x", hash.Sum(nil))
comment.Time = time.Unix(unixDate, 0).Format("2006-01-02 15:04")
comment.Body = template.HTML(blackfriday.MarkdownCommon([]byte(comment.RawBody)))
comments = append(comments, comment)
}
err = data.Err()
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
return comments
}
// insertOrGetTagID returns the id of tag, inserting a new row into the
// tag table when no row with the tag's URL exists yet.
// Must be called within an open transaction.
func insertOrGetTagID(xaction *sql.Tx, tag *Tag) (tagID int64, err error) {
	query, err := xaction.Prepare("select id from tag where url=$1")
	if err != nil {
		logger.Println("Failed to prepare select tag stmt: " + err.Error())
		return
	}
	defer query.Close()
	err = query.QueryRow(tag.TagURL).Scan(&tagID)
	switch err {
	case nil:
		// Tag already exists; tagID was filled in by Scan.
		return
	case sql.ErrNoRows:
		// No such tag yet: insert it and return the generated id.
		// Note: err here shadows the named return, so the explicit
		// returns below are required.
		insertTagSql, err := xaction.Prepare(`insert into tag
			(name, url)
			values ($1, $2)
			returning id`)
		if err != nil {
			logger.Println("Failed to prepare insert tag stmt: " + err.Error())
			return -1, err
		}
		defer insertTagSql.Close()
		err = insertTagSql.QueryRow(tag.TagName, tag.TagURL).Scan(&tagID)
		if err != nil {
			logger.Println("Failed to insert tag: " + err.Error())
		}
		return tagID, err
	default:
		logger.Printf("err: %s", err.Error())
		return -1, err
	}
	// The trailing "should never get here" return was unreachable
	// (every switch case returns) and is removed to satisfy go vet.
}
// updateTagMap links tagID to postID in the tagmap table.
// Must be called within an open transaction.
func updateTagMap(xaction *sql.Tx, postID int64, tagID int64) {
	stmt, err := xaction.Prepare(`insert into tagmap
		(tag_id, post_id)
		values ($1, $2)`)
	if err != nil {
		logger.Println("Failed to prepare insrt tagmap stmt: " + err.Error())
		// Bail out: stmt is nil here, so the deferred Close and the
		// Exec below would both panic.
		return
	}
	defer stmt.Close()
	if _, err := stmt.Exec(tagID, postID); err != nil {
		logger.Println("Failed to insert tagmap entry: " + err.Error())
	}
}
go vet: remove unreachable code
package main
import (
"crypto/md5"
"database/sql"
"fmt"
"html/template"
"strings"
"time"
"github.com/russross/blackfriday"
)
type Data interface {
hiddenPosts(flag bool)
post(url string) *Entry
postID(url string) (id int64, err error)
posts(limit, offset int) []*Entry
titles(limit int) []*EntryLink
titlesByTag(tag string) []*EntryLink
allComments() []*CommentWithPostTitle
numPosts() int
author(username string) (*Author, error)
deleteComment(id string) bool
deletePost(url string) bool
updateComment(id, text string) bool
commenter(name, email, website, ip string) (id int64, err error)
insertCommenter(name, email, website, ip string) (id int64, err error)
insertComment(commenterID, postID int64, body string) (id int64, err error)
insertPost(author int64, e *Entry) (id int64, err error)
updatePost(id int64, e *Entry) bool
updateTags(tags []*Tag, postID int64)
begin() bool
commit()
rollback()
}
type DbData struct {
db *sql.DB
tx *sql.Tx
includeHidden bool
}
func (dd *DbData) hiddenPosts(flag bool) {
dd.includeHidden = flag
}
func (dd *DbData) begin() bool {
if dd.tx != nil {
logger.Println("Error! DbData.begin() called within transaction!")
return false
}
xaction, err := dd.db.Begin()
if err != nil {
logger.Println(err.Error())
return false
}
dd.tx = xaction
return true
}
func (dd *DbData) commit() {
if dd.tx == nil {
logger.Println("Error! DbData.commit() called outside of transaction!")
return
}
dd.tx.Commit()
dd.tx = nil
}
func (dd *DbData) rollback() {
if dd.tx == nil {
logger.Println("Error! DbData.rollback() called outside of transaction!")
return
}
dd.tx.Rollback()
dd.tx = nil
}
func (dd *DbData) post(url string) *Entry {
posts := loadPosts(dd.db, -1, -1, url, dd.includeHidden)
if len(posts) != 1 {
msg := "Error! DbData.post(%q) should return 1 post, but returned %d\n"
logger.Println(fmt.Sprintf(msg, url, len(posts)))
return nil
}
return posts[0]
}
func (dd *DbData) postID(url string) (id int64, err error) {
query, err := dd.db.Prepare("select id from post where url = $1")
if err != nil {
return
}
defer query.Close()
err = query.QueryRow(url).Scan(&id)
return
}
func (dd *DbData) posts(limit, offset int) []*Entry {
return loadPosts(dd.db, limit, offset, "", dd.includeHidden)
}
// numPosts returns the total number of posts, excluding hidden posts
// unless includeHidden has been set via hiddenPosts(true).
// Returns 0 on any database error.
func (dd *DbData) numPosts() int {
	selectSql := "select count(*) from post as p"
	if !dd.includeHidden {
		selectSql = selectSql + " where p.hidden=FALSE"
	}
	// QueryRow is the idiomatic single-row form and releases its
	// resources automatically; the old Query/Next version also
	// silently ignored the Scan error.
	num := 0
	if err := dd.db.QueryRow(selectSql).Scan(&num); err != nil {
		logger.Println(err.Error())
		return 0
	}
	return num
}
func (dd *DbData) titles(limit int) (links []*EntryLink) {
selectSql := `select p.title, p.url, p.hidden
from post as p`
if !dd.includeHidden {
selectSql = selectSql + " where p.hidden=FALSE"
}
selectSql = selectSql + " order by p.date desc"
if limit > 0 {
selectSql = selectSql + " limit $1"
}
stmt, err := dd.db.Prepare(selectSql)
if err != nil {
logger.Println(err.Error())
return
}
defer stmt.Close()
var rows *sql.Rows
if limit > 0 {
rows, err = stmt.Query(limit)
} else {
rows, err = stmt.Query()
}
if err != nil {
logger.Println(err.Error())
return
}
defer rows.Close()
return scanEntryLinks(rows)
}
func (dd *DbData) titlesByTag(tag string) (links []*EntryLink) {
selectSql := `select p.title, p.url, p.hidden
from post as p
where p.id in (select tm.post_id from tagmap as tm
inner join tag as t
on tm.tag_id = t.id and t.url=$1)`
if !dd.includeHidden {
selectSql = selectSql + " and p.hidden=FALSE"
}
selectSql = selectSql + " order by p.date desc"
stmt, err := dd.db.Prepare(selectSql)
if err != nil {
logger.Println(err.Error())
return
}
defer stmt.Close()
rows, err := stmt.Query(tag)
if err != nil {
logger.Println(err.Error())
return
}
defer rows.Close()
return scanEntryLinks(rows)
}
func scanEntryLinks(rows *sql.Rows) (links []*EntryLink) {
for rows.Next() {
entryLink := new(EntryLink)
err := rows.Scan(&entryLink.Title, &entryLink.URL, &entryLink.Hidden)
if err != nil {
logger.Println(err.Error())
continue
}
links = append(links, entryLink)
}
err := rows.Err()
if err != nil {
logger.Println(err.Error())
}
return
}
func (dd *DbData) allComments() []*CommentWithPostTitle {
stmt, err := dd.db.Prepare(`select a.name, a.email, a.www, a.ip,
c.id, c.timestamp, c.body,
p.title, p.url
from commenter as a, comment as c, post as p
where a.id = c.commenter_id
and c.post_id = p.id
order by c.timestamp desc`)
if err != nil {
logger.Println(err.Error())
return nil
}
defer stmt.Close()
data, err := stmt.Query()
if err != nil {
logger.Println(err.Error())
return nil
}
defer data.Close()
var comments []*CommentWithPostTitle
for data.Next() {
comment := new(CommentWithPostTitle)
var unixDate int64
err = data.Scan(&comment.Name, &comment.Email, &comment.Website, &comment.IP,
&comment.CommentID, &unixDate, &comment.RawBody,
&comment.Title, &comment.URL)
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
hash := md5.New()
hash.Write([]byte(strings.ToLower(comment.Email)))
comment.EmailHash = fmt.Sprintf("%x", hash.Sum(nil))
comment.Time = time.Unix(unixDate, 0).Format("2006-01-02 15:04")
comment.Body = template.HTML(blackfriday.MarkdownCommon([]byte(comment.RawBody)))
comments = append(comments, comment)
}
err = data.Err()
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
return comments
}
func (dd *DbData) commenter(name, email, website, ip string) (id int64, err error) {
id = -1
query, err := dd.db.Prepare(`select c.id from commenter as c
where c.name = $1
and c.email = $2
and c.www = $3`)
if err != nil {
logger.Println("err: " + err.Error())
return
}
defer query.Close()
err = query.QueryRow(name, email, website).Scan(&id)
if err != nil {
logger.Println("err: " + err.Error())
}
return
}
// insertCommenter adds a new commenter row and returns its generated id.
// Must be called within an open transaction.
func (dd *DbData) insertCommenter(name, email, website, ip string) (id int64, err error) {
	if dd.tx == nil {
		return -1, fmt.Errorf("DbData.insertCommenter() can only be called within xaction!")
	}
	insertCommenter, err := dd.tx.Prepare(`insert into commenter
		(name, email, www, ip)
		values ($1, $2, $3, $4)
		returning id`)
	if err != nil {
		// The Prepare error used to be discarded with _, which made
		// the deferred Close and the QueryRow below panic on a nil
		// statement whenever preparation failed.
		logger.Println("Failed to prepare insert commenter stmt: " + err.Error())
		return -1, err
	}
	defer insertCommenter.Close()
	err = insertCommenter.QueryRow(name, email, website, ip).Scan(&id)
	if err != nil {
		logger.Println("Failed to insert commenter: " + err.Error())
	}
	return
}
func (dd *DbData) insertComment(commenterID, postID int64, body string) (id int64, err error) {
if dd.tx == nil {
return -1, fmt.Errorf("DbData.insertComment() can only be called within xaction!")
}
stmt, err := dd.tx.Prepare(`insert into comment
(commenter_id, post_id, timestamp, body)
values ($1, $2, $3, $4)
returning id`)
if err != nil {
logger.Println("Failed to prepare insert comment stmt: " + err.Error())
return
}
defer stmt.Close()
err = stmt.QueryRow(commenterID, postID, time.Now().Unix(), body).Scan(&id)
if err != nil {
logger.Println("Failed to insert comment: " + err.Error())
return
}
return
}
// insertPost stores a new post by the given author id, stamped with the
// current Unix time, and returns the new row id.
// Must be called within an open transaction.
func (dd *DbData) insertPost(author int64, e *Entry) (id int64, err error) {
	if dd.tx == nil {
		return -1, fmt.Errorf("DbData.insertPost() can only be called within xaction!")
	}
	insertPostSql, err := dd.tx.Prepare(`insert into post
		(author_id, title, date, url, body, hidden)
		values ($1, $2, $3, $4, $5, $6)
		returning id`)
	if err != nil {
		// The Prepare error used to be discarded with _, which made
		// the deferred Close and QueryRow below panic on a nil stmt.
		logger.Println("Failed to prepare insert post stmt: " + err.Error())
		return -1, err
	}
	defer insertPostSql.Close()
	date := time.Now().Unix()
	err = insertPostSql.QueryRow(author, e.Title, date, e.URL,
		string(e.Body), e.Hidden).Scan(&id)
	if err != nil {
		logger.Println("Failed to insert post: " + err.Error())
		return
	}
	return
}
// updatePost rewrites the title, URL, body and hidden flag of the post
// with the given id, returning true on success.
// Must be called within an open transaction.
func (dd *DbData) updatePost(id int64, e *Entry) bool {
	if dd.tx == nil {
		// Guard like the other mutating methods; dd.tx.Prepare on a
		// nil tx would panic.
		logger.Println("Error! DbData.updatePost() called outside of transaction!")
		return false
	}
	updateStmt, err := dd.tx.Prepare(`update post
		set title=$1, url=$2, body=$3, hidden=$4
		where id=$5`)
	if err != nil {
		// Previously discarded with _, leaving updateStmt nil.
		logger.Println(err.Error())
		return false
	}
	defer updateStmt.Close()
	_, err = updateStmt.Exec(e.Title, e.URL, string(e.Body), e.Hidden, id)
	if err != nil {
		logger.Println(err.Error())
		return false
	}
	return true
}
// updateTags replaces the tag associations of postID with the given tags:
// it clears the old tagmap rows, then re-links each tag (creating tag
// rows as needed). Must be called within an open transaction.
func (dd *DbData) updateTags(tags []*Tag, postID int64) {
	delStmt, err := dd.tx.Prepare("delete from tagmap where post_id=$1")
	if err != nil {
		// Previously discarded with _, so a failed Prepare panicked
		// on the deferred Close and the Exec below.
		logger.Println(err.Error())
		return
	}
	defer delStmt.Close()
	if _, err := delStmt.Exec(postID); err != nil {
		// Don't re-insert mappings if the old ones couldn't be removed,
		// or we'd create duplicates.
		logger.Println(err.Error())
		return
	}
	for _, t := range tags {
		tagID, err := insertOrGetTagID(dd.tx, t)
		if err == nil {
			updateTagMap(dd.tx, postID, tagID)
		}
	}
}
func (dd *DbData) author(username string) (*Author, error) {
row := dd.db.QueryRow(`select passwd, full_name, email, www
from author where disp_name=$1`, username)
var a Author
a.UserName = username
err := row.Scan(&a.Passwd, &a.FullName, &a.Email, &a.Www)
return &a, err
}
func (dd *DbData) deleteComment(id string) bool {
_, err := dd.db.Exec("delete from comment where id=$1", id)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func (dd *DbData) deletePost(url string) bool {
_, err := dd.db.Exec("delete from post where url=$1", url)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func (dd *DbData) updateComment(id, text string) bool {
_, err := dd.db.Exec("update comment set body=$1 where id=$2", text, id)
if err != nil {
logger.Println(err.Error())
return false
}
return true
}
func loadPosts(db *sql.DB, limit, offset int, url string, includeHidden bool) []*Entry {
if db == nil {
return nil
}
data, err := queryPosts(db, limit, offset, url, includeHidden)
if err != nil {
logger.Println(err.Error())
return nil
}
return data
}
// queryPosts loads posts (optionally restricted to one URL, optionally
// excluding hidden posts) together with their tags and comments, newest
// first. limit < 0 means no limit; offset <= 0 means no offset.
func queryPosts(db *sql.DB, limit, offset int,
	url string, includeHidden bool) (entries []*Entry, err error) {
	var args []interface{}
	postURLWhereClause := ""
	if url != "" {
		// Bind the URL as $1 instead of splicing it into the SQL
		// text: the old fmt.Sprintf("and p.url='%s'", url) form was
		// an SQL-injection hole for attacker-controlled URLs.
		args = append(args, url)
		postURLWhereClause = "and p.url=$1"
	}
	postHiddenWhereClause := ""
	if !includeHidden {
		postHiddenWhereClause = "and p.hidden=FALSE"
	}
	// limit/offset are ints formatted by us, so Sprintf is safe here.
	limitClause := ""
	if limit >= 0 {
		limitClause = fmt.Sprintf("limit %d", limit)
	}
	offsetClause := ""
	if offset > 0 {
		offsetClause = fmt.Sprintf("offset %d", offset)
	}
	queryFmt := `select a.disp_name, p.id, p.title, p.date, p.body,
		p.url, p.hidden
		from author as a, post as p
		where a.id=p.author_id
		%s %s
		order by p.date desc
		%s %s`
	query := fmt.Sprintf(queryFmt, postURLWhereClause, postHiddenWhereClause,
		limitClause, offsetClause)
	rows, err := db.Query(query, args...)
	if err != nil {
		logger.Println(err.Error())
		return
	}
	defer rows.Close()
	for rows.Next() {
		entry := new(Entry)
		var id int64
		var unixDate int64
		err = rows.Scan(&entry.Author, &id, &entry.Title, &unixDate,
			&entry.RawBody, &entry.URL, &entry.Hidden)
		if err != nil {
			logger.Println(err.Error())
			continue
		}
		entry.Body = template.HTML(blackfriday.MarkdownCommon([]byte(entry.RawBody)))
		entry.Date = time.Unix(unixDate, 0).Format("2006-01-02")
		entry.Tags = queryTags(db, id)
		entry.Comments = queryComments(db, id)
		entries = append(entries, entry)
	}
	err = rows.Err()
	if err != nil {
		logger.Printf("error scanning post row: %s\n", err.Error())
	}
	return
}
func queryTags(db *sql.DB, postID int64) []*Tag {
stmt, err := db.Prepare(`select t.name, t.url
from tag as t, tagmap as tm
where t.id = tm.tag_id
and tm.post_id = $1`)
if err != nil {
logger.Println(err.Error())
return nil
}
defer stmt.Close()
rows, err := stmt.Query(postID)
if err != nil {
logger.Println(err.Error())
return nil
}
defer rows.Close()
var tags []*Tag
for rows.Next() {
tag := new(Tag)
err = rows.Scan(&tag.TagName, &tag.TagURL)
if err != nil {
logger.Println(err.Error())
continue
}
tags = append(tags, tag)
}
err = rows.Err()
if err != nil {
logger.Printf("error scanning tag row: %s\n", err.Error())
}
return tags
}
func queryComments(db *sql.DB, postID int64) []*Comment {
stmt, err := db.Prepare(`select a.name, a.email, a.www, a.ip,
c.id, c.timestamp, c.body
from commenter as a, comment as c
where a.id = c.commenter_id
and c.post_id = $1
order by c.timestamp asc`)
if err != nil {
logger.Println(err.Error())
return nil
}
defer stmt.Close()
data, err := stmt.Query(postID)
if err != nil {
logger.Println(err.Error())
return nil
}
defer data.Close()
var comments []*Comment
for data.Next() {
comment := new(Comment)
var unixDate int64
err = data.Scan(&comment.Name, &comment.Email, &comment.Website, &comment.IP,
&comment.CommentID, &unixDate, &comment.RawBody)
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
hash := md5.New()
hash.Write([]byte(strings.ToLower(comment.Email)))
comment.EmailHash = fmt.Sprintf("%x", hash.Sum(nil))
comment.Time = time.Unix(unixDate, 0).Format("2006-01-02 15:04")
comment.Body = template.HTML(blackfriday.MarkdownCommon([]byte(comment.RawBody)))
comments = append(comments, comment)
}
err = data.Err()
if err != nil {
logger.Printf("error scanning comment row: %s\n", err.Error())
}
return comments
}
// insertOrGetTagID returns the id of tag, inserting a new row into the
// tag table when no row with the tag's URL exists yet.
// Must be called within an open transaction.
func insertOrGetTagID(xaction *sql.Tx, tag *Tag) (tagID int64, err error) {
	query, err := xaction.Prepare("select id from tag where url=$1")
	if err != nil {
		logger.Println("Failed to prepare select tag stmt: " + err.Error())
		return
	}
	defer query.Close()
	err = query.QueryRow(tag.TagURL).Scan(&tagID)
	switch err {
	case nil:
		// Tag already exists; tagID was filled in by Scan.
		return
	case sql.ErrNoRows:
		// No such tag yet: insert it and return the generated id.
		// Note: err declared with := here shadows the named return
		// value, so the explicit returns below are required.
		insertTagSql, err := xaction.Prepare(`insert into tag
			(name, url)
			values ($1, $2)
			returning id`)
		if err != nil {
			logger.Println("Failed to prepare insert tag stmt: " + err.Error())
			return -1, err
		}
		defer insertTagSql.Close()
		err = insertTagSql.QueryRow(tag.TagName, tag.TagURL).Scan(&tagID)
		if err != nil {
			logger.Println("Failed to insert tag: " + err.Error())
		}
		return tagID, err
	default:
		// Any other lookup failure (connection loss, bad SQL, ...).
		logger.Printf("err: %s", err.Error())
		return -1, err
	}
}
// updateTagMap links tagID to postID in the tagmap table.
// Must be called within an open transaction.
func updateTagMap(xaction *sql.Tx, postID int64, tagID int64) {
	stmt, err := xaction.Prepare(`insert into tagmap
		(tag_id, post_id)
		values ($1, $2)`)
	if err != nil {
		logger.Println("Failed to prepare insrt tagmap stmt: " + err.Error())
		// Bail out: stmt is nil here, so the deferred Close and the
		// Exec below would both panic.
		return
	}
	defer stmt.Close()
	if _, err := stmt.Exec(tagID, postID); err != nil {
		logger.Println("Failed to insert tagmap entry: " + err.Error())
	}
}
|
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"strconv"
"strings"
)
const (
TRADB = ".trago.db"
bytes = "abcdefghijklmnopqrstuvwxyz1234567890"
currentDir = "./"
)
type TraDb struct {
replicaId string
version map[string]int
files map[string]FileState
}
type FileState struct {
size int
mtime int64
version int
replica string
// TODO: use a hash as well
}
func main() {
db, err := Parse()
checkError(err)
db.Update()
}
func Parse() (TraDb, error) {
tradb := TraDb{}
version := make(map[string]int)
dbfile, err := os.Open(TRADB)
if os.IsNotExist(err) {
log.Println("didn't find .trago.db")
tradb = *New()
tradb.Write()
return tradb, nil
} else if err != nil {
return tradb, err
}
defer dbfile.Close()
tradb.files = make(map[string]FileState)
scanner := bufio.NewScanner(dbfile)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(line, "#") {
continue
}
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
switch fields[0] {
case "file": // file name size mtime replica:version
if len(fields) != 5 {
continue
}
size, err := strconv.Atoi(fields[2])
checkError(err)
mtime, err := strconv.ParseInt(fields[3], 10, 64)
checkError(err)
pair := strings.Split(fields[4], ":")
replicaId := pair[0]
ver, err := strconv.Atoi(pair[1])
checkError(err)
tradb.files[fields[1]] = FileState{size, mtime, ver, replicaId}
case "version": // version r1:v1 r2:v2 ...
for _, entry := range fields[1:] {
pair := strings.Split(entry, ":") // replica:version pair
v, err := strconv.Atoi(pair[1])
checkError(err)
version[pair[0]] = v
}
tradb.version = version
case "replica": // replica replica-id
if len(fields) != 2 {
continue
}
tradb.replicaId = fields[1]
}
}
checkError(scanner.Err())
return tradb, nil
}
// New scans the current directory and builds a fresh TraDb with a
// random 16-character replica id and every regular file at version 1.
func New() *TraDb {
	replicaId := make([]byte, 16)
	version := make(map[string]int)
	// NOTE(review): math/rand is never seeded in this file, so every
	// run draws the same id sequence — confirm the caller seeds it,
	// otherwise replica ids will collide across machines.
	for i := range replicaId { // `for i, _ :=` is non-idiomatic; index only
		replicaId[i] = bytes[rand.Intn(len(bytes))]
	}
	version[string(replicaId)] = 1
	files, err := ioutil.ReadDir(currentDir)
	checkError(err)
	filemap := make(map[string]FileState)
	for _, file := range files {
		if file.IsDir() {
			continue // ignore directories for now
		}
		fs := FileState{
			size:    int(file.Size()),
			mtime:   file.ModTime().UnixNano(),
			version: 1,
			replica: string(replicaId),
		}
		filemap[file.Name()] = fs
	}
	return &TraDb{string(replicaId), version, filemap}
}
func (tradb *TraDb) Write() {
var pairs []string
for replicaId, version := range tradb.version {
entry := strings.Join([]string{replicaId, strconv.Itoa(version)}, ":")
pairs = append(pairs, entry)
}
versionVector := strings.Join(pairs, " ")
preamble := fmt.Sprintf(
"replica %s\nversion %s\n# files\n",
tradb.replicaId,
versionVector,
)
fileEntries := make([]string, len(tradb.files))
i := 0
for filename, info := range tradb.files {
fileEntries[i] = fmt.Sprintf(
"file %s %d %d %s:%d",
filename,
info.size,
info.mtime,
info.replica,
info.version,
)
i = i + 1
}
entryString := strings.Join(fileEntries, "\n")
dataToWrite := []byte(preamble + entryString)
err := ioutil.WriteFile(TRADB, dataToWrite, 0644)
checkError(err)
}
func checkError(err error) {
if err != nil {
log.Fatal(err)
}
}
update modified files (ignores deleted files)
package main
import (
"bufio"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"strconv"
"strings"
)
const (
TRADB = ".trago.db"
bytes = "abcdefghijklmnopqrstuvwxyz1234567890"
currentDir = "./"
)
type TraDb struct {
replicaId string
version map[string]int
files map[string]FileState
}
type FileState struct {
size int
mtime int64
version int
replica string
// TODO: use a hash as well
}
func main() {
db, err := Parse()
checkError(err)
db.Update()
}
func Parse() (TraDb, error) {
tradb := TraDb{}
version := make(map[string]int)
dbfile, err := os.Open(TRADB)
if os.IsNotExist(err) {
log.Println("didn't find .trago.db")
tradb = *New()
tradb.Write()
return tradb, nil
} else if err != nil {
return tradb, err
}
defer dbfile.Close()
tradb.files = make(map[string]FileState)
scanner := bufio.NewScanner(dbfile)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
if strings.HasPrefix(line, "#") {
continue
}
fields := strings.Fields(line)
if len(fields) == 0 {
continue
}
switch fields[0] {
case "file": // file name size mtime replica:version
if len(fields) != 5 {
continue
}
size, err := strconv.Atoi(fields[2])
checkError(err)
mtime, err := strconv.ParseInt(fields[3], 10, 64)
checkError(err)
pair := strings.Split(fields[4], ":")
replicaId := pair[0]
ver, err := strconv.Atoi(pair[1])
checkError(err)
tradb.files[fields[1]] = FileState{size, mtime, ver, replicaId}
case "version": // version r1:v1 r2:v2 ...
for _, entry := range fields[1:] {
pair := strings.Split(entry, ":") // replica:version pair
v, err := strconv.Atoi(pair[1])
checkError(err)
version[pair[0]] = v
}
tradb.version = version
case "replica": // replica replica-id
if len(fields) != 2 {
continue
}
tradb.replicaId = fields[1]
}
}
checkError(scanner.Err())
return tradb, nil
}
// New scans the current directory and builds a fresh TraDb with a
// random 16-character replica id and every regular file at version 1.
func New() *TraDb {
	replicaId := make([]byte, 16)
	version := make(map[string]int)
	// NOTE(review): math/rand is never seeded in this file, so every
	// run draws the same id sequence — confirm the caller seeds it,
	// otherwise replica ids will collide across machines.
	for i := range replicaId { // `for i, _ :=` is non-idiomatic; index only
		replicaId[i] = bytes[rand.Intn(len(bytes))]
	}
	version[string(replicaId)] = 1
	files, err := ioutil.ReadDir(currentDir)
	checkError(err)
	filemap := make(map[string]FileState)
	for _, file := range files {
		if file.IsDir() {
			continue // ignore directories for now
		}
		fs := FileState{
			size:    int(file.Size()),
			mtime:   file.ModTime().UnixNano(),
			version: 1,
			replica: string(replicaId),
		}
		filemap[file.Name()] = fs
	}
	return &TraDb{string(replicaId), version, filemap}
}
func (tradb *TraDb) Write() {
var pairs []string
for replicaId, version := range tradb.version {
entry := strings.Join([]string{replicaId, strconv.Itoa(version)}, ":")
pairs = append(pairs, entry)
}
versionVector := strings.Join(pairs, " ")
preamble := fmt.Sprintf(
"replica %s\nversion %s\n# files\n",
tradb.replicaId,
versionVector,
)
fileEntries := make([]string, len(tradb.files))
i := 0
for filename, info := range tradb.files {
fileEntries[i] = fmt.Sprintf(
"file %s %d %d %s:%d",
filename,
info.size,
info.mtime,
info.replica,
info.version,
)
i = i + 1
}
entryString := strings.Join(fileEntries, "\n")
dataToWrite := []byte(preamble + entryString)
err := ioutil.WriteFile(TRADB, dataToWrite, 0644)
checkError(err)
}
// Update refreshes the in-memory db against the current directory:
// files whose on-disk mtime is newer than the recorded one get their
// mtime and version refreshed; new files are only logged for now.
// Deleted files are ignored (see commit note).
func (db *TraDb) Update() {
	files, err := ioutil.ReadDir(currentDir)
	checkError(err)
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		filename := file.Name()
		dbRecord := db.files[filename]
		if dbRecord.mtime == 0 {
			// Zero mtime means the map had no entry: a new file.
			log.Printf("found a new file: %s\n", filename)
		} else if dbRecord.mtime < file.ModTime().UnixNano() {
			log.Printf("found an updated file: %s\n", filename)
			dbRecord.mtime = file.ModTime().UnixNano()
			dbRecord.version = db.version[db.replicaId]
			// dbRecord is a copy of the map value; without this
			// write-back the update was silently lost.
			db.files[filename] = dbRecord
		} else {
			log.Printf("file unchanged: %s\n", filename)
		}
	}
}
func checkError(err error) {
if err != nil {
log.Fatal(err)
}
}
|
package hydrocarbon
import (
"context"
"database/sql"
"errors"
_ "github.com/lib/pq"
)
var (
ErrInvalidSession = errors.New("invalid session token")
)
// A DB is responsible for all interactions with postgres
type DB struct {
sql *sql.DB
}
// NewDB returns a new database
func NewDB(dsn string) (*DB, error) {
db, err := sql.Open("postgres", dsn)
if err != nil {
return nil, err
}
err = runMigrations(db)
if err != nil {
return nil, err
}
return &DB{
sql: db,
}, nil
}
// CreateUser creates a new user and returns the users ID
func (db *DB) CreateUser(ctx context.Context, email string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO users
(email)
VALUES ($1)
RETURNING id;`, email)
var userID string
err := row.Scan(&userID)
if err != nil {
return "", err
}
return userID, nil
}
// CreateLoginToken creates a new one-time-use login token
func (db *DB) CreateLoginToken(ctx context.Context, userID string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO login_tokens
(user_id)
VALUES ($1)
RETURNING token;`, userID)
var token string
err := row.Scan(&token)
if err != nil {
return "", err
}
return token, nil
}
// ActivateLoginToken activates the given LoginToken and returns the user
// the token was for
func (db *DB) ActivateLoginToken(ctx context.Context, token string) (string, error) {
row := db.sql.QueryRowContext(ctx, `UPDATE login_tokens
SET (used) = (true)
WHERE token = $1
AND expired_at > now()
RETURNING user_id;`, token)
var userID string
err := row.Scan(&userID)
if err != nil {
return "", err
}
return userID, nil
}
// CreateSession creates a new session for the user ID and returns the
// session token
func (db *DB) CreateSession(ctx context.Context, userID, userAgent, ip string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO sessions
(user_id, user_agent, ip)
VALUES ($1, $2, $3)
RETURNING token;`, userID, userAgent, ip)
var token string
err := row.Scan(&token)
if err != nil {
return "", err
}
return token, nil
}
// DeleteSession invalidates the current session
// DeleteSession invalidates the current session
func (db *DB) DeleteSession(ctx context.Context, token string) error {
	// Use ExecContext for a statement that returns no rows:
	// QueryContext handed back a *sql.Rows that was never closed,
	// leaking a pooled connection on every logout.
	_, err := db.sql.ExecContext(ctx, `UPDATE
		sessions
	SET (active) = (false)
	WHERE token = $1;`, token)
	return err
}
fix query formatting
package hydrocarbon
import (
"context"
"database/sql"
"errors"
_ "github.com/lib/pq"
)
var (
ErrInvalidSession = errors.New("invalid session token")
)
// A DB is responsible for all interactions with postgres
type DB struct {
sql *sql.DB
}
// NewDB returns a new database
func NewDB(dsn string) (*DB, error) {
db, err := sql.Open("postgres", dsn)
if err != nil {
return nil, err
}
err = runMigrations(db)
if err != nil {
return nil, err
}
return &DB{
sql: db,
}, nil
}
// CreateUser creates a new user and returns the users ID
func (db *DB) CreateUser(ctx context.Context, email string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO users
(email)
VALUES ($1)
RETURNING id;`, email)
var userID string
err := row.Scan(&userID)
if err != nil {
return "", err
}
return userID, nil
}
// CreateLoginToken creates a new one-time-use login token
func (db *DB) CreateLoginToken(ctx context.Context, userID string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO login_tokens
(user_id)
VALUES ($1)
RETURNING token;`, userID)
var token string
err := row.Scan(&token)
if err != nil {
return "", err
}
return token, nil
}
// ActivateLoginToken activates the given LoginToken and returns the user
// the token was for
func (db *DB) ActivateLoginToken(ctx context.Context, token string) (string, error) {
row := db.sql.QueryRowContext(ctx, `UPDATE login_tokens
SET (used) = (true)
WHERE token = $1
AND expired_at > now()
RETURNING user_id;`, token)
var userID string
err := row.Scan(&userID)
if err != nil {
return "", err
}
return userID, nil
}
// CreateSession creates a new session for the user ID and returns the
// session token
func (db *DB) CreateSession(ctx context.Context, userID, userAgent, ip string) (string, error) {
row := db.sql.QueryRowContext(ctx, `INSERT INTO sessions
(user_id, user_agent, ip)
VALUES ($1, $2, $3)
RETURNING token;`, userID, userAgent, ip)
var token string
err := row.Scan(&token)
if err != nil {
return "", err
}
return token, nil
}
// DeleteSession invalidates the current session
// DeleteSession invalidates the current session
func (db *DB) DeleteSession(ctx context.Context, token string) error {
	// Use ExecContext for a statement that returns no rows:
	// QueryContext handed back a *sql.Rows that was never closed,
	// leaking a pooled connection on every logout.
	_, err := db.sql.ExecContext(ctx, `UPDATE
		sessions
	SET (active) = (false)
	WHERE token = $1;`, token)
	return err
}
|
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// Range is a range of keys in the database. GetApproximateSizes calls with it
// begin at the key Start and end right before the key Limit.
type Range struct {
	Start []byte
	Limit []byte
}

// DB is a reusable handle to a RocksDB database on disk, created by Open.
type DB struct {
	c    *C.rocksdb_t // underlying C handle; released by Close
	name string       // path this database was opened with
	opts *Options     // options retained for the lifetime of the handle
}
// OpenDb opens a database with the specified options.
func OpenDb(opts *Options, name string) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open(opts.c, cName, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}

// OpenDbWithTTL opens a database with TTL support with the specified options.
func OpenDbWithTTL(opts *Options, name string, ttl int) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open_with_ttl(opts.c, cName, C.int(ttl), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}

// OpenDbForReadOnly opens a database with the specified options for readonly usage.
func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}
// OpenDbColumnFamilies opens a database with the specified column families.
//
// cfNames and cfOpts must have the same, non-zero length; cfOpts[i] applies
// to the column family named cfNames[i].
func OpenDbColumnFamilies(
	opts *Options,
	name string,
	cfNames []string,
	cfOpts []*Options,
) (*DB, []*ColumnFamilyHandle, error) {
	numColumnFamilies := len(cfNames)
	if numColumnFamilies != len(cfOpts) {
		return nil, nil, errors.New("must provide the same number of column family names and options")
	}
	// An empty list would make the &cNames[0]/&cOpts[0]/&cHandles[0]
	// expressions below panic with index-out-of-range; fail descriptively.
	if numColumnFamilies == 0 {
		return nil, nil, errors.New("must provide at least one column family")
	}
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	cNames := make([]*C.char, numColumnFamilies)
	for i, s := range cfNames {
		cNames[i] = C.CString(s)
	}
	// Free every C copy of the names once the open call has completed.
	defer func() {
		for _, s := range cNames {
			C.free(unsafe.Pointer(s))
		}
	}()
	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
	for i, o := range cfOpts {
		cOpts[i] = o.c
	}
	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
	var cErr *C.char
	db := C.rocksdb_open_column_families(
		opts.c,
		cName,
		C.int(numColumnFamilies),
		&cNames[0],
		&cOpts[0],
		&cHandles[0],
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, nil, errors.New(C.GoString(cErr))
	}
	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
	for i, c := range cHandles {
		cfHandles[i] = NewNativeColumnFamilyHandle(c)
	}
	return &DB{
		name: name,
		c:    db,
		opts: opts,
	}, cfHandles, nil
}
// OpenDbForReadOnlyColumnFamilies opens a database with the specified column
// families in read only mode.
//
// cfNames and cfOpts must have the same, non-zero length; cfOpts[i] applies
// to the column family named cfNames[i].
func OpenDbForReadOnlyColumnFamilies(
	opts *Options,
	name string,
	cfNames []string,
	cfOpts []*Options,
	errorIfLogFileExist bool,
) (*DB, []*ColumnFamilyHandle, error) {
	numColumnFamilies := len(cfNames)
	if numColumnFamilies != len(cfOpts) {
		return nil, nil, errors.New("must provide the same number of column family names and options")
	}
	// An empty list would make the &cNames[0]/&cOpts[0]/&cHandles[0]
	// expressions below panic with index-out-of-range; fail descriptively.
	if numColumnFamilies == 0 {
		return nil, nil, errors.New("must provide at least one column family")
	}
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	cNames := make([]*C.char, numColumnFamilies)
	for i, s := range cfNames {
		cNames[i] = C.CString(s)
	}
	// Free every C copy of the names once the open call has completed.
	defer func() {
		for _, s := range cNames {
			C.free(unsafe.Pointer(s))
		}
	}()
	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
	for i, o := range cfOpts {
		cOpts[i] = o.c
	}
	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
	var cErr *C.char
	db := C.rocksdb_open_for_read_only_column_families(
		opts.c,
		cName,
		C.int(numColumnFamilies),
		&cNames[0],
		&cOpts[0],
		&cHandles[0],
		boolToChar(errorIfLogFileExist),
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, nil, errors.New(C.GoString(cErr))
	}
	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
	for i, c := range cHandles {
		cfHandles[i] = NewNativeColumnFamilyHandle(c)
	}
	return &DB{
		name: name,
		c:    db,
		opts: opts,
	}, cfHandles, nil
}
// ListColumnFamilies lists the names of the column families in the DB.
func ListColumnFamilies(opts *Options, name string) ([]string, error) {
	var (
		cErr  *C.char
		cLen  C.size_t
		cName = C.CString(name)
	)
	defer C.free(unsafe.Pointer(cName))
	cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	namesLen := int(cLen)
	names := make([]string, namesLen)
	// The maximum capacity of the array type below is limited to (2^29)-1 to
	// remain compatible with 32-bit platforms. The size of a *C.char (a
	// pointer) is 4 bytes on a 32-bit system and (2^29)*4 == math.MaxInt32+1,
	// so the previous [1 << 30]*C.char form failed to compile there.
	// See issue golang/go#13656.
	cNamesArr := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen]
	for i, n := range cNamesArr {
		names[i] = C.GoString(n)
	}
	C.rocksdb_list_column_families_destroy(cNames, cLen)
	return names, nil
}
// UnsafeGetDB returns the underlying c rocksdb instance.
// The returned pointer remains owned by db and is invalid after Close.
func (db *DB) UnsafeGetDB() unsafe.Pointer {
	return unsafe.Pointer(db.c)
}

// Name returns the name of the database.
func (db *DB) Name() string {
	return db.name
}
// Get returns the data associated with the key from the database.
func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get(db.c, opts.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewSlice(cVal, cLen), nil
}

// GetBytes is like Get but returns a copy of the data.
func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get(db.c, opts.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	if cVal == nil {
		return nil, nil
	}
	defer C.free(unsafe.Pointer(cVal))
	return C.GoBytes(unsafe.Pointer(cVal), C.int(cLen)), nil
}

// GetCF returns the data associated with the key from the database and column family.
func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Slice, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewSlice(cVal, cLen), nil
}

// GetPinned returns the data associated with the key from the database.
func (db *DB) GetPinned(opts *ReadOptions, key []byte) (*PinnableSliceHandle, error) {
	k := byteToChar(key)
	var cErr *C.char
	cHandle := C.rocksdb_get_pinned(db.c, opts.c, k, C.size_t(len(key)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewNativePinnableSliceHandle(cHandle), nil
}
// MultiGet returns the data associated with the passed keys from the database
func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) {
	cKeys, cKeySizes := byteSlicesToCSlices(keys)
	defer cKeys.Destroy()
	vals := make(charsSlice, len(keys))
	valSizes := make(sizeTSlice, len(keys))
	rocksErrs := make(charsSlice, len(keys))
	C.rocksdb_multi_get(
		db.c,
		opts.c,
		C.size_t(len(keys)),
		cKeys.c(),
		cKeySizes.c(),
		vals.c(),
		valSizes.c(),
		rocksErrs.c(),
	)
	var errs []error
	for i, rocksErr := range rocksErrs {
		if rocksErr != nil {
			// defer in a loop is intentional here: each C-allocated
			// per-key error string is freed when the function returns.
			defer C.free(unsafe.Pointer(rocksErr))
			err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr))
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0])
	}
	slices := make(Slices, len(keys))
	for i, val := range vals {
		slices[i] = NewSlice(val, valSizes[i])
	}
	return slices, nil
}

// MultiGetCF returns the data associated with the passed keys from the column family
func (db *DB) MultiGetCF(opts *ReadOptions, cf *ColumnFamilyHandle, keys ...[]byte) (Slices, error) {
	// Expand the single handle to one entry per key and delegate.
	cfs := make(ColumnFamilyHandles, len(keys))
	for i := 0; i < len(keys); i++ {
		cfs[i] = cf
	}
	return db.MultiGetCFMultiCF(opts, cfs, keys)
}

// MultiGetCFMultiCF returns the data associated with the passed keys and
// column families.
func (db *DB) MultiGetCFMultiCF(opts *ReadOptions, cfs ColumnFamilyHandles, keys [][]byte) (Slices, error) {
	cKeys, cKeySizes := byteSlicesToCSlices(keys)
	defer cKeys.Destroy()
	vals := make(charsSlice, len(keys))
	valSizes := make(sizeTSlice, len(keys))
	rocksErrs := make(charsSlice, len(keys))
	C.rocksdb_multi_get_cf(
		db.c,
		opts.c,
		cfs.toCSlice().c(),
		C.size_t(len(keys)),
		cKeys.c(),
		cKeySizes.c(),
		vals.c(),
		valSizes.c(),
		rocksErrs.c(),
	)
	var errs []error
	for i, rocksErr := range rocksErrs {
		if rocksErr != nil {
			// Freed at function exit, same pattern as MultiGet above.
			defer C.free(unsafe.Pointer(rocksErr))
			err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr))
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0])
	}
	slices := make(Slices, len(keys))
	for i, val := range vals {
		slices[i] = NewSlice(val, valSizes[i])
	}
	return slices, nil
}
// Put writes data associated with a key to the database.
func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
	k, v := byteToChar(key), byteToChar(value)
	var cErr *C.char
	C.rocksdb_put(db.c, opts.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// PutCF writes data associated with a key to the database and column family.
func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byte) error {
	k, v := byteToChar(key), byteToChar(value)
	var cErr *C.char
	C.rocksdb_put_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// Delete removes the data associated with the key from the database.
func (db *DB) Delete(opts *WriteOptions, key []byte) error {
	k := byteToChar(key)
	var cErr *C.char
	C.rocksdb_delete(db.c, opts.c, k, C.size_t(len(key)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// DeleteCF removes the data associated with the key from the database and column family.
func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) error {
	k := byteToChar(key)
	var cErr *C.char
	C.rocksdb_delete_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// Merge merges the data associated with the key with the actual data in the database.
func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error {
	k, v := byteToChar(key), byteToChar(value)
	var cErr *C.char
	C.rocksdb_merge(db.c, opts.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// MergeCF merges the data associated with the key with the actual data in the
// database and column family.
func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, value []byte) error {
	k, v := byteToChar(key), byteToChar(value)
	var cErr *C.char
	C.rocksdb_merge_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// Write writes a WriteBatch to the database
func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error {
	var cErr *C.char
	C.rocksdb_write(db.c, opts.c, batch.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// NewIterator returns an Iterator over the database that uses the
// ReadOptions given.
func (db *DB) NewIterator(opts *ReadOptions) *Iterator {
	cIter := C.rocksdb_create_iterator(db.c, opts.c)
	return NewNativeIterator(unsafe.Pointer(cIter))
}

// NewIteratorCF returns an Iterator over the database and column family
// that uses the ReadOptions given.
func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator {
	cIter := C.rocksdb_create_iterator_cf(db.c, opts.c, cf.c)
	return NewNativeIterator(unsafe.Pointer(cIter))
}

// GetUpdatesSince returns an iterator over write-ahead-log updates starting
// at the given sequence number.
func (db *DB) GetUpdatesSince(seqNumber uint64) (*WalIterator, error) {
	var cErr *C.char
	cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewNativeWalIterator(unsafe.Pointer(cIter)), nil
}

// GetLatestSequenceNumber returns the database's latest sequence number.
func (db *DB) GetLatestSequenceNumber() uint64 {
	return uint64(C.rocksdb_get_latest_sequence_number(db.c))
}

// NewSnapshot creates a new snapshot of the database.
func (db *DB) NewSnapshot() *Snapshot {
	cSnap := C.rocksdb_create_snapshot(db.c)
	return NewNativeSnapshot(cSnap)
}

// ReleaseSnapshot releases the snapshot and its resources.
func (db *DB) ReleaseSnapshot(snapshot *Snapshot) {
	C.rocksdb_release_snapshot(db.c, snapshot.c)
	snapshot.c = nil // cleared so the same handle cannot be released twice
}
// GetProperty returns the value of a database property.
func (db *DB) GetProperty(propName string) string {
	cprop := C.CString(propName)
	defer C.free(unsafe.Pointer(cprop))
	cValue := C.rocksdb_property_value(db.c, cprop)
	defer C.free(unsafe.Pointer(cValue))
	return C.GoString(cValue)
}

// GetPropertyCF returns the value of a database property for the given
// column family.
func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string {
	cProp := C.CString(propName)
	defer C.free(unsafe.Pointer(cProp))
	cValue := C.rocksdb_property_value_cf(db.c, cf.c, cProp)
	defer C.free(unsafe.Pointer(cValue))
	return C.GoString(cValue)
}
// CreateColumnFamily creates a new column family.
func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandle, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	handle := C.rocksdb_create_column_family(db.c, opts.c, cName, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewNativeColumnFamilyHandle(handle), nil
}

// DropColumnFamily drops a column family.
func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error {
	var cErr *C.char
	C.rocksdb_drop_column_family(db.c, c.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// GetApproximateSizes returns the approximate number of bytes of file system
// space used by one or more key ranges.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
	sizes := make([]uint64, len(ranges))
	if len(ranges) == 0 {
		return sizes
	}
	cStarts := make([]*C.char, len(ranges))
	cLimits := make([]*C.char, len(ranges))
	cStartLens := make([]C.size_t, len(ranges))
	cLimitLens := make([]C.size_t, len(ranges))
	for i, r := range ranges {
		// C.CBytes allocates C copies of the keys; freed below.
		cStarts[i] = (*C.char)(C.CBytes(r.Start))
		cStartLens[i] = C.size_t(len(r.Start))
		cLimits[i] = (*C.char)(C.CBytes(r.Limit))
		cLimitLens[i] = C.size_t(len(r.Limit))
	}
	defer func() {
		for i := range ranges {
			C.free(unsafe.Pointer(cStarts[i]))
			C.free(unsafe.Pointer(cLimits[i]))
		}
	}()
	C.rocksdb_approximate_sizes(
		db.c,
		C.int(len(ranges)),
		&cStarts[0],
		&cStartLens[0],
		&cLimits[0],
		&cLimitLens[0],
		(*C.uint64_t)(&sizes[0]))
	return sizes
}

// GetApproximateSizesCF returns the approximate number of bytes of file system
// space used by one or more key ranges in the column family.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []uint64 {
	sizes := make([]uint64, len(ranges))
	if len(ranges) == 0 {
		return sizes
	}
	cStarts := make([]*C.char, len(ranges))
	cLimits := make([]*C.char, len(ranges))
	cStartLens := make([]C.size_t, len(ranges))
	cLimitLens := make([]C.size_t, len(ranges))
	for i, r := range ranges {
		// C.CBytes allocates C copies of the keys; freed below.
		cStarts[i] = (*C.char)(C.CBytes(r.Start))
		cStartLens[i] = C.size_t(len(r.Start))
		cLimits[i] = (*C.char)(C.CBytes(r.Limit))
		cLimitLens[i] = C.size_t(len(r.Limit))
	}
	defer func() {
		for i := range ranges {
			C.free(unsafe.Pointer(cStarts[i]))
			C.free(unsafe.Pointer(cLimits[i]))
		}
	}()
	C.rocksdb_approximate_sizes_cf(
		db.c,
		cf.c,
		C.int(len(ranges)),
		&cStarts[0],
		&cStartLens[0],
		&cLimits[0],
		&cLimitLens[0],
		(*C.uint64_t)(&sizes[0]))
	return sizes
}
// SetOptions dynamically changes options through the SetOptions API.
//
// keys and values must have the same length; keys[i] is set to values[i].
func (db *DB) SetOptions(keys, values []string) error {
	numKeys := len(keys)
	if numKeys == 0 {
		return nil
	}
	// Guard the parallel-slice contract; previously a short values slice
	// caused an index-out-of-range panic below.
	if numKeys != len(values) {
		return errors.New("must provide the same number of keys and values")
	}
	cKeys := make([]*C.char, numKeys)
	cValues := make([]*C.char, numKeys)
	for i := range keys {
		cKeys[i] = C.CString(keys[i])
		cValues[i] = C.CString(values[i])
	}
	// Free the C copies once the call completes; previously every CString
	// was leaked on each invocation.
	defer func() {
		for i := range cKeys {
			C.free(unsafe.Pointer(cKeys[i]))
			C.free(unsafe.Pointer(cValues[i]))
		}
	}()
	var cErr *C.char
	C.rocksdb_set_options(
		db.c,
		C.int(numKeys),
		&cKeys[0],
		&cValues[0],
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// LiveFileMetadata is a metadata which is associated with each SST file.
type LiveFileMetadata struct {
	Name        string // file name of the SST file
	Level       int    // level the file resides on
	Size        int64  // file size in bytes
	SmallestKey []byte // smallest key contained in the file
	LargestKey  []byte // largest key contained in the file
}

// GetLiveFilesMetaData returns a list of all table files with their
// level, start key and end key.
func (db *DB) GetLiveFilesMetaData() []LiveFileMetadata {
	lf := C.rocksdb_livefiles(db.c)
	// The C metadata object owns all returned strings/keys; everything is
	// copied into Go memory before it is destroyed below.
	defer C.rocksdb_livefiles_destroy(lf)
	count := C.rocksdb_livefiles_count(lf)
	liveFiles := make([]LiveFileMetadata, int(count))
	for i := C.int(0); i < count; i++ {
		var liveFile LiveFileMetadata
		liveFile.Name = C.GoString(C.rocksdb_livefiles_name(lf, i))
		liveFile.Level = int(C.rocksdb_livefiles_level(lf, i))
		liveFile.Size = int64(C.rocksdb_livefiles_size(lf, i))
		var cSize C.size_t
		key := C.rocksdb_livefiles_smallestkey(lf, i, &cSize)
		liveFile.SmallestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
		key = C.rocksdb_livefiles_largestkey(lf, i, &cSize)
		liveFile.LargestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
		liveFiles[int(i)] = liveFile
	}
	return liveFiles
}
// CompactRange runs a manual compaction on the Range of keys given. This is
// not likely to be needed for typical usage.
func (db *DB) CompactRange(r Range) {
	start, limit := byteToChar(r.Start), byteToChar(r.Limit)
	C.rocksdb_compact_range(db.c, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit)))
}

// CompactRangeCF runs a manual compaction on the Range of keys given on the
// given column family. This is not likely to be needed for typical usage.
func (db *DB) CompactRangeCF(cf *ColumnFamilyHandle, r Range) {
	start, limit := byteToChar(r.Start), byteToChar(r.Limit)
	C.rocksdb_compact_range_cf(db.c, cf.c, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit)))
}
// Flush triggers a manual flush for the database.
func (db *DB) Flush(opts *FlushOptions) error {
	var cErr *C.char
	C.rocksdb_flush(db.c, opts.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// DisableFileDeletions disables file deletions and should be used when backup the database.
func (db *DB) DisableFileDeletions() error {
	var cErr *C.char
	C.rocksdb_disable_file_deletions(db.c, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// EnableFileDeletions enables file deletions for the database.
func (db *DB) EnableFileDeletions(force bool) error {
	var cErr *C.char
	C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// DeleteFile deletes the file name from the db directory and update the internal state to
// reflect that. Supports deletion of sst and log files only. 'name' must be
// path relative to the db directory. eg. 000001.sst, /archive/000003.log.
func (db *DB) DeleteFile(name string) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	C.rocksdb_delete_file(db.c, cName)
}
// DeleteFileInRange deletes SST files that contain keys between the Range, [r.Start, r.Limit]
func (db *DB) DeleteFileInRange(r Range) error {
	cStartKey := byteToChar(r.Start)
	cLimitKey := byteToChar(r.Limit)
	var cErr *C.char
	C.rocksdb_delete_file_in_range(
		db.c,
		cStartKey, C.size_t(len(r.Start)),
		cLimitKey, C.size_t(len(r.Limit)),
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// DeleteFileInRangeCF deletes SST files that contain keys between the Range, [r.Start, r.Limit], and
// belong to a given column family
func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error {
	cStartKey := byteToChar(r.Start)
	cLimitKey := byteToChar(r.Limit)
	var cErr *C.char
	C.rocksdb_delete_file_in_range_cf(
		db.c,
		cf.c,
		cStartKey, C.size_t(len(r.Start)),
		cLimitKey, C.size_t(len(r.Limit)),
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// IngestExternalFile loads a list of external SST files.
func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOptions) error {
	// Nothing to ingest; taking &cFilePaths[0] below would panic on an
	// empty slice.
	if len(filePaths) == 0 {
		return nil
	}
	cFilePaths := make([]*C.char, len(filePaths))
	for i, s := range filePaths {
		cFilePaths[i] = C.CString(s)
	}
	defer func() {
		for _, s := range cFilePaths {
			C.free(unsafe.Pointer(s))
		}
	}()
	var cErr *C.char
	C.rocksdb_ingest_external_file(
		db.c,
		&cFilePaths[0],
		C.size_t(len(filePaths)),
		opts.c,
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// IngestExternalFileCF loads a list of external SST files for a column family.
func (db *DB) IngestExternalFileCF(handle *ColumnFamilyHandle, filePaths []string, opts *IngestExternalFileOptions) error {
	// Nothing to ingest; taking &cFilePaths[0] below would panic on an
	// empty slice.
	if len(filePaths) == 0 {
		return nil
	}
	cFilePaths := make([]*C.char, len(filePaths))
	for i, s := range filePaths {
		cFilePaths[i] = C.CString(s)
	}
	defer func() {
		for _, s := range cFilePaths {
			C.free(unsafe.Pointer(s))
		}
	}()
	var cErr *C.char
	C.rocksdb_ingest_external_file_cf(
		db.c,
		handle.c,
		&cFilePaths[0],
		C.size_t(len(filePaths)),
		opts.c,
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// NewCheckpoint creates a new Checkpoint for this db.
func (db *DB) NewCheckpoint() (*Checkpoint, error) {
	var (
		cErr *C.char
	)
	cCheckpoint := C.rocksdb_checkpoint_object_create(
		db.c, &cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewNativeCheckpoint(cCheckpoint), nil
}

// Close closes the database, releasing the underlying C handle.
func (db *DB) Close() {
	C.rocksdb_close(db.c)
}
// DestroyDb removes a database entirely, removing everything from the
// filesystem.
func DestroyDb(name string, opts *Options) error {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	C.rocksdb_destroy_db(opts.c, cName, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}

// RepairDb repairs a database.
func RepairDb(name string, opts *Options) error {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	C.rocksdb_repair_db(opts.c, cName, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
Workaround for Linux 32-bit build
package gorocksdb
// #include <stdlib.h>
// #include "rocksdb/c.h"
import "C"
import (
"errors"
"fmt"
"unsafe"
)
// Range is a range of keys in the database. GetApproximateSizes calls with it
// begin at the key Start and end right before the key Limit.
type Range struct {
	Start []byte
	Limit []byte
}

// DB is a reusable handle to a RocksDB database on disk, created by Open.
type DB struct {
	c    *C.rocksdb_t // underlying C handle; released by Close
	name string       // path this database was opened with
	opts *Options     // options retained for the lifetime of the handle
}
// OpenDb opens a database with the specified options.
func OpenDb(opts *Options, name string) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open(opts.c, cName, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}

// OpenDbWithTTL opens a database with TTL support with the specified options.
func OpenDbWithTTL(opts *Options, name string, ttl int) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open_with_ttl(opts.c, cName, C.int(ttl), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}

// OpenDbForReadOnly opens a database with the specified options for readonly usage.
func OpenDbForReadOnly(opts *Options, name string, errorIfLogFileExist bool) (*DB, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))

	var cErr *C.char
	cDB := C.rocksdb_open_for_read_only(opts.c, cName, boolToChar(errorIfLogFileExist), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return &DB{name: name, c: cDB, opts: opts}, nil
}
// OpenDbColumnFamilies opens a database with the specified column families.
//
// cfNames and cfOpts must have the same, non-zero length; cfOpts[i] applies
// to the column family named cfNames[i].
func OpenDbColumnFamilies(
	opts *Options,
	name string,
	cfNames []string,
	cfOpts []*Options,
) (*DB, []*ColumnFamilyHandle, error) {
	numColumnFamilies := len(cfNames)
	if numColumnFamilies != len(cfOpts) {
		return nil, nil, errors.New("must provide the same number of column family names and options")
	}
	// An empty list would make the &cNames[0]/&cOpts[0]/&cHandles[0]
	// expressions below panic with index-out-of-range; fail descriptively.
	if numColumnFamilies == 0 {
		return nil, nil, errors.New("must provide at least one column family")
	}
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	cNames := make([]*C.char, numColumnFamilies)
	for i, s := range cfNames {
		cNames[i] = C.CString(s)
	}
	// Free every C copy of the names once the open call has completed.
	defer func() {
		for _, s := range cNames {
			C.free(unsafe.Pointer(s))
		}
	}()
	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
	for i, o := range cfOpts {
		cOpts[i] = o.c
	}
	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
	var cErr *C.char
	db := C.rocksdb_open_column_families(
		opts.c,
		cName,
		C.int(numColumnFamilies),
		&cNames[0],
		&cOpts[0],
		&cHandles[0],
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, nil, errors.New(C.GoString(cErr))
	}
	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
	for i, c := range cHandles {
		cfHandles[i] = NewNativeColumnFamilyHandle(c)
	}
	return &DB{
		name: name,
		c:    db,
		opts: opts,
	}, cfHandles, nil
}
// OpenDbForReadOnlyColumnFamilies opens a database with the specified column
// families in read only mode.
//
// cfNames and cfOpts must have the same, non-zero length; cfOpts[i] applies
// to the column family named cfNames[i].
func OpenDbForReadOnlyColumnFamilies(
	opts *Options,
	name string,
	cfNames []string,
	cfOpts []*Options,
	errorIfLogFileExist bool,
) (*DB, []*ColumnFamilyHandle, error) {
	numColumnFamilies := len(cfNames)
	if numColumnFamilies != len(cfOpts) {
		return nil, nil, errors.New("must provide the same number of column family names and options")
	}
	// An empty list would make the &cNames[0]/&cOpts[0]/&cHandles[0]
	// expressions below panic with index-out-of-range; fail descriptively.
	if numColumnFamilies == 0 {
		return nil, nil, errors.New("must provide at least one column family")
	}
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	cNames := make([]*C.char, numColumnFamilies)
	for i, s := range cfNames {
		cNames[i] = C.CString(s)
	}
	// Free every C copy of the names once the open call has completed.
	defer func() {
		for _, s := range cNames {
			C.free(unsafe.Pointer(s))
		}
	}()
	cOpts := make([]*C.rocksdb_options_t, numColumnFamilies)
	for i, o := range cfOpts {
		cOpts[i] = o.c
	}
	cHandles := make([]*C.rocksdb_column_family_handle_t, numColumnFamilies)
	var cErr *C.char
	db := C.rocksdb_open_for_read_only_column_families(
		opts.c,
		cName,
		C.int(numColumnFamilies),
		&cNames[0],
		&cOpts[0],
		&cHandles[0],
		boolToChar(errorIfLogFileExist),
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, nil, errors.New(C.GoString(cErr))
	}
	cfHandles := make([]*ColumnFamilyHandle, numColumnFamilies)
	for i, c := range cHandles {
		cfHandles[i] = NewNativeColumnFamilyHandle(c)
	}
	return &DB{
		name: name,
		c:    db,
		opts: opts,
	}, cfHandles, nil
}
// ListColumnFamilies lists the names of the column families in the DB.
func ListColumnFamilies(opts *Options, name string) ([]string, error) {
	var (
		cErr  *C.char
		cLen  C.size_t
		cName = C.CString(name)
	)
	defer C.free(unsafe.Pointer(cName))
	cNames := C.rocksdb_list_column_families(opts.c, cName, &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	namesLen := int(cLen)
	names := make([]string, namesLen)
	// The maximum capacity of the following two slices is limited to (2^29)-1 to remain compatible
	// with 32-bit platforms. The size of a `*C.char` (a pointer) is 4 Byte on a 32-bit system
	// and (2^29)*4 == math.MaxInt32 + 1. -- See issue golang/go#13656
	cNamesArr := (*[(1 << 29) - 1]*C.char)(unsafe.Pointer(cNames))[:namesLen:namesLen]
	for i, n := range cNamesArr {
		names[i] = C.GoString(n)
	}
	// Every name has been copied into Go memory; hand the C-allocated list
	// back to RocksDB for deallocation.
	C.rocksdb_list_column_families_destroy(cNames, cLen)
	return names, nil
}
// UnsafeGetDB returns the underlying c rocksdb instance.
// The returned pointer remains owned by db and is invalid after Close.
func (db *DB) UnsafeGetDB() unsafe.Pointer {
	return unsafe.Pointer(db.c)
}

// Name returns the name of the database.
func (db *DB) Name() string {
	return db.name
}
// Get returns the data associated with the key from the database.
func (db *DB) Get(opts *ReadOptions, key []byte) (*Slice, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get(db.c, opts.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewSlice(cVal, cLen), nil
}

// GetBytes is like Get but returns a copy of the data.
func (db *DB) GetBytes(opts *ReadOptions, key []byte) ([]byte, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get(db.c, opts.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	if cVal == nil {
		return nil, nil
	}
	defer C.free(unsafe.Pointer(cVal))
	return C.GoBytes(unsafe.Pointer(cVal), C.int(cLen)), nil
}

// GetCF returns the data associated with the key from the database and column family.
func (db *DB) GetCF(opts *ReadOptions, cf *ColumnFamilyHandle, key []byte) (*Slice, error) {
	k := byteToChar(key)
	var cErr *C.char
	var cLen C.size_t
	cVal := C.rocksdb_get_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), &cLen, &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewSlice(cVal, cLen), nil
}

// GetPinned returns the data associated with the key from the database.
func (db *DB) GetPinned(opts *ReadOptions, key []byte) (*PinnableSliceHandle, error) {
	k := byteToChar(key)
	var cErr *C.char
	cHandle := C.rocksdb_get_pinned(db.c, opts.c, k, C.size_t(len(key)), &cErr)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return nil, errors.New(C.GoString(cErr))
	}
	return NewNativePinnableSliceHandle(cHandle), nil
}
// MultiGet returns the data associated with the passed keys from the database
func (db *DB) MultiGet(opts *ReadOptions, keys ...[]byte) (Slices, error) {
	cKeys, cKeySizes := byteSlicesToCSlices(keys)
	defer cKeys.Destroy()
	vals := make(charsSlice, len(keys))
	valSizes := make(sizeTSlice, len(keys))
	rocksErrs := make(charsSlice, len(keys))
	C.rocksdb_multi_get(
		db.c,
		opts.c,
		C.size_t(len(keys)),
		cKeys.c(),
		cKeySizes.c(),
		vals.c(),
		valSizes.c(),
		rocksErrs.c(),
	)
	var errs []error
	for i, rocksErr := range rocksErrs {
		if rocksErr != nil {
			// defer in a loop is intentional here: each C-allocated
			// per-key error string is freed when the function returns.
			defer C.free(unsafe.Pointer(rocksErr))
			err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr))
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0])
	}
	slices := make(Slices, len(keys))
	for i, val := range vals {
		slices[i] = NewSlice(val, valSizes[i])
	}
	return slices, nil
}

// MultiGetCF returns the data associated with the passed keys from the column family
func (db *DB) MultiGetCF(opts *ReadOptions, cf *ColumnFamilyHandle, keys ...[]byte) (Slices, error) {
	// Expand the single handle to one entry per key and delegate.
	cfs := make(ColumnFamilyHandles, len(keys))
	for i := 0; i < len(keys); i++ {
		cfs[i] = cf
	}
	return db.MultiGetCFMultiCF(opts, cfs, keys)
}

// MultiGetCFMultiCF returns the data associated with the passed keys and
// column families.
func (db *DB) MultiGetCFMultiCF(opts *ReadOptions, cfs ColumnFamilyHandles, keys [][]byte) (Slices, error) {
	cKeys, cKeySizes := byteSlicesToCSlices(keys)
	defer cKeys.Destroy()
	vals := make(charsSlice, len(keys))
	valSizes := make(sizeTSlice, len(keys))
	rocksErrs := make(charsSlice, len(keys))
	C.rocksdb_multi_get_cf(
		db.c,
		opts.c,
		cfs.toCSlice().c(),
		C.size_t(len(keys)),
		cKeys.c(),
		cKeySizes.c(),
		vals.c(),
		valSizes.c(),
		rocksErrs.c(),
	)
	var errs []error
	for i, rocksErr := range rocksErrs {
		if rocksErr != nil {
			// Freed at function exit, same pattern as MultiGet above.
			defer C.free(unsafe.Pointer(rocksErr))
			err := fmt.Errorf("getting %q failed: %v", string(keys[i]), C.GoString(rocksErr))
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return nil, fmt.Errorf("failed to get %d keys, first error: %v", len(errs), errs[0])
	}
	slices := make(Slices, len(keys))
	for i, val := range vals {
		slices[i] = NewSlice(val, valSizes[i])
	}
	return slices, nil
}
// Put stores value under key in the database using the supplied
// write options.
func (db *DB) Put(opts *WriteOptions, key, value []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	v := byteToChar(value)
	C.rocksdb_put(db.c, opts.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// PutCF stores value under key in the given column family using the
// supplied write options.
func (db *DB) PutCF(opts *WriteOptions, cf *ColumnFamilyHandle, key, value []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	v := byteToChar(value)
	C.rocksdb_put_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// Delete removes the data stored under key from the database.
func (db *DB) Delete(opts *WriteOptions, key []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	C.rocksdb_delete(db.c, opts.c, k, C.size_t(len(key)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// DeleteCF removes the data stored under key from the given column family.
func (db *DB) DeleteCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	C.rocksdb_delete_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// Merge combines value with the data already stored under key, using the
// database's configured merge operator.
func (db *DB) Merge(opts *WriteOptions, key []byte, value []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	v := byteToChar(value)
	C.rocksdb_merge(db.c, opts.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// MergeCF combines value with the data already stored under key in the
// given column family, using the configured merge operator.
func (db *DB) MergeCF(opts *WriteOptions, cf *ColumnFamilyHandle, key []byte, value []byte) error {
	var errStr *C.char
	k := byteToChar(key)
	v := byteToChar(value)
	C.rocksdb_merge_cf(db.c, opts.c, cf.c, k, C.size_t(len(key)), v, C.size_t(len(value)), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// Write applies the given WriteBatch to the database.
func (db *DB) Write(opts *WriteOptions, batch *WriteBatch) error {
	var errStr *C.char
	C.rocksdb_write(db.c, opts.c, batch.c, &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// NewIterator returns an Iterator over the whole database that uses the
// supplied ReadOptions.
func (db *DB) NewIterator(opts *ReadOptions) *Iterator {
	return NewNativeIterator(unsafe.Pointer(C.rocksdb_create_iterator(db.c, opts.c)))
}
// NewIteratorCF returns an Iterator over the given column family that
// uses the supplied ReadOptions.
func (db *DB) NewIteratorCF(opts *ReadOptions, cf *ColumnFamilyHandle) *Iterator {
	return NewNativeIterator(unsafe.Pointer(C.rocksdb_create_iterator_cf(db.c, opts.c, cf.c)))
}
// GetUpdatesSince returns an iterator over write-ahead-log entries recorded
// after the given sequence number. The third C argument (wal filter options)
// is passed as nil, i.e. no read options are applied.
func (db *DB) GetUpdatesSince(seqNumber uint64) (*WalIterator, error) {
var cErr *C.char
cIter := C.rocksdb_get_updates_since(db.c, C.uint64_t(seqNumber), nil, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return nil, errors.New(C.GoString(cErr))
}
return NewNativeWalIterator(unsafe.Pointer(cIter)), nil
}
// GetLatestSequenceNumber returns the sequence number of the most recent
// write applied to the database.
func (db *DB) GetLatestSequenceNumber() uint64 {
return uint64(C.rocksdb_get_latest_sequence_number(db.c))
}
// NewSnapshot creates a consistent point-in-time view of the database.
// Release it with ReleaseSnapshot when no longer needed.
func (db *DB) NewSnapshot() *Snapshot {
	return NewNativeSnapshot(C.rocksdb_create_snapshot(db.c))
}
// ReleaseSnapshot releases the snapshot's resources and clears its C
// handle so the snapshot cannot be used again afterwards.
func (db *DB) ReleaseSnapshot(snap *Snapshot) {
	C.rocksdb_release_snapshot(db.c, snap.c)
	snap.c = nil
}
// GetProperty returns the value of the named database property.
func (db *DB) GetProperty(propName string) string {
	name := C.CString(propName)
	defer C.free(unsafe.Pointer(name))
	value := C.rocksdb_property_value(db.c, name)
	defer C.free(unsafe.Pointer(value))
	return C.GoString(value)
}
// GetPropertyCF returns the value of the named property for the given
// column family.
func (db *DB) GetPropertyCF(propName string, cf *ColumnFamilyHandle) string {
	name := C.CString(propName)
	defer C.free(unsafe.Pointer(name))
	value := C.rocksdb_property_value_cf(db.c, cf.c, name)
	defer C.free(unsafe.Pointer(value))
	return C.GoString(value)
}
// CreateColumnFamily creates a new column family with the given name.
func (db *DB) CreateColumnFamily(opts *Options, name string) (*ColumnFamilyHandle, error) {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	var errStr *C.char
	handle := C.rocksdb_create_column_family(db.c, opts.c, cName, &errStr)
	if errStr == nil {
		return NewNativeColumnFamilyHandle(handle), nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return nil, errors.New(C.GoString(errStr))
}
// DropColumnFamily drops the given column family from the database.
func (db *DB) DropColumnFamily(c *ColumnFamilyHandle) error {
	var errStr *C.char
	C.rocksdb_drop_column_family(db.c, c.c, &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// GetApproximateSizes returns the approximate number of bytes of file system
// space used by one or more key ranges.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
sizes := make([]uint64, len(ranges))
// Short-circuit the empty case: taking &cStarts[0] etc. below would
// panic on zero-length slices.
if len(ranges) == 0 {
return sizes
}
// Copy each range boundary into C memory (C.CBytes allocates);
// everything is freed in the deferred loop below.
cStarts := make([]*C.char, len(ranges))
cLimits := make([]*C.char, len(ranges))
cStartLens := make([]C.size_t, len(ranges))
cLimitLens := make([]C.size_t, len(ranges))
for i, r := range ranges {
cStarts[i] = (*C.char)(C.CBytes(r.Start))
cStartLens[i] = C.size_t(len(r.Start))
cLimits[i] = (*C.char)(C.CBytes(r.Limit))
cLimitLens[i] = C.size_t(len(r.Limit))
}
defer func() {
for i := range ranges {
C.free(unsafe.Pointer(cStarts[i]))
C.free(unsafe.Pointer(cLimits[i]))
}
}()
// The C call writes one size per range directly into the Go slice's
// backing array via the (*C.uint64_t) cast.
C.rocksdb_approximate_sizes(
db.c,
C.int(len(ranges)),
&cStarts[0],
&cStartLens[0],
&cLimits[0],
&cLimitLens[0],
(*C.uint64_t)(&sizes[0]))
return sizes
}
// GetApproximateSizesCF returns the approximate number of bytes of file system
// space used by one or more key ranges in the column family.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizesCF(cf *ColumnFamilyHandle, ranges []Range) []uint64 {
sizes := make([]uint64, len(ranges))
// Short-circuit the empty case: taking &cStarts[0] etc. below would
// panic on zero-length slices.
if len(ranges) == 0 {
return sizes
}
// Copy each range boundary into C memory (C.CBytes allocates);
// everything is freed in the deferred loop below.
cStarts := make([]*C.char, len(ranges))
cLimits := make([]*C.char, len(ranges))
cStartLens := make([]C.size_t, len(ranges))
cLimitLens := make([]C.size_t, len(ranges))
for i, r := range ranges {
cStarts[i] = (*C.char)(C.CBytes(r.Start))
cStartLens[i] = C.size_t(len(r.Start))
cLimits[i] = (*C.char)(C.CBytes(r.Limit))
cLimitLens[i] = C.size_t(len(r.Limit))
}
defer func() {
for i := range ranges {
C.free(unsafe.Pointer(cStarts[i]))
C.free(unsafe.Pointer(cLimits[i]))
}
}()
// The C call writes one size per range directly into the Go slice's
// backing array via the (*C.uint64_t) cast.
C.rocksdb_approximate_sizes_cf(
db.c,
cf.c,
C.int(len(ranges)),
&cStarts[0],
&cStartLens[0],
&cLimits[0],
&cLimitLens[0],
(*C.uint64_t)(&sizes[0]))
return sizes
}
// SetOptions dynamically changes options through the RocksDB SetOptions API.
// keys and values are parallel slices of option names and their new values.
func (db *DB) SetOptions(keys, values []string) error {
	numKeys := len(keys)
	if numKeys == 0 {
		return nil
	}
	cKeys := make([]*C.char, numKeys)
	cValues := make([]*C.char, numKeys)
	for i := range keys {
		cKeys[i] = C.CString(keys[i])
		cValues[i] = C.CString(values[i])
	}
	// Free the C copies of the option strings; the previous version
	// leaked every one of them.
	defer func() {
		for i := range cKeys {
			C.free(unsafe.Pointer(cKeys[i]))
			C.free(unsafe.Pointer(cValues[i]))
		}
	}()
	var cErr *C.char
	C.rocksdb_set_options(
		db.c,
		C.int(numKeys),
		&cKeys[0],
		&cValues[0],
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// LiveFileMetadata is a metadata which is associated with each SST file.
type LiveFileMetadata struct {
Name string // file name as reported by rocksdb_livefiles_name
Level int // LSM tree level the file lives on
Size int64 // file size in bytes
SmallestKey []byte // smallest user key contained in the file
LargestKey []byte // largest user key contained in the file
}
// GetLiveFilesMetaData returns a list of all table files with their
// level, start key and end key.
func (db *DB) GetLiveFilesMetaData() []LiveFileMetadata {
lf := C.rocksdb_livefiles(db.c)
// The livefiles handle owns all the strings returned below, so it is
// destroyed only after every field has been copied into Go memory.
defer C.rocksdb_livefiles_destroy(lf)
count := C.rocksdb_livefiles_count(lf)
liveFiles := make([]LiveFileMetadata, int(count))
for i := C.int(0); i < count; i++ {
var liveFile LiveFileMetadata
liveFile.Name = C.GoString(C.rocksdb_livefiles_name(lf, i))
liveFile.Level = int(C.rocksdb_livefiles_level(lf, i))
liveFile.Size = int64(C.rocksdb_livefiles_size(lf, i))
// Keys are length-prefixed, not NUL-terminated, hence GoBytes with
// the explicit size out-parameter.
var cSize C.size_t
key := C.rocksdb_livefiles_smallestkey(lf, i, &cSize)
liveFile.SmallestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
key = C.rocksdb_livefiles_largestkey(lf, i, &cSize)
liveFile.LargestKey = C.GoBytes(unsafe.Pointer(key), C.int(cSize))
liveFiles[int(i)] = liveFile
}
return liveFiles
}
// CompactRange runs a manual compaction over the given key range. This is
// rarely needed in typical usage.
func (db *DB) CompactRange(r Range) {
	start, limit := byteToChar(r.Start), byteToChar(r.Limit)
	C.rocksdb_compact_range(db.c, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit)))
}
// CompactRangeCF runs a manual compaction over the given key range of the
// given column family. This is rarely needed in typical usage.
func (db *DB) CompactRangeCF(cf *ColumnFamilyHandle, r Range) {
	start, limit := byteToChar(r.Start), byteToChar(r.Limit)
	C.rocksdb_compact_range_cf(db.c, cf.c, start, C.size_t(len(r.Start)), limit, C.size_t(len(r.Limit)))
}
// Flush triggers a manual flush of the memtables to disk for the database.
func (db *DB) Flush(opts *FlushOptions) error {
var cErr *C.char
C.rocksdb_flush(db.c, opts.c, &cErr)
if cErr != nil {
defer C.free(unsafe.Pointer(cErr))
return errors.New(C.GoString(cErr))
}
return nil
}
// DisableFileDeletions stops the database from deleting obsolete files;
// useful while a backup of the database is in progress.
func (db *DB) DisableFileDeletions() error {
	var errStr *C.char
	C.rocksdb_disable_file_deletions(db.c, &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// EnableFileDeletions re-enables file deletions for the database.
func (db *DB) EnableFileDeletions(force bool) error {
	var errStr *C.char
	C.rocksdb_enable_file_deletions(db.c, boolToChar(force), &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// DeleteFile removes the named file from the db directory and updates the
// internal state to reflect that. Only sst and log files are supported;
// name must be a path relative to the db directory, e.g. 000001.sst or
// /archive/000003.log.
func (db *DB) DeleteFile(name string) {
	n := C.CString(name)
	defer C.free(unsafe.Pointer(n))
	C.rocksdb_delete_file(db.c, n)
}
// DeleteFileInRange deletes SST files that only contain keys in the range
// [r.Start, r.Limit].
func (db *DB) DeleteFileInRange(r Range) error {
	start := byteToChar(r.Start)
	limit := byteToChar(r.Limit)
	var errStr *C.char
	C.rocksdb_delete_file_in_range(
		db.c,
		start, C.size_t(len(r.Start)),
		limit, C.size_t(len(r.Limit)),
		&errStr,
	)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// DeleteFileInRangeCF deletes SST files that only contain keys in the
// range [r.Start, r.Limit] and belong to the given column family.
func (db *DB) DeleteFileInRangeCF(cf *ColumnFamilyHandle, r Range) error {
	start := byteToChar(r.Start)
	limit := byteToChar(r.Limit)
	var errStr *C.char
	C.rocksdb_delete_file_in_range_cf(
		db.c,
		cf.c,
		start, C.size_t(len(r.Start)),
		limit, C.size_t(len(r.Limit)),
		&errStr,
	)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// IngestExternalFile loads a list of external SST files into the database.
// An empty list is a no-op.
func (db *DB) IngestExternalFile(filePaths []string, opts *IngestExternalFileOptions) error {
	// Guard the empty case: &cFilePaths[0] below would panic on a
	// zero-length slice.
	if len(filePaths) == 0 {
		return nil
	}
	cFilePaths := make([]*C.char, len(filePaths))
	for i, s := range filePaths {
		cFilePaths[i] = C.CString(s)
	}
	defer func() {
		for _, s := range cFilePaths {
			C.free(unsafe.Pointer(s))
		}
	}()
	var cErr *C.char
	C.rocksdb_ingest_external_file(
		db.c,
		&cFilePaths[0],
		C.size_t(len(filePaths)),
		opts.c,
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// IngestExternalFileCF loads a list of external SST files into the given
// column family. An empty list is a no-op.
func (db *DB) IngestExternalFileCF(handle *ColumnFamilyHandle, filePaths []string, opts *IngestExternalFileOptions) error {
	// Guard the empty case: &cFilePaths[0] below would panic on a
	// zero-length slice.
	if len(filePaths) == 0 {
		return nil
	}
	cFilePaths := make([]*C.char, len(filePaths))
	for i, s := range filePaths {
		cFilePaths[i] = C.CString(s)
	}
	defer func() {
		for _, s := range cFilePaths {
			C.free(unsafe.Pointer(s))
		}
	}()
	var cErr *C.char
	C.rocksdb_ingest_external_file_cf(
		db.c,
		handle.c,
		&cFilePaths[0],
		C.size_t(len(filePaths)),
		opts.c,
		&cErr,
	)
	if cErr != nil {
		defer C.free(unsafe.Pointer(cErr))
		return errors.New(C.GoString(cErr))
	}
	return nil
}
// NewCheckpoint creates a new Checkpoint object for this database.
func (db *DB) NewCheckpoint() (*Checkpoint, error) {
	var errStr *C.char
	cp := C.rocksdb_checkpoint_object_create(db.c, &errStr)
	if errStr == nil {
		return NewNativeCheckpoint(cp), nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return nil, errors.New(C.GoString(errStr))
}
// Close closes the database, releasing the underlying C handle.
// NOTE(review): the handle is presumably invalid after this call — using
// the DB after Close should be avoided; confirm against the RocksDB docs.
func (db *DB) Close() {
C.rocksdb_close(db.c)
}
// DestroyDb removes a database entirely, deleting everything from the
// filesystem.
func DestroyDb(name string, opts *Options) error {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	var errStr *C.char
	C.rocksdb_destroy_db(opts.c, cName, &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
// RepairDb attempts to repair the database at the given path.
func RepairDb(name string, opts *Options) error {
	cName := C.CString(name)
	defer C.free(unsafe.Pointer(cName))
	var errStr *C.char
	C.rocksdb_repair_db(opts.c, cName, &errStr)
	if errStr == nil {
		return nil
	}
	defer C.free(unsafe.Pointer(errStr))
	return errors.New(C.GoString(errStr))
}
|
/*
cyle's simple graph database, version 0.1
*/
package main
import "fmt"
import "net/http"
import "os"
import "io/ioutil"
import "bufio"
import "encoding/json" // documentation: http://golang.org/pkg/encoding/json/
// using gorest: https://code.google.com/p/gorest/wiki/GettingStarted?tm=6
import "code.google.com/p/gorest"
// AllTheData is the whole in-memory database: a display name plus every
// node and connection in the graph. It is serialized as-is to JSON.
type AllTheData struct {
Name string
Nodes []Node
Connections []Connection
}
// Node is a single vertex in the graph, identified by Id.
type Node struct {
Id int
Name string
ExtraJSONBytes []byte // raw extra JSON; only referenced from commented-out code in GetNodeHandler — TODO confirm intended use
ExtraJSON []interface{} // decoded form of ExtraJSONBytes (same caveat)
}
// Connection is an edge between two nodes; Source and Target are node IDs.
type Connection struct {
Id int
Name string
Source int
Target int
}
// db_filename is the on-disk location of the JSON-serialized database.
var db_filename string = "ALLTHEDATA.json"
// theData holds the entire graph (nodes and connections) in memory.
var theData AllTheData
// main loads the database from disk (or seeds dummy data on first run)
// and serves the REST API on :8777.
func main() {
	theData.Name = "The Graph Database"
	fmt.Println("Oh dear, a graph database...")
	// if the database file exists, load it; otherwise seed dummy data
	check, _ := doesFileExist(db_filename)
	if check {
		loadAllTheData()
	} else {
		// create some dummy nodes!
		for i := 1; i <= 10; i++ {
			theData.Nodes = append(theData.Nodes, Node{i, "Node " + fmt.Sprintf("%d", i), nil, nil})
		}
		// create some dummy connections. The old code declared connSix
		// through connTen but never appended them (a compile error in Go:
		// "declared and not used") and reused ID 5 for six different
		// connections; IDs must be unique for update/delete by ID to work.
		seed := []Connection{
			{1, "Node 1 to 2", 1, 2},
			{2, "Node 2 to 3", 2, 3},
			{3, "Node 3 to 4", 3, 4},
			{4, "Node 4 to 5", 4, 5},
			{5, "Node 5 to 6", 5, 6},
			{6, "Node 3 to 9", 3, 9},
			{7, "Node 9 to 8", 9, 8},
			{8, "Node 8 to 3", 8, 3},
			{9, "Node 3 to 7", 3, 7},
			{10, "Node 7 to 5", 7, 5},
		}
		theData.Connections = append(theData.Connections, seed...)
		// save this dummy data for future use
		saveAllTheData()
	}
	// start the REST service to access the data
	gorest.RegisterService(new(GraphService))
	http.Handle("/", gorest.Handle())
	// surface a bind/serve failure instead of silently exiting
	if err := http.ListenAndServe(":8777", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
// GraphService declares the REST endpoints; gorest builds the routing
// table from the struct tags on the EndPoint fields below.
type GraphService struct{
// service level config
gorest.RestService `root:"/" consumes:"application/json" produces:"application/json"`
// define routes
// deal with the root
rootHandler gorest.EndPoint `method:"GET" path:"/" output:"string"`
// node stuff
getNodesHandler gorest.EndPoint `method:"GET" path:"/nodes" output:"[]Node"`
getNodeHandler gorest.EndPoint `method:"GET" path:"/node/{Id:int}" output:"Node"`
postNodeHandler gorest.EndPoint `method:"POST" path:"/node" postdata:"Node"`
deleteNodeHandler gorest.EndPoint `method:"DELETE" path:"/node/{Id:int}"`
getConnectionsForNodeHandler gorest.EndPoint `method:"GET" path:"/node/{Id:int}/connections" output:"[]Connection"`
// connections stuff
getConnectionsHandler gorest.EndPoint `method:"GET" path:"/connections" output:"[]Connection"`
getConnectionHandler gorest.EndPoint `method:"GET" path:"/connection/{Id:int}" output:"Connection"`
postConnectionHandler gorest.EndPoint `method:"POST" path:"/connection" postdata:"Connection"`
deleteConnectionHandler gorest.EndPoint `method:"DELETE" path:"/connection/{Id:int}"`
// save the database
saveDatabaseHandler gorest.EndPoint `method:"GET" path:"/save" output:"string"`
}
// RootHandler responds to GET / with a short banner string.
func (serv GraphService) RootHandler() string {
return "Simple Graph Database, v0.1"
}
// SaveDatabaseHandler persists the in-memory database to disk on demand
// (GET /save) and reports success to the caller.
func (serv GraphService) SaveDatabaseHandler() string {
	fmt.Println("Saving database to file")
	saveAllTheData()
	fmt.Println("Saved database to file")
	return "okay"
}
/*
node functions
*/
// GetNodesHandler returns every node currently in the database.
func (serv GraphService) GetNodesHandler() []Node {
	fmt.Println("Sending along current list of nodes")
	nodes := theData.Nodes
	return nodes
}
// GetNodeHandler looks up a single node by its ID; responds 404 when no
// node with that ID exists.
func (serv GraphService) GetNodeHandler(Id int) (n Node) {
	fmt.Printf("Asking for node ID: %d \n", Id)
	for _, candidate := range theData.Nodes {
		if candidate.Id != Id {
			continue
		}
		n = candidate
		fmt.Printf("Giving: %+v \n", n)
		return
	}
	// Not found: send a 404. Overide(true) makes gorest ignore the zeroed
	// return value instead of serializing it back to the client.
	serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	return
}
// PostNodeHandler creates a node or, when a node with the posted ID
// already exists, updates it in place. Responds 200 on update, 201 on create.
func (serv GraphService) PostNodeHandler(n Node) {
	fmt.Printf("Just got: %+v \n", n)
	// check if this already exists. if so, update it.
	for key, value := range theData.Nodes {
		if value.Id == n.Id {
			fmt.Printf("Updating node ID %d \n", n.Id)
			theData.Nodes[key] = n
			serv.ResponseBuilder().SetResponseCode(200)
			return
		}
	}
	// doesn't exist? create it. Assign max(existing ID)+1 rather than
	// len+1: after a deletion, len+1 could collide with a live node's ID.
	fmt.Println("Creating new node based on input")
	maxID := 0
	for _, value := range theData.Nodes {
		if value.Id > maxID {
			maxID = value.Id
		}
	}
	n.Id = maxID + 1
	theData.Nodes = append(theData.Nodes, n)
	serv.ResponseBuilder().SetResponseCode(201)
	return
}
// DeleteNodeHandler removes the node with the given ID, if present.
// Always responds 200 (even when the ID was not found), matching the
// original behavior.
func (serv GraphService) DeleteNodeHandler(Id int) {
	fmt.Printf("Trying to delete node ID %d \n", Id)
	thekey := -1
	for key, value := range theData.Nodes {
		if value.Id == Id {
			thekey = key
		}
	}
	if thekey > -1 {
		// Splice the node out in place. The previous version allocated
		// make([]Node, n) and then appended to it, which left n zero-valued
		// nodes at the front of the slice when deleting index 0.
		theData.Nodes = append(theData.Nodes[:thekey], theData.Nodes[thekey+1:]...)
		fmt.Println("Node deleted")
	} else {
		fmt.Println("Could not find that node ID to delete, weird")
	}
	serv.ResponseBuilder().SetResponseCode(200)
	return
}
// GetConnectionsForNodeHandler returns every connection that touches the
// node with the given ID (as source or target); responds 404 when none.
func (serv GraphService) GetConnectionsForNodeHandler(Id int) (connections []Connection) {
	fmt.Printf("Asking for connections for node ID: %d \n", Id)
	for _, c := range theData.Connections {
		if c.Source != Id && c.Target != Id {
			continue
		}
		connections = append(connections, c)
	}
	if len(connections) == 0 {
		// Not found: Overide(true) makes gorest ignore the zeroed return.
		serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	}
	return
}
/*
connection functions
*/
// GetConnectionsHandler returns every connection currently in the database.
func (serv GraphService) GetConnectionsHandler() []Connection {
	fmt.Println("Sending along current list of connections")
	conns := theData.Connections
	return conns
}
// GetConnectionHandler looks up a single connection by its ID; responds
// 404 when no connection with that ID exists.
func (serv GraphService) GetConnectionHandler(Id int) (c Connection) {
	fmt.Printf("Asking for connection ID: %d \n", Id)
	for _, candidate := range theData.Connections {
		if candidate.Id != Id {
			continue
		}
		c = candidate
		fmt.Printf("Giving: %+v \n", c)
		return
	}
	// Not found: Overide(true) makes gorest ignore the zeroed return value.
	serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	return
}
// PostConnectionHandler creates a connection or, when one with the posted
// ID exists, updates it in place. Self-loops are rejected with 400.
func (serv GraphService) PostConnectionHandler(c Connection) {
	fmt.Printf("Just got: %+v \n", c)
	// make sure it's not invalid
	if c.Source == c.Target {
		fmt.Println("Cannot create connection where SOURCE and TARGET are the same")
		serv.ResponseBuilder().SetResponseCode(400).Overide(true)
		return
	}
	// check to see if connection already exists
	for key, value := range theData.Connections {
		if value.Id == c.Id {
			fmt.Printf("Updating connection ID %d \n", c.Id)
			theData.Connections[key] = c
			serv.ResponseBuilder().SetResponseCode(200)
			return
		}
	}
	// does not exist! create a new connection. Assign max(existing ID)+1
	// rather than len+1: after a deletion, len+1 could collide with a
	// live connection's ID.
	fmt.Println("Creating new connection based on input")
	maxID := 0
	for _, value := range theData.Connections {
		if value.Id > maxID {
			maxID = value.Id
		}
	}
	c.Id = maxID + 1
	theData.Connections = append(theData.Connections, c)
	serv.ResponseBuilder().SetResponseCode(201)
	return
}
// DeleteConnectionHandler removes the connection with the given ID, if
// present. Always responds 200, matching the original behavior.
func (serv GraphService) DeleteConnectionHandler(Id int) {
	fmt.Printf("Trying to delete connection ID %d", Id)
	thekey := -1
	for key, value := range theData.Connections {
		if value.Id == Id {
			thekey = key
		}
	}
	if thekey > -1 {
		// Splice out in place. The previous version allocated
		// make([]Connection, n) and then appended to it, leaving n
		// zero-valued entries at the front when deleting index 0.
		theData.Connections = append(theData.Connections[:thekey], theData.Connections[thekey+1:]...)
		fmt.Println("Connection deleted")
	} else {
		fmt.Println("Could not find that connection ID to delete, weird")
	}
	serv.ResponseBuilder().SetResponseCode(200)
	return
}
/*
save and load the data
*/
// saveAllTheData serializes the whole in-memory database to JSON and
// writes it to db_filename, panicking on any failure.
func saveAllTheData() {
	out, createErr := os.Create(db_filename)
	if createErr != nil {
		panic(createErr)
	}
	// close out on exit and surface any close error
	defer func() {
		if closeErr := out.Close(); closeErr != nil {
			panic(closeErr)
		}
	}()
	w := bufio.NewWriter(out)
	payload, marshalErr := json.Marshal(theData)
	if marshalErr != nil {
		panic(marshalErr)
	}
	if _, writeErr := w.Write(payload); writeErr != nil {
		panic(writeErr)
	}
	if flushErr := w.Flush(); flushErr != nil {
		panic(flushErr)
	}
}
// loadAllTheData reads db_filename and unmarshals its JSON into the
// global theData, panicking on any failure.
func loadAllTheData() {
	raw, readErr := ioutil.ReadFile(db_filename)
	if readErr != nil {
		panic(readErr)
	}
	if err := json.Unmarshal(raw, &theData); err != nil {
		panic(err)
	}
}
/*
helper functions
*/
// exists returns whether the given file or directory exists or not
// from: http://stackoverflow.com/questions/10510691/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang
func doesFileExist(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil { return true, nil }
if os.IsNotExist(err) { return false, nil }
return false, err
}
- fixed the unused-variables bug: connections connSix through connTen are now appended to theData.Connections
/*
cyle's simple graph database, version 0.1
*/
package main
import "fmt"
import "net/http"
import "os"
import "io/ioutil"
import "bufio"
import "encoding/json" // documentation: http://golang.org/pkg/encoding/json/
// using gorest: https://code.google.com/p/gorest/wiki/GettingStarted?tm=6
import "code.google.com/p/gorest"
// AllTheData is the whole in-memory database: a display name plus every
// node and connection in the graph. It is serialized as-is to JSON.
type AllTheData struct {
Name string
Nodes []Node
Connections []Connection
}
// Node is a single vertex in the graph, identified by Id.
type Node struct {
Id int
Name string
ExtraJSONBytes []byte // raw extra JSON; only referenced from commented-out code in GetNodeHandler — TODO confirm intended use
ExtraJSON []interface{} // decoded form of ExtraJSONBytes (same caveat)
}
// Connection is an edge between two nodes; Source and Target are node IDs.
type Connection struct {
Id int
Name string
Source int
Target int
}
// db_filename is the on-disk location of the JSON-serialized database.
var db_filename string = "ALLTHEDATA.json"
// theData holds the entire graph (nodes and connections) in memory.
var theData AllTheData
// main loads the database from disk (or seeds dummy data on first run)
// and serves the REST API on :8777.
func main() {
	theData.Name = "The Graph Database"
	fmt.Println("Oh dear, a graph database...")
	// if the database file exists, load it; otherwise seed dummy data
	check, _ := doesFileExist(db_filename)
	if check {
		loadAllTheData()
	} else {
		// create some dummy nodes!
		for i := 1; i <= 10; i++ {
			theData.Nodes = append(theData.Nodes, Node{i, "Node " + fmt.Sprintf("%d", i), nil, nil})
		}
		// create some dummy connections. The old seed reused ID 5 for six
		// different connections; IDs must be unique for the update and
		// delete-by-ID handlers to behave correctly.
		seed := []Connection{
			{1, "Node 1 to 2", 1, 2},
			{2, "Node 2 to 3", 2, 3},
			{3, "Node 3 to 4", 3, 4},
			{4, "Node 4 to 5", 4, 5},
			{5, "Node 5 to 6", 5, 6},
			{6, "Node 3 to 9", 3, 9},
			{7, "Node 9 to 8", 9, 8},
			{8, "Node 8 to 3", 8, 3},
			{9, "Node 3 to 7", 3, 7},
			{10, "Node 7 to 5", 7, 5},
		}
		theData.Connections = append(theData.Connections, seed...)
		// save this dummy data for future use
		saveAllTheData()
	}
	// start the REST service to access the data
	gorest.RegisterService(new(GraphService))
	http.Handle("/", gorest.Handle())
	// surface a bind/serve failure instead of silently exiting
	if err := http.ListenAndServe(":8777", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
// GraphService declares the REST endpoints; gorest builds the routing
// table from the struct tags on the EndPoint fields below.
type GraphService struct{
// service level config
gorest.RestService `root:"/" consumes:"application/json" produces:"application/json"`
// define routes
// deal with the root
rootHandler gorest.EndPoint `method:"GET" path:"/" output:"string"`
// node stuff
getNodesHandler gorest.EndPoint `method:"GET" path:"/nodes" output:"[]Node"`
getNodeHandler gorest.EndPoint `method:"GET" path:"/node/{Id:int}" output:"Node"`
postNodeHandler gorest.EndPoint `method:"POST" path:"/node" postdata:"Node"`
deleteNodeHandler gorest.EndPoint `method:"DELETE" path:"/node/{Id:int}"`
getConnectionsForNodeHandler gorest.EndPoint `method:"GET" path:"/node/{Id:int}/connections" output:"[]Connection"`
// connections stuff
getConnectionsHandler gorest.EndPoint `method:"GET" path:"/connections" output:"[]Connection"`
getConnectionHandler gorest.EndPoint `method:"GET" path:"/connection/{Id:int}" output:"Connection"`
postConnectionHandler gorest.EndPoint `method:"POST" path:"/connection" postdata:"Connection"`
deleteConnectionHandler gorest.EndPoint `method:"DELETE" path:"/connection/{Id:int}"`
// save the database
saveDatabaseHandler gorest.EndPoint `method:"GET" path:"/save" output:"string"`
}
// RootHandler responds to GET / with a short banner string.
func (serv GraphService) RootHandler() string {
return "Simple Graph Database, v0.1"
}
// SaveDatabaseHandler persists the in-memory database to disk on demand
// (GET /save) and reports success to the caller.
func (serv GraphService) SaveDatabaseHandler() string {
	fmt.Println("Saving database to file")
	saveAllTheData()
	fmt.Println("Saved database to file")
	return "okay"
}
/*
node functions
*/
// GetNodesHandler returns every node currently in the database.
func (serv GraphService) GetNodesHandler() []Node {
	fmt.Println("Sending along current list of nodes")
	nodes := theData.Nodes
	return nodes
}
// GetNodeHandler looks up a single node by its ID; responds 404 when no
// node with that ID exists.
func (serv GraphService) GetNodeHandler(Id int) (n Node) {
	fmt.Printf("Asking for node ID: %d \n", Id)
	for _, candidate := range theData.Nodes {
		if candidate.Id != Id {
			continue
		}
		n = candidate
		fmt.Printf("Giving: %+v \n", n)
		return
	}
	// Not found: send a 404. Overide(true) makes gorest ignore the zeroed
	// return value instead of serializing it back to the client.
	serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	return
}
// PostNodeHandler creates a node or, when a node with the posted ID
// already exists, updates it in place. Responds 200 on update, 201 on create.
func (serv GraphService) PostNodeHandler(n Node) {
	fmt.Printf("Just got: %+v \n", n)
	// check if this already exists. if so, update it.
	for key, value := range theData.Nodes {
		if value.Id == n.Id {
			fmt.Printf("Updating node ID %d \n", n.Id)
			theData.Nodes[key] = n
			serv.ResponseBuilder().SetResponseCode(200)
			return
		}
	}
	// doesn't exist? create it. Assign max(existing ID)+1 rather than
	// len+1: after a deletion, len+1 could collide with a live node's ID.
	fmt.Println("Creating new node based on input")
	maxID := 0
	for _, value := range theData.Nodes {
		if value.Id > maxID {
			maxID = value.Id
		}
	}
	n.Id = maxID + 1
	theData.Nodes = append(theData.Nodes, n)
	serv.ResponseBuilder().SetResponseCode(201)
	return
}
// DeleteNodeHandler removes the node with the given ID, if present.
// Always responds 200 (even when the ID was not found), matching the
// original behavior.
func (serv GraphService) DeleteNodeHandler(Id int) {
	fmt.Printf("Trying to delete node ID %d \n", Id)
	thekey := -1
	for key, value := range theData.Nodes {
		if value.Id == Id {
			thekey = key
		}
	}
	if thekey > -1 {
		// Splice the node out in place. The previous version allocated
		// make([]Node, n) and then appended to it, which left n zero-valued
		// nodes at the front of the slice when deleting index 0.
		theData.Nodes = append(theData.Nodes[:thekey], theData.Nodes[thekey+1:]...)
		fmt.Println("Node deleted")
	} else {
		fmt.Println("Could not find that node ID to delete, weird")
	}
	serv.ResponseBuilder().SetResponseCode(200)
	return
}
// GetConnectionsForNodeHandler returns every connection that touches the
// node with the given ID (as source or target); responds 404 when none.
func (serv GraphService) GetConnectionsForNodeHandler(Id int) (connections []Connection) {
	fmt.Printf("Asking for connections for node ID: %d \n", Id)
	for _, c := range theData.Connections {
		if c.Source != Id && c.Target != Id {
			continue
		}
		connections = append(connections, c)
	}
	if len(connections) == 0 {
		// Not found: Overide(true) makes gorest ignore the zeroed return.
		serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	}
	return
}
/*
connection functions
*/
// GetConnectionsHandler returns the complete, current list of connections.
func (serv GraphService) GetConnectionsHandler() []Connection {
	fmt.Println("Sending along current list of connections")
	return theData.Connections
}
// GetConnectionHandler looks up a single connection by Id, responding 404
// when it does not exist.
func (serv GraphService) GetConnectionHandler(Id int) (c Connection) {
	fmt.Printf("Asking for connection ID: %d \n", Id)
	for _, candidate := range theData.Connections {
		if candidate.Id != Id {
			continue
		}
		c = candidate
		fmt.Printf("Giving: %+v \n", c)
		return c
	}
	// Not found: 404. Overide(true) suppresses the zeroed entity in the body.
	serv.ResponseBuilder().SetResponseCode(404).Overide(true)
	return c
}
// PostConnectionHandler creates a connection, or updates it in place when a
// connection with the same Id exists. Self-loops (Source == Target) are
// rejected with 400. Responds 200 on update, 201 on create.
func (serv GraphService) PostConnectionHandler(c Connection) {
	fmt.Printf("Just got: %+v \n", c)
	// make sure it's not invalid
	if c.Source == c.Target {
		fmt.Println("Cannot create connection where SOURCE and TARGET are the same")
		serv.ResponseBuilder().SetResponseCode(400).Overide(true)
		return
	}
	// check to see if connection already exists
	for key, value := range theData.Connections {
		if value.Id == c.Id {
			fmt.Printf("Updating connection ID %d \n", c.Id)
			theData.Connections[key] = c
			serv.ResponseBuilder().SetResponseCode(200)
			return
		}
	}
	// does not exist! create a new connection.
	fmt.Println("Creating new connection based on input")
	// len+1 reuses an existing ID once any connection has been deleted; use
	// the current maximum instead so new connections never collide.
	maxID := 0
	for _, existing := range theData.Connections {
		if existing.Id > maxID {
			maxID = existing.Id
		}
	}
	c.Id = maxID + 1
	theData.Connections = append(theData.Connections, c)
	serv.ResponseBuilder().SetResponseCode(201)
}
// DeleteConnectionHandler removes the connection with the given Id. Always
// responds 200, even when the Id is unknown (matching the original
// handler's contract); a miss is only logged.
func (serv GraphService) DeleteConnectionHandler(Id int) {
	// (fixed: the log line was missing its trailing newline)
	fmt.Printf("Trying to delete connection ID %d \n", Id)
	thekey := -1
	// If several connections share an Id, the last match wins (as before).
	for key, value := range theData.Connections {
		if value.Id == Id {
			thekey = key
		}
	}
	if thekey > -1 {
		// BUG FIX: the old thekey==0 branch did make([]Connection, len-1) and
		// then appended, leaving len-1 zero-valued connections in front of the
		// survivors. Rebuild the slice without the victim instead.
		tmpWhatever := make([]Connection, 0, len(theData.Connections)-1)
		tmpWhatever = append(tmpWhatever, theData.Connections[:thekey]...) // everything BEFORE
		tmpWhatever = append(tmpWhatever, theData.Connections[thekey+1:]...) // everything AFTER
		theData.Connections = tmpWhatever
		fmt.Println("Connection deleted")
	} else {
		fmt.Println("Could not find that connection ID to delete, weird")
	}
	serv.ResponseBuilder().SetResponseCode(200)
}
/*
save and load the data
*/
// saveAllTheData serializes the in-memory graph to JSON and writes it to the
// db file, panicking on any failure (this program treats the file as vital).
func saveAllTheData() {
	outFile, err := os.Create(db_filename)
	if err != nil {
		panic(err)
	}
	// Close errors matter after buffered writes, so surface them too.
	defer func() {
		if closeErr := outFile.Close(); closeErr != nil {
			panic(closeErr)
		}
	}()
	writer := bufio.NewWriter(outFile)
	allTheDataJSON, err := json.Marshal(theData)
	if err != nil {
		panic(err)
	}
	if _, err = writer.Write(allTheDataJSON); err != nil {
		panic(err)
	}
	// Flush pushes the buffered bytes to the file before the deferred Close.
	if err = writer.Flush(); err != nil {
		panic(err)
	}
}
// loadAllTheData reads the JSON db file and hydrates the in-memory graph,
// panicking on any failure.
func loadAllTheData() {
	allJSON, err := ioutil.ReadFile(db_filename)
	if err != nil {
		panic(err)
	}
	if unmarshalErr := json.Unmarshal(allJSON, &theData); unmarshalErr != nil {
		panic(unmarshalErr)
	}
}
/*
helper functions
*/
// exists returns whether the given file or directory exists or not
// from: http://stackoverflow.com/questions/10510691/how-to-check-whether-a-file-or-directory-denoted-by-a-path-exists-in-golang
func doesFileExist(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil { return true, nil }
if os.IsNotExist(err) { return false, nil }
return false, err
} |
package levigo
// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "levigo.h"
import "C"
import (
"unsafe"
)
// DatabaseError is an error message produced by LevelDB's C API, carried
// across the cgo boundary as a plain string.
type DatabaseError string
// Error implements the error interface.
func (e DatabaseError) Error() string {
	return string(e)
}
// DB is a reusable handle to a LevelDB database on disk, created by Open.
//
// To avoid memory and file descriptor leaks, call Close when you are
// through with the handle.
//
// All methods on a DB instance are thread-safe except for Close. Calls to
// any DB method made after Close will panic.
type DB struct {
	// Ldb is the underlying C handle; deallocated by Close.
	Ldb *C.leveldb_t
}
// Range is a range of keys in the database. GetApproximateSizes calls with it
// begin at the key Start and end right before the key Limit.
type Range struct {
	Start []byte
	Limit []byte
}
// Open opens a database.
//
// Creating a new database is done by calling SetCreateIfMissing(true) on the
// Options passed to Open.
//
// It is usually wise to set a Cache object on the Options with SetCache to
// keep recently used data from that database in memory.
func Open(dbname string, o *Options) (*DB, error) {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	leveldb := C.leveldb_open(o.Opt, ldbname, &errStr)
	// A non-nil errStr means the C call failed.
	// NOTE(review): errStr is allocated by LevelDB and is not freed here (in
	// this or the other wrappers below) — confirm whether a free is needed.
	if errStr != nil {
		return nil, DatabaseError(C.GoString(errStr))
	}
	return &DB{leveldb}, nil
}
// DestroyDatabase removes a database entirely, removing everything from the
// filesystem.
func DestroyDatabase(dbname string, o *Options) error {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	C.leveldb_destroy_db(o.Opt, ldbname, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// RepairDatabase attempts to repair a database.
//
// If the database is unrepairable, an error is returned.
func RepairDatabase(dbname string, o *Options) error {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	C.leveldb_repair_db(o.Opt, ldbname, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Put writes data associated with a key to the database.
//
// If a nil []byte is passed in as value, it will be returned by Get as an
// zero-length slice.
//
// The key and value byte slices may be reused safely. Put takes a copy of
// them before returning.
func (db *DB) Put(wo *WriteOptions, key, value []byte) error {
	var errStr *C.char
	// leveldb_put, _get, and _delete call memcpy() (by way of Memtable::Add)
	// when called, so we do not need to worry about these []byte being
	// reclaimed by GC.
	var k, v *C.char
	// Empty key/value stay as nil pointers; the lengths passed below are 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	if len(value) != 0 {
		v = (*C.char)(unsafe.Pointer(&value[0]))
	}
	lenk := len(key)
	lenv := len(value)
	C.leveldb_put(
		db.Ldb, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Get returns the data associated with the key from the database.
//
// If the key does not exist in the database, a nil []byte is returned. If the
// key does exist, but the data is zero-length in the database, a zero-length
// []byte will be returned.
//
// The key byte slice may be reused safely. Get takes a copy of
// them before returning.
func (db *DB) Get(ro *ReadOptions, key []byte) ([]byte, error) {
	var errStr *C.char
	var vallen C.size_t
	var k *C.char
	// An empty key stays a nil pointer; the length passed below is 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	value := C.leveldb_get(
		db.Ldb, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
	if errStr != nil {
		return nil, DatabaseError(C.GoString(errStr))
	}
	if value == nil {
		return nil, nil
	}
	// leveldb_get hands ownership of a malloc'd buffer to the caller; copy it
	// into Go memory and free the original, or every successful read leaks.
	defer C.free(unsafe.Pointer(value))
	return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
// Delete removes the data associated with the key from the database.
//
// The key byte slice may be reused safely. Delete takes a copy of
// them before returning.
func (db *DB) Delete(wo *WriteOptions, key []byte) error {
	var errStr *C.char
	var k *C.char
	// An empty key stays a nil pointer; the length passed below is 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	C.leveldb_delete(
		db.Ldb, wo.Opt, k, C.size_t(len(key)), &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Write atomically writes a WriteBatch to disk.
func (db *DB) Write(wo *WriteOptions, w *WriteBatch) error {
	var errStr *C.char
	C.leveldb_write(db.Ldb, wo.Opt, w.wbatch, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// NewIterator returns an Iterator over the the database that uses the
// ReadOptions given.
//
// Often, this is used for large, offline bulk reads while serving live
// traffic. In that case, it may be wise to disable caching so that the data
// processed by the returned Iterator does not displace the already cached
// data. This can be done by calling SetFillCache(false) on the ReadOptions
// before passing it here.
//
// Similiarly, ReadOptions.SetSnapshot is also useful.
func (db *DB) NewIterator(ro *ReadOptions) *Iterator {
	// The returned iterator owns a C-side handle; the Iterator wrapper is
	// responsible for releasing it.
	it := C.leveldb_create_iterator(db.Ldb, ro.Opt)
	return &Iterator{Iter: it}
}
// GetApproximateSizes returns the approximate number of bytes of file system
// space used by one or more key ranges.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
	// Guard the empty case: &starts[0] etc. below would panic on a
	// zero-length slice.
	if len(ranges) == 0 {
		return []uint64{}
	}
	starts := make([]*C.char, len(ranges))
	limits := make([]*C.char, len(ranges))
	startLens := make([]C.size_t, len(ranges))
	limitLens := make([]C.size_t, len(ranges))
	for i, r := range ranges {
		// C copies of the boundary keys; freed after the C call below.
		starts[i] = C.CString(string(r.Start))
		startLens[i] = C.size_t(len(r.Start))
		limits[i] = C.CString(string(r.Limit))
		limitLens[i] = C.size_t(len(r.Limit))
	}
	sizes := make([]uint64, len(ranges))
	numranges := C.int(len(ranges))
	startsPtr := &starts[0]
	limitsPtr := &limits[0]
	startLensPtr := &startLens[0]
	limitLensPtr := &limitLens[0]
	sizesPtr := (*C.uint64_t)(&sizes[0])
	C.levigo_leveldb_approximate_sizes(
		db.Ldb, numranges, startsPtr, startLensPtr,
		limitsPtr, limitLensPtr, sizesPtr)
	for i := range ranges {
		C.free(unsafe.Pointer(starts[i]))
		C.free(unsafe.Pointer(limits[i]))
	}
	return sizes
}
// PropertyValue returns the value of a database property.
//
// Examples of properties include "leveldb.stats", "leveldb.sstables",
// and "leveldb.num-files-at-level0".
func (db *DB) PropertyValue(propName string) string {
	cname := C.CString(propName)
	defer C.free(unsafe.Pointer(cname))
	// leveldb_property_value returns a malloc'd C string (or NULL for an
	// unknown property); copy it into Go memory and free the original, or
	// every call leaks.
	cval := C.leveldb_property_value(db.Ldb, cname)
	if cval == nil {
		return ""
	}
	defer C.free(unsafe.Pointer(cval))
	return C.GoString(cval)
}
// NewSnapshot creates a new snapshot of the database.
//
// The snapshot, when used in a ReadOptions, provides a consistent view of
// state of the database at the the snapshot was created.
//
// To prevent memory leaks and resource strain in the database, the snapshot
// returned must be released with this DB's ReleaseSnapshot method.
//
// See the LevelDB documentation for details.
func (db *DB) NewSnapshot() *C.leveldb_snapshot_t {
	// The snapshot handle is C-side state; pair every call with
	// ReleaseSnapshot to avoid resource buildup in the database.
	return C.leveldb_create_snapshot(db.Ldb)
}
// ReleaseSnapshot removes the snapshot from the database's list of snapshots,
// and deallocates it.
func (db *DB) ReleaseSnapshot(snap *C.leveldb_snapshot_t) {
	C.leveldb_release_snapshot(db.Ldb, snap)
}
// Close closes the database, rendering it unusable for I/O, by deallocating
// the underlying handle.
//
// Any attempts to use the DB after Close is called will panic.
func (db *DB) Close() {
	C.leveldb_close(db.Ldb)
}
Reworking the DB struct godoc.
package levigo
// #cgo LDFLAGS: -lleveldb
// #include <stdlib.h>
// #include "levigo.h"
import "C"
import (
"unsafe"
)
// DatabaseError is an error message produced by LevelDB's C API, carried
// across the cgo boundary as a plain string.
type DatabaseError string
// Error implements the error interface.
func (e DatabaseError) Error() string {
	return string(e)
}
// DB is a reusable handle to a LevelDB database on disk, created by Open.
//
// To avoid memory and file descriptor leaks, call Close when the process no
// longer needs the handle. Calls to any DB method made after Close will
// panic.
//
// The DB instance may be shared between goroutines. The usual data race
// conditions will occur if the same key is written to from more than one, of
// course.
type DB struct {
	// Ldb is the underlying C handle; deallocated by Close.
	Ldb *C.leveldb_t
}
// Range is a range of keys in the database. GetApproximateSizes calls with it
// begin at the key Start and end right before the key Limit.
type Range struct {
	Start []byte
	Limit []byte
}
// Open opens a database.
//
// Creating a new database is done by calling SetCreateIfMissing(true) on the
// Options passed to Open.
//
// It is usually wise to set a Cache object on the Options with SetCache to
// keep recently used data from that database in memory.
func Open(dbname string, o *Options) (*DB, error) {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	leveldb := C.leveldb_open(o.Opt, ldbname, &errStr)
	// A non-nil errStr means the C call failed.
	// NOTE(review): errStr is allocated by LevelDB and is not freed here (in
	// this or the other wrappers below) — confirm whether a free is needed.
	if errStr != nil {
		return nil, DatabaseError(C.GoString(errStr))
	}
	return &DB{leveldb}, nil
}
// DestroyDatabase removes a database entirely, removing everything from the
// filesystem.
func DestroyDatabase(dbname string, o *Options) error {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	C.leveldb_destroy_db(o.Opt, ldbname, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// RepairDatabase attempts to repair a database.
//
// If the database is unrepairable, an error is returned.
func RepairDatabase(dbname string, o *Options) error {
	var errStr *C.char
	ldbname := C.CString(dbname)
	defer C.free(unsafe.Pointer(ldbname))
	C.leveldb_repair_db(o.Opt, ldbname, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Put writes data associated with a key to the database.
//
// If a nil []byte is passed in as value, it will be returned by Get as an
// zero-length slice.
//
// The key and value byte slices may be reused safely. Put takes a copy of
// them before returning.
func (db *DB) Put(wo *WriteOptions, key, value []byte) error {
	var errStr *C.char
	// leveldb_put, _get, and _delete call memcpy() (by way of Memtable::Add)
	// when called, so we do not need to worry about these []byte being
	// reclaimed by GC.
	var k, v *C.char
	// Empty key/value stay as nil pointers; the lengths passed below are 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	if len(value) != 0 {
		v = (*C.char)(unsafe.Pointer(&value[0]))
	}
	lenk := len(key)
	lenv := len(value)
	C.leveldb_put(
		db.Ldb, wo.Opt, k, C.size_t(lenk), v, C.size_t(lenv), &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Get returns the data associated with the key from the database.
//
// If the key does not exist in the database, a nil []byte is returned. If the
// key does exist, but the data is zero-length in the database, a zero-length
// []byte will be returned.
//
// The key byte slice may be reused safely. Get takes a copy of
// them before returning.
func (db *DB) Get(ro *ReadOptions, key []byte) ([]byte, error) {
	var errStr *C.char
	var vallen C.size_t
	var k *C.char
	// An empty key stays a nil pointer; the length passed below is 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	value := C.leveldb_get(
		db.Ldb, ro.Opt, k, C.size_t(len(key)), &vallen, &errStr)
	if errStr != nil {
		return nil, DatabaseError(C.GoString(errStr))
	}
	if value == nil {
		return nil, nil
	}
	// leveldb_get hands ownership of a malloc'd buffer to the caller; copy it
	// into Go memory and free the original, or every successful read leaks.
	defer C.free(unsafe.Pointer(value))
	return C.GoBytes(unsafe.Pointer(value), C.int(vallen)), nil
}
// Delete removes the data associated with the key from the database.
//
// The key byte slice may be reused safely. Delete takes a copy of
// them before returning.
func (db *DB) Delete(wo *WriteOptions, key []byte) error {
	var errStr *C.char
	var k *C.char
	// An empty key stays a nil pointer; the length passed below is 0.
	if len(key) != 0 {
		k = (*C.char)(unsafe.Pointer(&key[0]))
	}
	C.leveldb_delete(
		db.Ldb, wo.Opt, k, C.size_t(len(key)), &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// Write atomically writes a WriteBatch to disk.
func (db *DB) Write(wo *WriteOptions, w *WriteBatch) error {
	var errStr *C.char
	C.leveldb_write(db.Ldb, wo.Opt, w.wbatch, &errStr)
	if errStr != nil {
		return DatabaseError(C.GoString(errStr))
	}
	return nil
}
// NewIterator returns an Iterator over the the database that uses the
// ReadOptions given.
//
// Often, this is used for large, offline bulk reads while serving live
// traffic. In that case, it may be wise to disable caching so that the data
// processed by the returned Iterator does not displace the already cached
// data. This can be done by calling SetFillCache(false) on the ReadOptions
// before passing it here.
//
// Similiarly, ReadOptions.SetSnapshot is also useful.
func (db *DB) NewIterator(ro *ReadOptions) *Iterator {
	// The returned iterator owns a C-side handle; the Iterator wrapper is
	// responsible for releasing it.
	it := C.leveldb_create_iterator(db.Ldb, ro.Opt)
	return &Iterator{Iter: it}
}
// GetApproximateSizes returns the approximate number of bytes of file system
// space used by one or more key ranges.
//
// The keys counted will begin at Range.Start and end on the key before
// Range.Limit.
func (db *DB) GetApproximateSizes(ranges []Range) []uint64 {
	// Guard the empty case: &starts[0] etc. below would panic on a
	// zero-length slice.
	if len(ranges) == 0 {
		return []uint64{}
	}
	starts := make([]*C.char, len(ranges))
	limits := make([]*C.char, len(ranges))
	startLens := make([]C.size_t, len(ranges))
	limitLens := make([]C.size_t, len(ranges))
	for i, r := range ranges {
		// C copies of the boundary keys; freed after the C call below.
		starts[i] = C.CString(string(r.Start))
		startLens[i] = C.size_t(len(r.Start))
		limits[i] = C.CString(string(r.Limit))
		limitLens[i] = C.size_t(len(r.Limit))
	}
	sizes := make([]uint64, len(ranges))
	numranges := C.int(len(ranges))
	startsPtr := &starts[0]
	limitsPtr := &limits[0]
	startLensPtr := &startLens[0]
	limitLensPtr := &limitLens[0]
	sizesPtr := (*C.uint64_t)(&sizes[0])
	C.levigo_leveldb_approximate_sizes(
		db.Ldb, numranges, startsPtr, startLensPtr,
		limitsPtr, limitLensPtr, sizesPtr)
	for i := range ranges {
		C.free(unsafe.Pointer(starts[i]))
		C.free(unsafe.Pointer(limits[i]))
	}
	return sizes
}
// PropertyValue returns the value of a database property.
//
// Examples of properties include "leveldb.stats", "leveldb.sstables",
// and "leveldb.num-files-at-level0".
func (db *DB) PropertyValue(propName string) string {
	cname := C.CString(propName)
	defer C.free(unsafe.Pointer(cname))
	// leveldb_property_value returns a malloc'd C string (or NULL for an
	// unknown property); copy it into Go memory and free the original, or
	// every call leaks.
	cval := C.leveldb_property_value(db.Ldb, cname)
	if cval == nil {
		return ""
	}
	defer C.free(unsafe.Pointer(cval))
	return C.GoString(cval)
}
// NewSnapshot creates a new snapshot of the database.
//
// The snapshot, when used in a ReadOptions, provides a consistent view of
// state of the database at the the snapshot was created.
//
// To prevent memory leaks and resource strain in the database, the snapshot
// returned must be released with this DB's ReleaseSnapshot method.
//
// See the LevelDB documentation for details.
func (db *DB) NewSnapshot() *C.leveldb_snapshot_t {
	// The snapshot handle is C-side state; pair every call with
	// ReleaseSnapshot to avoid resource buildup in the database.
	return C.leveldb_create_snapshot(db.Ldb)
}
// ReleaseSnapshot removes the snapshot from the database's list of snapshots,
// and deallocates it.
func (db *DB) ReleaseSnapshot(snap *C.leveldb_snapshot_t) {
	C.leveldb_release_snapshot(db.Ldb, snap)
}
// Close closes the database, rendering it unusable for I/O, by deallocating
// the underlying handle.
//
// Any attempts to use the DB after Close is called will panic.
func (db *DB) Close() {
	C.leveldb_close(db.Ldb)
}
|
package aspect
import (
"database/sql"
)
// Connection is a common interface for database connections or transactions
type Connection interface {
	Begin() (*TX, error)
	Commit() error
	Execute(stmt Executable, args ...interface{}) (sql.Result, error)
	Query(stmt Executable, args ...interface{}) (*Result, error)
	QueryAll(stmt Executable, i interface{}) error
	QueryOne(stmt Executable, i interface{}) error
	Rollback() error
	String(stmt Executable) string // Parameter-less output for logging
}
// Both DB and TX should implement the Connection interface
// (compile-time assertions; a build failure here means a method is missing)
var _ Connection = &DB{}
var _ Connection = &TX{}
// TODO The db should be able to determine if a stmt should be used with
// either Exec() or Query()
// Executable statements implement the Compiles interface
type Executable interface {
	Compiles
}
// DB wraps the current sql.DB connection pool and includes the Dialect
// associated with the connection.
type DB struct {
	conn *sql.DB
	dialect Dialect
}
// Begin starts a new transaction using the current database connection pool.
func (db *DB) Begin() (*TX, error) {
	tx, err := db.conn.Begin()
	if err != nil {
		// Previously a non-nil *TX wrapping a nil *sql.Tx was returned
		// alongside the error; any deferred Rollback on it would panic.
		return nil, err
	}
	return &TX{Tx: tx, dialect: db.dialect}, nil
}
// Commit does nothing for a DB connection pool. It only exists for parity
// with transactions to implement the Connection interface.
func (db *DB) Commit() (err error) {
	return
}
// Close closes the current database connection pool.
func (db *DB) Close() error {
	return db.conn.Close()
}
// Dialect returns the dialect associated with the current database connection
// pool.
func (db *DB) Dialect() Dialect {
	return db.dialect
}
// Execute executes the Executable statement with optional arguments. It
// returns the database/sql package's Result object, which may contain
// information on rows affected and last ID inserted depending on the driver.
func (db *DB) Execute(stmt Executable, args ...interface{}) (sql.Result, error) {
	params := Params()
	compiled, err := stmt.Compile(db.dialect, params)
	if err != nil {
		return nil, err
	}
	// Caller-supplied arguments take precedence; otherwise fall back to the
	// parameters collected while compiling the statement.
	if len(args) == 0 {
		args = params.args
	}
	return db.conn.Exec(compiled, args...)
}
// Query executes an Executable statement with the optional arguments. It
// returns a Result object, that can scan rows in various data types.
func (db *DB) Query(stmt Executable, args ...interface{}) (*Result, error) {
	params := Params()
	compiled, err := stmt.Compile(db.dialect, params)
	if err != nil {
		return nil, err
	}
	// Caller-supplied arguments take precedence; otherwise fall back to the
	// parameters collected while compiling the statement.
	if len(args) == 0 {
		args = params.args
	}
	rows, err := db.conn.Query(compiled, args...)
	if err != nil {
		return nil, err
	}
	// Hand the raw rows back wrapped in a Result for typed scanning.
	return &Result{rows: rows, stmt: compiled}, nil
}
// QueryAll will query the statement and populate the given interface with all
// results.
func (db *DB) QueryAll(stmt Executable, i interface{}) error {
	result, err := db.Query(stmt)
	if err != nil {
		return err
	}
	// NOTE(review): unlike QueryOne, the rows are not closed here —
	// presumably result.All closes them when iteration finishes; confirm.
	return result.All(i)
}
// QueryOne will query the statement and populate the given interface with a
// single result.
func (db *DB) QueryOne(stmt Executable, i interface{}) error {
	result, err := db.Query(stmt)
	if err != nil {
		return err
	}
	// Close the result rows or sqlite3 will open another connection
	defer result.rows.Close()
	return result.One(i)
}
// Rollback does nothing for a DB connection pool. It only exists for parity
// with transactions to implement the Connection interface.
func (db *DB) Rollback() (err error) {
	return
}
// String returns parameter-less SQL. If an error occurred during compilation,
// then an empty string will be returned.
func (db *DB) String(stmt Executable) string {
	// Compilation errors are deliberately swallowed: this output is for
	// best-effort logging only.
	compiled, _ := stmt.Compile(db.dialect, Params())
	return compiled
}
// Connect connects to the database using the given driver and credentials.
// It returns a database connection pool and an error if one occurred.
func Connect(driver, credentials string) (*DB, error) {
	conn, err := sql.Open(driver, credentials)
	if err != nil {
		return nil, err
	}
	// Get the dialect
	dialect, err := GetDialect(driver)
	if err != nil {
		// Don't leak the pool we just opened when the driver has no dialect.
		conn.Close()
		return nil, err
	}
	return &DB{conn: conn, dialect: dialect}, nil
}
// TX wraps the current sql.Tx transaction and the Dialect associated with
// the transaction.
type TX struct {
	*sql.Tx
	dialect Dialect
}
// Begin returns the existing transaction. TODO Are nested transactions
// possible? And on what dialects?
func (tx *TX) Begin() (*TX, error) {
	// No nested transaction is started; the current one is handed back.
	return tx, nil
}
// Commit calls the wrapped transactions Commit method.
func (tx *TX) Commit() error {
	return tx.Tx.Commit()
}
// Query executes an Executable statement with the optional arguments
// using the current transaction. It returns a Result object, that can scan
// rows in various data types.
func (tx *TX) Query(stmt Executable, args ...interface{}) (*Result, error) {
	params := Params()
	compiled, err := stmt.Compile(tx.dialect, params)
	if err != nil {
		return nil, err
	}
	// Caller-supplied arguments take precedence; otherwise fall back to the
	// parameters collected while compiling the statement.
	if len(args) == 0 {
		args = params.args
	}
	rows, err := tx.Tx.Query(compiled, args...)
	if err != nil {
		return nil, err
	}
	// Hand the raw rows back wrapped in a Result for typed scanning.
	return &Result{rows: rows, stmt: compiled}, nil
}
// QueryAll will query the statement using the current transaction and
// populate the given interface with all results.
func (tx *TX) QueryAll(stmt Executable, i interface{}) error {
	result, err := tx.Query(stmt)
	if err != nil {
		return err
	}
	// NOTE(review): unlike QueryOne, the rows are not closed here —
	// presumably result.All closes them when iteration finishes; confirm.
	return result.All(i)
}
// QueryOne will query the statement using the current transaction and
// populate the given interface with a single result.
func (tx *TX) QueryOne(stmt Executable, i interface{}) error {
	result, err := tx.Query(stmt)
	if err != nil {
		return err
	}
	// Close the result rows or sqlite3 will open another connection
	defer result.rows.Close()
	return result.One(i)
}
// Execute executes the Executable statement with optional arguments using
// the current transaction. It returns the database/sql package's Result
// object, which may contain information on rows affected and last ID inserted
// depending on the driver.
func (tx *TX) Execute(stmt Executable, args ...interface{}) (sql.Result, error) {
	params := Params()
	compiled, err := stmt.Compile(tx.dialect, params)
	if err != nil {
		return nil, err
	}
	// Caller-supplied arguments take precedence; otherwise fall back to the
	// parameters collected while compiling the statement.
	if len(args) == 0 {
		args = params.args
	}
	return tx.Exec(compiled, args...)
}
// Rollback calls the wrapped transactions Rollback method.
func (tx *TX) Rollback() error {
	return tx.Tx.Rollback()
}
// String returns parameter-less SQL. If an error occurred during compilation,
// then an empty string will be returned.
func (tx *TX) String(stmt Executable) string {
	// Compilation errors are deliberately swallowed: this output is for
	// best-effort logging only.
	compiled, _ := stmt.Compile(tx.dialect, Params())
	return compiled
}
// WrapTx allows aspect to take control of an existing database/sql
// transaction and execute queries using the given dialect.
func WrapTx(tx *sql.Tx, dialect Dialect) *TX {
	return &TX{Tx: tx, dialect: dialect}
}
Fake transaction for testing transactional blocks.
package aspect
import (
"database/sql"
)
// Connection is a common interface for database connections or transactions
type Connection interface {
	Begin() (*TX, error)
	Commit() error
	Execute(stmt Executable, args ...interface{}) (sql.Result, error)
	Query(stmt Executable, args ...interface{}) (*Result, error)
	QueryAll(stmt Executable, i interface{}) error
	QueryOne(stmt Executable, i interface{}) error
	Rollback() error
	String(stmt Executable) string // Parameter-less output for logging
}
// Both DB and TX should implement the Connection interface
// (compile-time assertions; a build failure here means a method is missing)
var _ Connection = &DB{}
var _ Connection = &TX{}
// TODO The db should be able to determine if a stmt should be used with
// either Exec() or Query()
// Executable statements implement the Compiles interface
type Executable interface {
	Compiles
}
// DB wraps the current sql.DB connection pool and includes the Dialect
// associated with the connection.
type DB struct {
	conn *sql.DB
	dialect Dialect
}
// Begin starts a new transaction using the current database connection pool.
func (db *DB) Begin() (*TX, error) {
	tx, err := db.conn.Begin()
	if err != nil {
		// Previously a non-nil *TX wrapping a nil *sql.Tx was returned
		// alongside the error; any deferred Rollback on it would panic.
		return nil, err
	}
	return &TX{Tx: tx, dialect: db.dialect}, nil
}
// Commit does nothing for a DB connection pool. It only exists for parity
// with transactions to implement the Connection interface.
func (db *DB) Commit() (err error) {
	return
}
// Close closes the current database connection pool.
func (db *DB) Close() error {
	return db.conn.Close()
}
// Dialect returns the dialect associated with the current database connection
// pool.
func (db *DB) Dialect() Dialect {
	return db.dialect
}
// Execute executes the Executable statement with optional arguments. It
// returns the database/sql package's Result object, which may contain
// information on rows affected and last ID inserted depending on the driver.
func (db *DB) Execute(stmt Executable, args ...interface{}) (sql.Result, error) {
	// Initialize a list of empty parameters
	params := Params()
	// TODO Columns are needed for name return types, tag matching, etc...
	s, err := stmt.Compile(db.dialect, params)
	if err != nil {
		return nil, err
	}
	// TODO When to use the given arguments?
	// TODO If args are structs, maps, or slices, unpack them
	// Use any arguments given to Query() over compiled arguments
	if len(args) == 0 {
		args = params.args
	}
	return db.conn.Exec(s, args...)
}
// Query executes an Executable statement with the optional arguments. It
// returns a Result object, that can scan rows in various data types.
func (db *DB) Query(stmt Executable, args ...interface{}) (*Result, error) {
	// Initialize a list of empty parameters
	params := Params()
	// TODO Columns are needed for name return types, tag matching, etc...
	s, err := stmt.Compile(db.dialect, params)
	if err != nil {
		return nil, err
	}
	// TODO When to use the given arguments?
	// TODO If args are structs, maps, or slices, unpack them
	// Use any arguments given to Query() over compiled arguments
	if len(args) == 0 {
		args = params.args
	}
	rows, err := db.conn.Query(s, args...)
	if err != nil {
		return nil, err
	}
	// Wrap the sql rows in a result
	return &Result{rows: rows, stmt: s}, nil
}
// QueryAll will query the statement and populate the given interface with all
// results.
func (db *DB) QueryAll(stmt Executable, i interface{}) error {
	result, err := db.Query(stmt)
	if err != nil {
		return err
	}
	// NOTE(review): unlike QueryOne, the rows are not closed here —
	// presumably result.All closes them when iteration finishes; confirm.
	return result.All(i)
}
// QueryOne will query the statement and populate the given interface with a
// single result.
func (db *DB) QueryOne(stmt Executable, i interface{}) error {
	result, err := db.Query(stmt)
	if err != nil {
		return err
	}
	// Close the result rows or sqlite3 will open another connection
	defer result.rows.Close()
	return result.One(i)
}
// Rollback does nothing for a DB connection pool. It only exists for parity
// with transactions to implement the Connection interface.
func (db *DB) Rollback() (err error) {
	return
}
// String returns parameter-less SQL. If an error occurred during compilation,
// then an empty string will be returned.
func (db *DB) String(stmt Executable) string {
	// Compilation errors are deliberately swallowed: logging is best-effort.
	compiled, _ := stmt.Compile(db.dialect, Params())
	return compiled
}
// Connect connects to the database using the given driver and credentials.
// It returns a database connection pool and an error if one occurred.
func Connect(driver, credentials string) (*DB, error) {
	conn, err := sql.Open(driver, credentials)
	if err != nil {
		return nil, err
	}
	// Get the dialect
	dialect, err := GetDialect(driver)
	if err != nil {
		// Don't leak the pool we just opened when the driver has no dialect.
		conn.Close()
		return nil, err
	}
	return &DB{conn: conn, dialect: dialect}, nil
}
// TX wraps the current sql.Tx transaction and the Dialect associated with
// the transaction.
type TX struct {
	*sql.Tx
	dialect Dialect
}
// Begin returns the existing transaction. TODO Are nested transactions
// possible? And on what dialects?
func (tx *TX) Begin() (*TX, error) {
	// No nested transaction is started; the current one is handed back.
	return tx, nil
}
// Commit calls the wrapped transactions Commit method.
func (tx *TX) Commit() error {
	return tx.Tx.Commit()
}
// Query compiles and executes an Executable statement inside the current
// transaction. It returns a Result that can scan rows into various data
// types. Arguments passed to Query take precedence over the statement's
// compiled parameters.
func (tx *TX) Query(stmt Executable, args ...interface{}) (*Result, error) {
	// Start with an empty parameter list; Compile fills it in.
	params := Params()

	// TODO Columns are needed for name return types, tag matching, etc...
	compiled, compileErr := stmt.Compile(tx.dialect, params)
	if compileErr != nil {
		return nil, compileErr
	}

	// TODO When to use the given arguments?
	// TODO If args are structs, maps, or slices, unpack them
	// Fall back to the compiled parameters when no explicit args were given.
	if len(args) == 0 {
		args = params.args
	}

	rows, queryErr := tx.Tx.Query(compiled, args...)
	if queryErr != nil {
		return nil, queryErr
	}
	// Wrap the sql rows in a result.
	return &Result{rows: rows, stmt: compiled}, nil
}
// QueryAll runs the statement within the current transaction and populates
// the given interface with all results.
func (tx *TX) QueryAll(stmt Executable, i interface{}) error {
	res, err := tx.Query(stmt)
	if err == nil {
		err = res.All(i)
	}
	return err
}
// QueryOne runs the statement within the current transaction and populates
// the given interface with a single result.
func (tx *TX) QueryOne(stmt Executable, i interface{}) error {
	res, queryErr := tx.Query(stmt)
	if queryErr != nil {
		return queryErr
	}
	// Always close the rows before returning; otherwise sqlite3 will open
	// another connection.
	defer res.rows.Close()
	return res.One(i)
}
// Execute compiles and runs the Executable statement with the optional
// arguments inside the current transaction. It returns database/sql's
// Result, which may report rows affected and the last inserted ID
// depending on the driver.
func (tx *TX) Execute(stmt Executable, args ...interface{}) (sql.Result, error) {
	// Start with an empty parameter list; Compile fills it in.
	params := Params()

	// TODO Columns are needed for name return types, tag matching, etc...
	compiled, compileErr := stmt.Compile(tx.dialect, params)
	if compileErr != nil {
		return nil, compileErr
	}

	// TODO When to use the given arguments?
	// TODO If args are structs, maps, or slices, unpack them
	// Fall back to the compiled parameters when no explicit args were given.
	if len(args) == 0 {
		args = params.args
	}
	return tx.Exec(compiled, args...)
}
// Rollback calls the wrapped transaction's Rollback method.
func (tx *TX) Rollback() error {
	return tx.Tx.Rollback()
}
// String returns the parameter-less SQL for stmt. If compilation fails,
// the empty string is returned.
func (tx *TX) String(stmt Executable) string {
	out, _ := stmt.Compile(tx.dialect, Params())
	return out
}
// WrapTx allows aspect to take control of an existing database/sql
// transaction and execute queries using the given dialect.
func WrapTx(tx *sql.Tx, dialect Dialect) *TX {
	return &TX{Tx: tx, dialect: dialect}
}
// fakeTX embeds a real TX but overrides Commit and Rollback with no-ops so
// that transactional code paths can be exercised without affecting the
// underlying transaction.
type fakeTX struct {
	*TX
}

// Compile-time check that fakeTX satisfies the Connection interface.
var _ Connection = &fakeTX{}
// Commit does nothing; fakeTX never commits the wrapped transaction.
func (tx *fakeTX) Commit() error {
	return nil
}
// Rollback does nothing; fakeTX never rolls back the wrapped transaction.
func (tx *fakeTX) Rollback() error {
	return nil
}
// FakeTx allows testing of transactional blocks of code. Commit and Rollback
// do nothing on the returned wrapper.
func FakeTx(tx *TX) *fakeTX {
	return &fakeTX{TX: tx}
}
|
/*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"context"
"encoding/binary"
"expvar"
"fmt"
"math"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/dgraph-io/badger/v2/options"
"github.com/dgraph-io/badger/v2/pb"
"github.com/dgraph-io/badger/v2/skl"
"github.com/dgraph-io/badger/v2/table"
"github.com/dgraph-io/badger/v2/y"
"github.com/dgraph-io/ristretto"
"github.com/dgraph-io/ristretto/z"
humanize "github.com/dustin/go-humanize"
"github.com/pkg/errors"
)
// Reserved internal keys. All of badger's bookkeeping keys share the
// badgerPrefix so they can be distinguished from user keys.
var (
	badgerPrefix = []byte("!badger!")        // Prefix for internal keys used by badger.
	head         = []byte("!badger!head")    // For storing value offset for replay.
	txnKey       = []byte("!badger!txn")     // For indicating end of entries in txn.
	badgerMove   = []byte("!badger!move")    // For key-value pairs which got moved during GC.
	lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
)
// closers collects the z.Closer handles for every background goroutine the
// DB starts, so shutdown code can signal and wait on each one individually.
type closers struct {
	updateSize *z.Closer // size-calculation loop (db.updateSize)
	compactors *z.Closer // level compaction workers
	memtable   *z.Closer // memtable flusher (db.flushMemtable)
	writes     *z.Closer // write loop (db.doWrites)
	valueGC    *z.Closer // value log GC waiter
	pub        *z.Closer // publisher update listener
}
// DB provides the various functions required to interact with Badger.
// DB is thread-safe.
type DB struct {
	sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.

	dirLockGuard *directoryLockGuard
	// nil if Dir and ValueDir are the same
	valueDirGuard *directoryLockGuard

	closers   closers        // handles for all background goroutines
	mt        *skl.Skiplist  // Our latest (actively written) in-memory table
	imm       []*skl.Skiplist // Add here only AFTER pushing to flushChan.

	opt       Options
	manifest  *manifestFile
	lc        *levelsController // LSM-tree levels
	vlog      valueLog
	vhead     valuePointer // less than or equal to a pointer to the last vlog value put into mt
	writeCh   chan *request
	flushChan chan flushTask // For flushing memtables.
	closeOnce sync.Once      // For closing DB only once.

	// Number of log rotates since the last memtable flush. We will access this field via atomic
	// functions. Since we are not going to use any 64bit atomic functions, there is no need for
	// 64 bit alignment of this struct(see #311).
	logRotates int32

	blockWrites int32  // set to 1 to reject new writes (see sendToWriteCh)
	isClosed    uint32 // set to 1 once close() has finished (see IsClosed)

	orc *oracle    // transaction timestamp oracle
	pub *publisher // pushes updates to subscribers

	registry   *KeyRegistry     // encryption-key registry
	blockCache *ristretto.Cache // nil unless opt.BlockCacheSize > 0
	indexCache *ristretto.Cache // nil unless opt.IndexCacheSize > 0
}
const (
	// kvWriteChCapacity bounds the number of pending requests in db.writeCh.
	kvWriteChCapacity = 1000
)
// replayFunction returns the callback used while replaying the value log on
// startup. Entries that carry the bitTxn flag are buffered until the matching
// bitFinTxn marker arrives, and only then written to the LSM tree, so that
// incomplete transactions are discarded. It also advances orc.nextTxnTs and
// db.vhead as it goes.
func (db *DB) replayFunction() func(Entry, valuePointer) error {
	type txnEntry struct {
		nk []byte
		v  y.ValueStruct
	}

	var txn []txnEntry   // buffered entries of the in-flight transaction
	var lastCommit uint64 // commit ts of the in-flight transaction, 0 if none

	// toLSM writes one key/value into the active memtable, polling until
	// there is room.
	toLSM := func(nk []byte, vs y.ValueStruct) {
		for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
			db.opt.Debugf("Replay: Making room for writes")
			time.Sleep(10 * time.Millisecond)
		}
		db.mt.Put(nk, vs)
	}

	first := true
	return func(e Entry, vp valuePointer) error { // Function for replaying.
		if first {
			db.opt.Debugf("First key=%q\n", e.Key)
		}
		first = false

		// Keep nextTxnTs at least as large as every replayed key's version.
		db.orc.Lock()
		if db.orc.nextTxnTs < y.ParseTs(e.Key) {
			db.orc.nextTxnTs = y.ParseTs(e.Key)
		}
		db.orc.Unlock()

		nk := make([]byte, len(e.Key))
		copy(nk, e.Key)
		var nv []byte
		meta := e.meta
		if db.shouldWriteValueToLSM(e) {
			// Small value: store it inline in the LSM tree.
			nv = make([]byte, len(e.Value))
			copy(nv, e.Value)
		} else {
			// Large value: store only the encoded value-log pointer.
			nv = vp.Encode()
			meta = meta | bitValuePointer
		}
		// Update vhead. If the crash happens while replay was in progress
		// and the head is not updated, we will end up replaying all the
		// files starting from file zero, again.
		db.updateHead([]valuePointer{vp})

		v := y.ValueStruct{
			Value:     nv,
			Meta:      meta,
			UserMeta:  e.UserMeta,
			ExpiresAt: e.ExpiresAt,
		}

		switch {
		case e.meta&bitFinTxn > 0:
			// End-of-transaction marker; its value is the commit timestamp.
			txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
			if err != nil {
				return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
			}
			y.AssertTrue(lastCommit == txnTs)
			y.AssertTrue(len(txn) > 0)
			// Got the end of txn. Now we can store them.
			for _, t := range txn {
				toLSM(t.nk, t.v)
			}
			txn = txn[:0]
			lastCommit = 0

		case e.meta&bitTxn > 0:
			// Entry belonging to a transaction: buffer until bitFinTxn.
			txnTs := y.ParseTs(nk)
			if lastCommit == 0 {
				lastCommit = txnTs
			}
			if lastCommit != txnTs {
				// A new txn started before the previous one finished; the
				// previous one must be incomplete. Drop it.
				db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
					lastCommit)
				txn = txn[:0]
				lastCommit = txnTs
			}
			te := txnEntry{nk: nk, v: v}
			txn = append(txn, te)

		default:
			// This entry is from a rewrite or via SetEntryAt(..).
			toLSM(nk, v)
			// We shouldn't get this entry in the middle of a transaction.
			y.AssertTrue(lastCommit == 0)
			y.AssertTrue(len(txn) == 0)
		}
		return nil
	}
}
// Open returns a new DB object. It validates the options, acquires directory
// locks, opens the manifest, caches, key registry, levels controller and value
// log, replays the value log to rebuild the memtable, and finally starts all
// background goroutines. On any error the partially-started goroutines are
// cleaned up and a nil DB is returned.
func Open(opt Options) (db *DB, err error) {
	// It's okay to have zero compactors which will disable all compactions but
	// we cannot have just one compactor otherwise we will end up with all data
	// on level 2.
	if opt.NumCompactors == 1 {
		return nil, errors.New("Cannot have 1 compactor. Need at least 2")
	}
	if opt.InMemory && (opt.Dir != "" || opt.ValueDir != "") {
		return nil, errors.New("Cannot use badger in Disk-less mode with Dir or ValueDir set")
	}
	opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
	opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)

	// We are limiting opt.ValueThreshold to maxValueThreshold for now.
	if opt.ValueThreshold > maxValueThreshold {
		return nil, errors.Errorf("Invalid ValueThreshold, must be less or equal to %d",
			maxValueThreshold)
	}

	// If ValueThreshold is greater than opt.maxBatchSize, we won't be able to push any data using
	// the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize.
	if int64(opt.ValueThreshold) > opt.maxBatchSize {
		return nil, errors.Errorf("Valuethreshold greater than max batch size of %d. Either "+
			"reduce opt.ValueThreshold or increase opt.MaxTableSize.", opt.maxBatchSize)
	}
	if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
		return nil, ErrValueLogSize
	}
	if !(opt.ValueLogLoadingMode == options.FileIO ||
		opt.ValueLogLoadingMode == options.MemoryMap) {
		return nil, ErrInvalidLoadingMode
	}

	// Return error if badger is built without cgo and compression is set to ZSTD.
	if opt.Compression == options.ZSTD && !y.CgoEnabled {
		return nil, y.ErrZstdCgo
	}

	// Keep L0 in memory if either KeepL0InMemory is set or if InMemory is set.
	opt.KeepL0InMemory = opt.KeepL0InMemory || opt.InMemory

	// Compact L0 on close if either it is set or if KeepL0InMemory is set. When
	// keepL0InMemory is set we need to compact L0 on close otherwise we might lose data.
	opt.CompactL0OnClose = opt.CompactL0OnClose || opt.KeepL0InMemory

	if opt.ReadOnly {
		// Can't truncate if the DB is read only.
		opt.Truncate = false
		// Do not perform compaction in read only mode.
		opt.CompactL0OnClose = false
	}

	var dirLockGuard, valueDirLockGuard *directoryLockGuard

	// Create directories and acquire lock on it only if badger is not running in InMemory mode.
	// We don't have any directories/files in InMemory mode so we don't need to acquire
	// any locks on them.
	if !opt.InMemory {
		if err := createDirs(opt); err != nil {
			return nil, err
		}
		if !opt.BypassLockGuard {
			dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
			if err != nil {
				return nil, err
			}
			// Release the lock on error; cleared at the bottom on success.
			defer func() {
				if dirLockGuard != nil {
					_ = dirLockGuard.release()
				}
			}()
			absDir, err := filepath.Abs(opt.Dir)
			if err != nil {
				return nil, err
			}
			absValueDir, err := filepath.Abs(opt.ValueDir)
			if err != nil {
				return nil, err
			}
			// Only lock the value dir separately when it differs from Dir.
			if absValueDir != absDir {
				valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
				if err != nil {
					return nil, err
				}
				defer func() {
					if valueDirLockGuard != nil {
						_ = valueDirLockGuard.release()
					}
				}()
			}
		}
	}

	manifestFile, manifest, err := openOrCreateManifestFile(opt)
	if err != nil {
		return nil, err
	}
	// Close the manifest on error; cleared at the bottom on success.
	defer func() {
		if manifestFile != nil {
			_ = manifestFile.close()
		}
	}()

	db = &DB{
		imm:           make([]*skl.Skiplist, 0, opt.NumMemtables),
		flushChan:     make(chan flushTask, opt.NumMemtables),
		writeCh:       make(chan *request, kvWriteChCapacity),
		opt:           opt,
		manifest:      manifestFile,
		dirLockGuard:  dirLockGuard,
		valueDirGuard: valueDirLockGuard,
		orc:           newOracle(opt),
		pub:           newPublisher(),
	}

	// Cleanup all the goroutines started by badger in case of an error.
	defer func() {
		if err != nil {
			db.cleanup()
			db = nil
		}
	}()

	if opt.BlockCacheSize > 0 {
		config := ristretto.Config{
			// Use 5% of cache memory for storing counters.
			NumCounters: int64(float64(opt.BlockCacheSize) * 0.05 * 2),
			MaxCost:     int64(float64(opt.BlockCacheSize) * 0.95),
			BufferItems: 64,
			Metrics:     true,
			OnExit:      table.BlockEvictHandler,
		}
		db.blockCache, err = ristretto.NewCache(&config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create data cache")
		}
	}

	if opt.IndexCacheSize > 0 {
		config := ristretto.Config{
			// Use 5% of cache memory for storing counters.
			NumCounters: int64(float64(opt.IndexCacheSize) * 0.05 * 2),
			MaxCost:     int64(float64(opt.IndexCacheSize) * 0.95),
			BufferItems: 64,
			Metrics:     true,
		}
		db.indexCache, err = ristretto.NewCache(&config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create bf cache")
		}
	}

	if db.opt.InMemory {
		db.opt.SyncWrites = false
		// If badger is running in memory mode, push everything into the LSM Tree.
		db.opt.ValueThreshold = math.MaxInt32
	}
	krOpt := KeyRegistryOptions{
		ReadOnly:                      opt.ReadOnly,
		Dir:                           opt.Dir,
		EncryptionKey:                 opt.EncryptionKey,
		EncryptionKeyRotationDuration: opt.EncryptionKeyRotationDuration,
		InMemory:                      opt.InMemory,
	}

	if db.registry, err = OpenKeyRegistry(krOpt); err != nil {
		return db, err
	}
	db.calculateSize()
	db.closers.updateSize = z.NewCloser(1)
	go db.updateSize(db.closers.updateSize)
	db.mt = skl.NewSkiplist(arenaSize(opt))

	// newLevelsController potentially loads files in directory.
	if db.lc, err = newLevelsController(db, &manifest); err != nil {
		return db, err
	}

	// Initialize vlog struct.
	db.vlog.init(db)

	if !opt.ReadOnly {
		db.closers.compactors = z.NewCloser(1)
		db.lc.startCompact(db.closers.compactors)

		db.closers.memtable = z.NewCloser(1)
		go func() {
			_ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
		}()
	}

	headKey := y.KeyWithTs(head, math.MaxUint64)
	// Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
	vs, err := db.get(headKey)
	if err != nil {
		return db, errors.Wrap(err, "Retrieving head")
	}
	db.orc.nextTxnTs = vs.Version
	var vptr valuePointer
	if len(vs.Value) > 0 {
		vptr.Decode(vs.Value)
	}

	// Run a temporary write loop so replayed entries can be written, then
	// replay the value log from the stored head pointer.
	replayCloser := z.NewCloser(1)
	go db.doWrites(replayCloser)

	if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
		replayCloser.SignalAndWait()
		return db, y.Wrapf(err, "During db.vlog.open")
	}
	replayCloser.SignalAndWait() // Wait for replay to be applied first.

	// Let's advance nextTxnTs to one more than whatever we observed via
	// replaying the logs.
	db.orc.txnMark.Done(db.orc.nextTxnTs)
	// In normal mode, we must update readMark so older versions of keys can be removed during
	// compaction when run in offline mode via the flatten tool.
	db.orc.readMark.Done(db.orc.nextTxnTs)
	db.orc.incrementNextTs()

	db.closers.writes = z.NewCloser(1)
	go db.doWrites(db.closers.writes)

	if !db.opt.InMemory {
		db.closers.valueGC = z.NewCloser(1)
		go db.vlog.waitOnGC(db.closers.valueGC)
	}

	db.closers.pub = z.NewCloser(1)
	go db.pub.listenForUpdates(db.closers.pub)

	// Success: ownership of the guards and manifest has moved to db, so
	// disarm the error-cleanup defers above.
	valueDirLockGuard = nil
	dirLockGuard = nil
	manifestFile = nil
	return db, nil
}
// cleanup stops all the goroutines started by badger. This is used in open to
// cleanup goroutines in case of an error. Unlike close(), it only signals the
// closers (it does not wait) and never truncates value log files.
func (db *DB) cleanup() {
	db.stopMemoryFlush()
	db.stopCompactions()

	db.blockCache.Close()
	db.indexCache.Close()
	// Each closer may be nil if Open failed before starting that goroutine.
	if db.closers.updateSize != nil {
		db.closers.updateSize.Signal()
	}
	if db.closers.valueGC != nil {
		db.closers.valueGC.Signal()
	}
	if db.closers.writes != nil {
		db.closers.writes.Signal()
	}
	if db.closers.pub != nil {
		db.closers.pub.Signal()
	}

	db.orc.Stop()
	// Do not use vlog.Close() here. vlog.Close truncates the files. We don't
	// want to truncate files unless the user has specified the truncate flag.
	db.vlog.stopFlushDiscardStats()
}
// BlockCacheMetrics returns the metrics for the underlying block cache, or
// nil when no block cache is configured.
func (db *DB) BlockCacheMetrics() *ristretto.Metrics {
	if db.blockCache == nil {
		return nil
	}
	return db.blockCache.Metrics
}
// IndexCacheMetrics returns the metrics for the underlying index cache, or
// nil when no index cache is configured.
func (db *DB) IndexCacheMetrics() *ristretto.Metrics {
	if db.indexCache == nil {
		return nil
	}
	return db.indexCache.Metrics
}
// Close closes a DB. It's crucial to call it to ensure all the pending
// updates make their way to disk. Calling DB.Close() multiple times would
// still only close the DB once.
func (db *DB) Close() error {
	var closeErr error
	db.closeOnce.Do(func() {
		closeErr = db.close()
	})
	return closeErr
}
// IsClosed denotes if the badger DB is closed or not. A DB instance should
// not be used after closing it.
func (db *DB) IsClosed() bool {
	closed := atomic.LoadUint32(&db.isClosed)
	return closed == 1
}
// close performs the actual shutdown: it blocks new writes, drains the write
// path, closes the value log, flushes the final memtable, optionally force
// compacts L0, and releases every lock, cache and file handle. The first
// error encountered is returned; later steps still run.
func (db *DB) close() (err error) {
	db.opt.Debugf("Closing database")
	atomic.StoreInt32(&db.blockWrites, 1)

	if !db.opt.InMemory {
		// Stop value GC first.
		db.closers.valueGC.SignalAndWait()
	}

	// Stop writes next.
	db.closers.writes.SignalAndWait()

	// Don't accept any more write.
	close(db.writeCh)

	db.closers.pub.SignalAndWait()

	// Now close the value log.
	if vlogErr := db.vlog.Close(); vlogErr != nil {
		err = errors.Wrap(vlogErr, "DB.Close")
	}

	// Make sure that block writer is done pushing stuff into memtable!
	// Otherwise, you will have a race condition: we are trying to flush memtables
	// and remove them completely, while the block / memtable writer is still
	// trying to push stuff into the memtable. This will also resolve the value
	// offset problem: as we push into memtable, we update value offsets there.
	if !db.mt.Empty() {
		db.opt.Debugf("Flushing memtable")
		for {
			pushedFlushTask := func() bool {
				db.Lock()
				defer db.Unlock()
				y.AssertTrue(db.mt != nil)
				select {
				case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
					db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
					db.mt = nil                    // Will segfault if we try writing!
					db.opt.Debugf("pushed to flush chan\n")
					return true
				default:
					// If we fail to push, we need to unlock and wait for a short while.
					// The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
					// TODO: Think about how to do this more cleanly, maybe without any locks.
				}
				return false
			}()
			if pushedFlushTask {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	db.stopMemoryFlush()
	db.stopCompactions()

	// Force Compact L0
	// We don't need to care about cstatus since no parallel compaction is running.
	if db.opt.CompactL0OnClose {
		err := db.lc.doCompact(173, compactionPriority{level: 0, score: 1.73})
		switch err {
		case errFillTables:
			// This error only means that there might be enough tables to do a compaction. So, we
			// should not report it to the end user to avoid confusing them.
		case nil:
			db.opt.Infof("Force compaction on level 0 done")
		default:
			db.opt.Warningf("While forcing compaction on level 0: %v", err)
		}
	}

	if lcErr := db.lc.close(); err == nil {
		err = errors.Wrap(lcErr, "DB.Close")
	}
	db.opt.Debugf("Waiting for closer")
	db.closers.updateSize.SignalAndWait()
	db.orc.Stop()
	db.blockCache.Close()
	db.indexCache.Close()

	atomic.StoreUint32(&db.isClosed, 1)

	if db.opt.InMemory {
		return
	}

	if db.dirLockGuard != nil {
		if guardErr := db.dirLockGuard.release(); err == nil {
			err = errors.Wrap(guardErr, "DB.Close")
		}
	}
	if db.valueDirGuard != nil {
		if guardErr := db.valueDirGuard.release(); err == nil {
			err = errors.Wrap(guardErr, "DB.Close")
		}
	}
	if manifestErr := db.manifest.close(); err == nil {
		err = errors.Wrap(manifestErr, "DB.Close")
	}
	if registryErr := db.registry.Close(); err == nil {
		err = errors.Wrap(registryErr, "DB.Close")
	}

	// Fsync directories to ensure that lock file, and any other removed files whose directory
	// we haven't specifically fsynced, are guaranteed to have their directory entry removal
	// persisted to disk.
	if syncErr := db.syncDir(db.opt.Dir); err == nil {
		err = errors.Wrap(syncErr, "DB.Close")
	}
	if syncErr := db.syncDir(db.opt.ValueDir); err == nil {
		err = errors.Wrap(syncErr, "DB.Close")
	}

	return err
}
// VerifyChecksum verifies checksum for all tables on all levels.
// This method can be used to verify checksum, if opt.ChecksumVerificationMode
// is NoVerification.
func (db *DB) VerifyChecksum() error {
	return db.lc.verifyChecksum()
}
const (
	// lockFile is the name of the lock file placed in Dir and ValueDir.
	lockFile = "LOCK"
)
// Sync syncs database content to disk. This function provides
// more control to user to sync data whenever required.
func (db *DB) Sync() error {
	// math.MaxUint32 asks the value log to sync up to its latest file.
	return db.vlog.sync(math.MaxUint32)
}
// getMemTables returns the mutable memtable followed by the immutable
// memtables (newest first), with a reference taken on each. The returned
// function releases those references.
func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
	db.RLock()
	defer db.RUnlock()

	tables := make([]*skl.Skiplist, 0, len(db.imm)+1)
	// Mutable memtable first, then immutable ones from newest to oldest.
	tables = append(tables, db.mt)
	for idx := len(db.imm) - 1; idx >= 0; idx-- {
		tables = append(tables, db.imm[idx])
	}
	for _, tbl := range tables {
		tbl.IncrRef()
	}
	return tables, func() {
		for _, tbl := range tables {
			tbl.DecrRef()
		}
	}
}
// get returns the value in memtable or disk for given key.
// Note that value will include meta byte.
//
// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to
// maintain this invariant to search for the latest value of a key, or else we need to search in all
// tables and find the max version among them. To maintain this invariant, we also need to ensure
// that all versions of a key are always present in the same table from level 1, because compaction
// can push any table down.
//
// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
// value log to another (while reclaiming space during value log GC), we have logically moved this
// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
// gets, we can stop going down the LSM tree once we find any version of the key (note however that
// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
// to ensure that we pick the highest version of the movekey present.
func (db *DB) get(key []byte) (y.ValueStruct, error) {
	if db.IsClosed() {
		return y.ValueStruct{}, ErrDBClosed
	}
	tables, decr := db.getMemTables() // Lock should be released.
	defer decr()

	// maxVs is non-nil only for badgerMove keys; it accumulates the highest
	// version seen across all memtables and levels.
	var maxVs *y.ValueStruct
	var version uint64
	if bytes.HasPrefix(key, badgerMove) {
		// If we are checking badgerMove key, we should look into all the
		// levels, so we can pick up the newer versions, which might have been
		// compacted down the tree.
		maxVs = &y.ValueStruct{}
		version = y.ParseTs(key)
	}

	y.NumGets.Add(1)
	for i := 0; i < len(tables); i++ {
		vs := tables[i].Get(key)
		y.NumMemtableGets.Add(1)
		if vs.Meta == 0 && vs.Value == nil {
			// Key not present in this memtable; try the next one.
			continue
		}
		// Found a version of the key. For user keyspace, return immediately. For move keyspace,
		// continue iterating, unless we found a version == given key version.
		if maxVs == nil || vs.Version == version {
			return vs, nil
		}
		if maxVs.Version < vs.Version {
			*maxVs = vs
		}
	}
	// Not resolved in the memtables; search the LSM levels.
	return db.lc.get(key, maxVs, 0)
}
// updateHead advances db.vhead to the last non-zero pointer in ptrs. It must
// be called with db.Lock() held, since db.vhead is shared with the writer
// goroutines and the memtable flusher.
func (db *DB) updateHead(ptrs []valuePointer) {
	// Scan forward, remembering the last non-zero pointer.
	var latest valuePointer
	for _, p := range ptrs {
		if !p.IsZero() {
			latest = p
		}
	}
	if latest.IsZero() {
		return
	}

	// The head must never move backwards.
	y.AssertTrue(!latest.Less(db.vhead))
	db.vhead = latest
}
// requestPool recycles request objects used on the write path to avoid
// allocating one per batch.
var requestPool = sync.Pool{
	New: func() interface{} {
		return new(request)
	},
}
// shouldWriteValueToLSM reports whether the entry's value is small enough to
// be stored inline in the LSM tree instead of the value log.
func (db *DB) shouldWriteValueToLSM(e Entry) bool {
	return len(e.Value) < db.opt.ValueThreshold
}
// writeToLSM puts every entry of the request into the active memtable. Small
// values are stored inline; larger ones are stored as encoded pointers into
// the value log (b.Ptrs). bitFinTxn markers are skipped.
func (db *DB) writeToLSM(b *request) error {
	// We should check the length of b.Ptrs and b.Entries only when badger is not
	// running in InMemory mode. In InMemory mode, we don't write anything to the
	// value log and that's why the length of b.Ptrs will always be zero.
	if !db.opt.InMemory && len(b.Ptrs) != len(b.Entries) {
		return errors.Errorf("Ptrs and Entries don't match: %+v", b)
	}

	for i, entry := range b.Entries {
		if entry.meta&bitFinTxn != 0 {
			// Transaction-end marker; nothing to store in the LSM tree.
			continue
		}
		if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
			db.mt.Put(entry.Key,
				y.ValueStruct{
					Value: entry.Value,
					// Ensure value pointer flag is removed. Otherwise, the value will fail
					// to be retrieved during iterator prefetch. `bitValuePointer` is only
					// known to be set in write to LSM when the entry is loaded from a backup
					// with lower ValueThreshold and its value was stored in the value log.
					Meta:      entry.meta &^ bitValuePointer,
					UserMeta:  entry.UserMeta,
					ExpiresAt: entry.ExpiresAt,
				})
		} else {
			// Large value: store the value-log pointer instead of the value.
			db.mt.Put(entry.Key,
				y.ValueStruct{
					Value:     b.Ptrs[i].Encode(),
					Meta:      entry.meta | bitValuePointer,
					UserMeta:  entry.UserMeta,
					ExpiresAt: entry.ExpiresAt,
				})
		}
	}
	return nil
}
// writeRequests is called serially by only one goroutine. It writes the
// requests to the value log first, publishes updates to subscribers, then
// writes each request's entries into the memtable, updating db.vhead as it
// goes. Every request's Wg is released (with the error, if any) before
// returning.
func (db *DB) writeRequests(reqs []*request) error {
	if len(reqs) == 0 {
		return nil
	}

	// done propagates err to every request and releases its waiters.
	done := func(err error) {
		for _, r := range reqs {
			r.Err = err
			r.Wg.Done()
		}
	}
	db.opt.Debugf("writeRequests called. Writing to value log")
	err := db.vlog.write(reqs)
	if err != nil {
		done(err)
		return err
	}

	db.opt.Debugf("Sending updates to subscribers")
	db.pub.sendUpdates(reqs)
	db.opt.Debugf("Writing to memtable")
	var count int
	for _, b := range reqs {
		if len(b.Entries) == 0 {
			continue
		}
		count += len(b.Entries)
		var i uint64
		// Poll until the memtable has room; log every 100 attempts.
		for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
			i++
			if i%100 == 0 {
				db.opt.Debugf("Making room for writes")
			}
			// We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
			// When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
			// you will get a deadlock.
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			done(err)
			return errors.Wrap(err, "writeRequests")
		}
		if err := db.writeToLSM(b); err != nil {
			done(err)
			return errors.Wrap(err, "writeRequests")
		}
		db.Lock()
		db.updateHead(b.Ptrs)
		db.Unlock()
	}
	done(nil)
	db.opt.Debugf("%d entries written", count)
	return nil
}
// sendToWriteCh wraps the entries in a pooled request and queues it on
// db.writeCh for the write loop (doWrites). It fails fast when writes are
// blocked or the batch exceeds the configured size/count limits.
func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
	if atomic.LoadInt32(&db.blockWrites) == 1 {
		return nil, ErrBlockedWrites
	}
	var count, size int64
	for _, e := range entries {
		size += int64(e.estimateSize(db.opt.ValueThreshold))
		count++
	}
	if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
		return nil, ErrTxnTooBig
	}

	// We can only service one request because we need each txn to be stored in a contiguous section.
	// Txns should not interleave among other txns or rewrites.
	req := requestPool.Get().(*request)
	req.reset()
	req.Entries = entries
	req.Wg.Add(1)
	req.IncrRef()     // for db write
	db.writeCh <- req // Handled in doWrites.
	y.NumPuts.Add(int64(len(entries)))

	return req, nil
}
// doWrites is the single writer loop. It batches requests arriving on
// db.writeCh and hands each batch to writeRequests in a goroutine, allowing
// at most one in-flight write at a time (via pendingCh). On close, it drains
// the channel and performs one final synchronous write.
func (db *DB) doWrites(lc *z.Closer) {
	defer lc.Done()
	// pendingCh has capacity 1: holding a token means a write is in flight.
	pendingCh := make(chan struct{}, 1)

	writeRequests := func(reqs []*request) {
		if err := db.writeRequests(reqs); err != nil {
			db.opt.Errorf("writeRequests: %v", err)
		}
		<-pendingCh
	}

	// This variable tracks the number of pending writes.
	reqLen := new(expvar.Int)
	y.PendingWrites.Set(db.opt.Dir, reqLen)

	reqs := make([]*request, 0, 10)
	for {
		var r *request
		select {
		case r = <-db.writeCh:
		case <-lc.HasBeenClosed():
			goto closedCase
		}

		for {
			reqs = append(reqs, r)
			reqLen.Set(int64(len(reqs)))

			// Force a write once the batch grows too large.
			if len(reqs) >= 3*kvWriteChCapacity {
				pendingCh <- struct{}{} // blocking.
				goto writeCase
			}

			select {
			// Either push to pending, or continue to pick from writeCh.
			case r = <-db.writeCh:
			case pendingCh <- struct{}{}:
				goto writeCase
			case <-lc.HasBeenClosed():
				goto closedCase
			}
		}

	closedCase:
		// All the pending request are drained.
		// Don't close the writeCh, because it is used in several places.
		for {
			select {
			case r = <-db.writeCh:
				reqs = append(reqs, r)
			default:
				pendingCh <- struct{}{} // Push to pending before doing a write.
				writeRequests(reqs)
				return
			}
		}

	writeCase:
		go writeRequests(reqs)
		reqs = make([]*request, 0, 10)
		reqLen.Set(0)
	}
}
// batchSet applies a list of badger.Entry synchronously. If a request-level
// error occurs it is returned.
//
//	Check(kv.BatchSet(entries))
func (db *DB) batchSet(entries []*Entry) error {
	req, err := db.sendToWriteCh(entries)
	if err == nil {
		err = req.Wait()
	}
	return err
}
// batchSetAsync is the asynchronous version of batchSet. The callback f is
// invoked once all the sets are complete; any request-level error is passed
// to it.
//
//	err := kv.BatchSetAsync(entries, func(err error) {
//	    Check(err)
//	})
func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
	req, sendErr := db.sendToWriteCh(entries)
	if sendErr != nil {
		return sendErr
	}
	// Wait for the write to finish off the caller's goroutine, then report
	// the outcome through the callback.
	go func() {
		f(req.Wait())
	}()
	return nil
}
var errNoRoom = errors.New("No room for write")
// ensureRoomForWrite is always called serially. If the memtable is full (or a
// forced flush is due because of log rotations), it tries to hand the current
// memtable to the flusher and start a fresh one. Returns errNoRoom when the
// flush channel is busy so the caller can back off and retry.
func (db *DB) ensureRoomForWrite() error {
	var err error
	db.Lock()
	defer db.Unlock()

	// Here we determine if we need to force flush memtable. Given we rotated log file, it would
	// make sense to force flush a memtable, so the updated value head would have a chance to be
	// pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled,
	// which can take a lot longer if the write load has fewer keys and larger values. This force
	// flush, thus avoids the need to read through a lot of log files on a crash and restart.
	// Above approach is quite simple with small drawback. We are calling ensureRoomForWrite before
	// inserting every entry in Memtable. We will get latest db.head after all entries for a request
	// are inserted in Memtable. If we have done >= db.logRotates rotations, then while inserting
	// first entry in Memtable, below condition will be true and we will endup flushing old value of
	// db.head. Hence we are limiting no of value log files to be read to db.logRotates only.
	forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush

	if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
		// Plenty of room and no forced flush pending.
		return nil
	}

	y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
	select {
	case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
		// After every memtable flush, let's reset the counter.
		atomic.StoreInt32(&db.logRotates, 0)

		// Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
		err = db.vlog.sync(db.vhead.Fid)
		if err != nil {
			return err
		}

		db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
			db.mt.MemSize(), len(db.flushChan))
		// We manage to push this task. Let's modify imm.
		db.imm = append(db.imm, db.mt)
		db.mt = skl.NewSkiplist(arenaSize(db.opt))
		// New memtable is empty. We certainly have room.
		return nil
	default:
		// We need to do this to unlock and allow the flusher to modify imm.
		return errNoRoom
	}
}
// arenaSize returns the skiplist arena size: one full table plus enough
// headroom for a maximal write batch and its skiplist node overhead.
func arenaSize(opt Options) int64 {
	batchOverhead := opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
	return opt.MaxTableSize + batchOverhead
}
// buildL0Table builds a new table from the memtable, skipping any keys that
// match ft.dropPrefixes, and returns the serialized table data.
func buildL0Table(ft flushTask, bopts table.Options) []byte {
	iter := ft.mt.NewIterator()
	defer iter.Close()
	b := table.NewTableBuilder(bopts)
	defer b.Close()
	var vp valuePointer
	for iter.SeekToFirst(); iter.Valid(); iter.Next() {
		if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
			continue
		}
		vs := iter.Value()
		if vs.Meta&bitValuePointer > 0 {
			vp.Decode(vs.Value)
		}
		// NOTE(review): vp keeps the previously decoded pointer when this
		// entry has no bitValuePointer flag — presumably b.Add only uses
		// vp.Len for vlog entries; confirm against table.Builder.Add.
		b.Add(iter.Key(), iter.Value(), vp.Len)
	}
	return b.Finish(true)
}
// flushTask describes one memtable to be flushed to L0, along with the value
// log head to persist and any key prefixes to drop during the flush.
type flushTask struct {
	mt           *skl.Skiplist
	vptr         valuePointer
	dropPrefixes [][]byte
}
// pushHead writes the value log head pointer (under the !badger!head key)
// into the memtable being flushed, so it gets persisted with the L0 table and
// limits how much of the value log must be replayed after a crash.
func (db *DB) pushHead(ft flushTask) error {
	// We don't need to store head pointer in the in-memory mode since we will
	// never replay anything.
	if db.opt.InMemory {
		return nil
	}
	// Ensure we never push a zero valued head pointer.
	if ft.vptr.IsZero() {
		return errors.New("Head should not be zero")
	}

	db.opt.Infof("Storing value log head: %+v\n", ft.vptr)
	val := ft.vptr.Encode()

	// Pick the max commit ts, so in case of crash, our read ts would be higher than all the
	// commits.
	headTs := y.KeyWithTs(head, db.orc.nextTs())
	ft.mt.Put(headTs, y.ValueStruct{Value: val})

	return nil
}
// handleFlushTask must be run serially. It builds an L0 table from the
// memtable in ft and adds it to the levels controller, either in memory
// (KeepL0InMemory) or by writing and syncing a table file on disk.
func (db *DB) handleFlushTask(ft flushTask) error {
	// There can be a scenario, when empty memtable is flushed. For example, memtable is empty and
	// after writing request to value log, rotation count exceeds db.LogRotatesToFlush.
	if ft.mt.Empty() {
		return nil
	}

	// Persist the value log head before building the table.
	if err := db.pushHead(ft); err != nil {
		return err
	}

	dk, err := db.registry.latestDataKey()
	if err != nil {
		return y.Wrapf(err, "failed to get datakey in db.handleFlushTask")
	}
	bopts := buildTableOptions(db.opt)
	bopts.DataKey = dk
	// Builder does not need cache but the same options are used for opening table.
	bopts.BlockCache = db.blockCache
	bopts.IndexCache = db.indexCache
	tableData := buildL0Table(ft, bopts)

	fileID := db.lc.reserveFileID()
	if db.opt.KeepL0InMemory {
		tbl, err := table.OpenInMemoryTable(tableData, fileID, &bopts)
		if err != nil {
			return errors.Wrapf(err, "failed to open table in memory")
		}
		return db.lc.addLevel0Table(tbl)
	}

	fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
	if err != nil {
		return y.Wrap(err)
	}

	// Don't block just to sync the directory entry.
	dirSyncCh := make(chan error, 1)
	go func() { dirSyncCh <- db.syncDir(db.opt.Dir) }()

	if _, err = fd.Write(tableData); err != nil {
		db.opt.Errorf("ERROR while writing to level 0: %v", err)
		return err
	}

	if dirSyncErr := <-dirSyncCh; dirSyncErr != nil {
		// Do dir sync as best effort. No need to return due to an error there.
		db.opt.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
	}

	tbl, err := table.OpenTable(fd, bopts)
	if err != nil {
		db.opt.Debugf("ERROR while opening table: %v", err)
		return err
	}
	// We own a ref on tbl.
	err = db.lc.addLevel0Table(tbl) // This will incrRef
	_ = tbl.DecrRef()               // Releases our ref.
	return err
}
// flushMemtable must keep running until we send it an empty flushTask. If there
// are errors during handling the flush task, we'll retry indefinitely.
func (db *DB) flushMemtable(lc *z.Closer) error {
	defer lc.Done()

	for ft := range db.flushChan {
		if ft.mt == nil {
			// We close db.flushChan now, instead of sending a nil ft.mt.
			continue
		}
		// Retry loop: a flush must eventually succeed before the next task is
		// handled, otherwise db.imm would get out of sync.
		for {
			err := db.handleFlushTask(ft)
			if err == nil {
				// Update s.imm. Need a lock.
				db.Lock()
				// This is a single-threaded operation. ft.mt corresponds to the head of
				// db.imm list. Once we flush it, we advance db.imm. The next ft.mt
				// which would arrive here would match db.imm[0], because we acquire a
				// lock over DB when pushing to flushChan.
				// TODO: This logic is dirty AF. Any change and this could easily break.
				y.AssertTrue(ft.mt == db.imm[0])
				db.imm = db.imm[1:]
				ft.mt.DecrRef() // Return memory.
				db.Unlock()
				break
			}
			// Encountered error. Retry indefinitely.
			db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
			time.Sleep(time.Second)
		}
	}
	return nil
}
// exists reports whether path refers to an existing file or directory.
// A stat failure other than "does not exist" is reported as (true, err),
// letting callers treat an unknown state conservatively.
func exists(path string) (bool, error) {
	_, err := os.Stat(path)
	switch {
	case err == nil:
		return true, nil
	case os.IsNotExist(err):
		return false, nil
	default:
		return true, err
	}
}
// calculateSize walks Dir and ValueDir, sums the sizes of .sst and .vlog
// files, and publishes the totals to the y.LSMSize and y.VlogSize expvar
// maps. It is a no-op for in-memory databases.
func (db *DB) calculateSize() {
	if db.opt.InMemory {
		return
	}
	asExpvar := func(v int64) *expvar.Int {
		e := new(expvar.Int)
		e.Add(v)
		return e
	}
	sizesOf := func(dir string) (lsm, vlog int64) {
		walkErr := filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			switch filepath.Ext(p) {
			case ".sst":
				lsm += info.Size()
			case ".vlog":
				vlog += info.Size()
			}
			return nil
		})
		if walkErr != nil {
			db.opt.Debugf("Got error while calculating total size of directory: %s", dir)
		}
		return lsm, vlog
	}

	lsm, vlog := sizesOf(db.opt.Dir)
	y.LSMSize.Set(db.opt.Dir, asExpvar(lsm))
	// A second walk is needed only when the value log lives in its own directory.
	if db.opt.ValueDir != db.opt.Dir {
		_, vlog = sizesOf(db.opt.ValueDir)
	}
	y.VlogSize.Set(db.opt.ValueDir, asExpvar(vlog))
}
// updateSize refreshes the exported size metrics once a minute until the
// closer is signalled. It is a no-op for in-memory databases.
func (db *DB) updateSize(lc *z.Closer) {
	defer lc.Done()
	if db.opt.InMemory {
		return
	}

	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-lc.HasBeenClosed():
			return
		case <-ticker.C:
			db.calculateSize()
		}
	}
}
// RunValueLogGC triggers a value log garbage collection.
//
// It picks value log files to perform GC based on statistics that are collected
// during compactions. If no such statistics are available, then log files are
// picked in random order. The process stops as soon as the first log file is
// encountered which does not result in garbage collection.
//
// When a log file is picked, it is first sampled. If the sample shows that we
// can discard at least discardRatio space of that file, it would be rewritten.
//
// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is
// thrown indicating that the call resulted in no file rewrites.
//
// We recommend setting discardRatio to 0.5, thus indicating that a file be
// rewritten if half the space can be discarded. This results in a lifetime
// value log write amplification of 2 (1 from original write + 0.5 rewrite +
// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer
// space reclaims, while setting it to a lower value would result in more space
// reclaims at the cost of increased activity on the LSM tree. discardRatio
// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
// ErrInvalidRequest is returned.
//
// Only one GC is allowed at a time. If another value log GC is running, or DB
// has been closed, this would return an ErrRejected.
//
// Note: Every time GC is run, it would produce a spike of activity on the LSM
// tree.
func (db *DB) RunValueLogGC(discardRatio float64) error {
	if db.opt.InMemory {
		return ErrGCInMemoryMode
	}
	if discardRatio >= 1.0 || discardRatio <= 0.0 {
		return ErrInvalidRequest
	}

	// startLevel is the level from which we should search for the head key. When badger is running
	// with KeepL0InMemory flag, all tables on L0 are kept in memory. This means we should pick head
	// key from Level 1 onwards because if we pick the headkey from Level 0 we might end up losing
	// data. See test TestL0GCBug.
	startLevel := 0
	if db.opt.KeepL0InMemory {
		startLevel = 1
	}
	// The LSM get strips the last 8 bytes (timestamp) before comparing keys,
	// so the head key must be looked up with a timestamp attached.
	headKey := y.KeyWithTs(head, math.MaxUint64)
	val, err := db.lc.get(headKey, nil, startLevel)
	if err != nil {
		return errors.Wrap(err, "Retrieving head from on-disk LSM")
	}

	// Named headVptr (not head) to avoid shadowing the package-level head key.
	var headVptr valuePointer
	if len(val.Value) > 0 {
		headVptr.Decode(val.Value)
	}

	// Pick a log file and run GC
	return db.vlog.runGC(discardRatio, headVptr)
}
// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
// call RunValueLogGC.
func (db *DB) Size() (lsm, vlog int64) {
	published := y.LSMSize.Get(db.opt.Dir)
	if published == nil {
		// Metrics have not been published yet (calculateSize hasn't run).
		return 0, 0
	}
	lsm = published.(*expvar.Int).Value()
	vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value()
	return lsm, vlog
}
// Sequence represents a Badger sequence. Integers in [next, leased) are
// served from memory; when next reaches leased, a new range of `bandwidth`
// integers is leased via a transaction on `key`.
type Sequence struct {
	sync.Mutex           // guards the fields below
	db        *DB        // database holding the persisted lease
	key       []byte     // key under which the lease is stored
	next      uint64     // next integer to hand out
	leased    uint64     // exclusive upper bound of the current lease
	bandwidth uint64     // number of integers leased per transaction
}
// Next would return the next integer in the sequence, updating the lease by running a transaction
// if needed.
func (seq *Sequence) Next() (uint64, error) {
	seq.Lock()
	defer seq.Unlock()

	// Refresh the lease when the in-memory range is exhausted.
	if seq.next >= seq.leased {
		if err := seq.updateLease(); err != nil {
			return 0, err
		}
	}
	out := seq.next
	seq.next++
	return out, nil
}
// Release the leased sequence to avoid wasted integers. This should be done right
// before closing the associated DB. However it is valid to use the sequence after
// it was released, causing a new lease with full bandwidth.
func (seq *Sequence) Release() error {
	seq.Lock()
	defer seq.Unlock()
	err := seq.db.Update(func(txn *Txn) error {
		item, err := txn.Get(seq.key)
		if err != nil {
			return err
		}

		// Read the currently persisted lease bound.
		var num uint64
		if err := item.Value(func(v []byte) error {
			num = binary.BigEndian.Uint64(v)
			return nil
		}); err != nil {
			return err
		}

		// Only shrink the persisted lease to seq.next if nobody else has
		// updated it since we leased it; otherwise leave it untouched.
		if num == seq.leased {
			var buf [8]byte
			binary.BigEndian.PutUint64(buf[:], seq.next)
			return txn.SetEntry(NewEntry(seq.key, buf[:]))
		}

		return nil
	})
	if err != nil {
		return err
	}
	// Exhaust the in-memory range so the next Next() takes a fresh lease.
	seq.leased = seq.next
	return nil
}
// updateLease reads the persisted lease bound (0 if absent), reserves the
// next `bandwidth` integers by persisting a new bound, and refreshes the
// in-memory next/leased window. Caller must hold seq's mutex.
func (seq *Sequence) updateLease() error {
	return seq.db.Update(func(txn *Txn) error {
		item, err := txn.Get(seq.key)
		switch {
		case err == ErrKeyNotFound:
			// First use of this sequence key; start from zero.
			seq.next = 0
		case err != nil:
			return err
		default:
			var num uint64
			if err := item.Value(func(v []byte) error {
				num = binary.BigEndian.Uint64(v)
				return nil
			}); err != nil {
				return err
			}
			seq.next = num
		}

		// Persist the new exclusive upper bound of the lease.
		lease := seq.next + seq.bandwidth
		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], lease)
		if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
			return err
		}
		seq.leased = lease
		return nil
	})
}
// GetSequence would initiate a new sequence object, generating it from the stored lease, if
// available, in the database. Sequence can be used to get a list of monotonically increasing
// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
// size of the lease, determining how many Next() requests can be served from memory.
//
// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
	if db.opt.managedTxns {
		panic("Cannot use GetSequence with managedDB=true.")
	}
	if len(key) == 0 {
		return nil, ErrEmptyKey
	}
	if bandwidth == 0 {
		return nil, ErrZeroBandwidth
	}

	// next and leased start at their zero values; the initial lease below
	// populates them from (or creates) the stored lease.
	seq := &Sequence{
		db:        db,
		key:       key,
		bandwidth: bandwidth,
	}
	err := seq.updateLease()
	return seq, err
}
// Tables gets the TableInfo objects from the level controller. If withKeysCount
// is true, TableInfo objects also contain counts of keys for the tables.
func (db *DB) Tables(withKeysCount bool) []TableInfo {
	return db.lc.getTableInfo(withKeysCount)
}
// KeySplits can be used to get rough key ranges to divide up iteration over
// the DB. It returns the sorted right boundaries of tables whose right key
// has the given prefix.
func (db *DB) KeySplits(prefix []byte) []string {
	var splits []string
	// Table ranges only; key counts are not needed here.
	for _, info := range db.Tables(false) {
		// info.Left is skipped because it has a tendency to store !badger keys.
		if !bytes.HasPrefix(info.Right, prefix) {
			continue
		}
		splits = append(splits, string(info.Right))
	}
	sort.Strings(splits)
	return splits
}
// MaxBatchCount returns max possible entries in batch.
func (db *DB) MaxBatchCount() int64 {
	return db.opt.maxBatchCount
}
// MaxBatchSize returns max possible batch size in bytes.
func (db *DB) MaxBatchSize() int64 {
	return db.opt.maxBatchSize
}
// stopMemoryFlush closes db.flushChan and waits for the flush goroutine to
// drain it and exit.
func (db *DB) stopMemoryFlush() {
	// Stop memtable flushes.
	if db.closers.memtable != nil {
		close(db.flushChan)
		db.closers.memtable.SignalAndWait()
	}
}
// stopCompactions signals the compactors and waits for them to finish.
func (db *DB) stopCompactions() {
	// Stop compactions.
	if db.closers.compactors != nil {
		db.closers.compactors.SignalAndWait()
	}
}
// startCompactions restarts compactions with a fresh closer.
func (db *DB) startCompactions() {
	// Resume compactions.
	if db.closers.compactors != nil {
		// NOTE(review): the old closer is replaced without being signalled;
		// callers appear expected to have run stopCompactions first — confirm.
		db.closers.compactors = z.NewCloser(1)
		db.lc.startCompact(db.closers.compactors)
	}
}
// startMemoryFlush recreates db.flushChan (closed by stopMemoryFlush) and
// restarts the flush goroutine with a fresh closer.
func (db *DB) startMemoryFlush() {
	// Start memory flusher.
	if db.closers.memtable != nil {
		db.flushChan = make(chan flushTask, db.opt.NumMemtables)
		db.closers.memtable = z.NewCloser(1)
		go func() {
			_ = db.flushMemtable(db.closers.memtable)
		}()
	}
}
// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
// level. This ensures that all the versions of keys are colocated and not split across multiple
// levels, which is necessary after a restore from backup. During Flatten, live compactions are
// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
// between flattening the tree and new tables being created at level zero.
func (db *DB) Flatten(workers int) error {
	db.stopCompactions()
	defer db.startCompactions()

	// compactAway runs `workers` parallel compactions for the given priority
	// and succeeds if at least one of them succeeds.
	compactAway := func(cp compactionPriority) error {
		db.opt.Infof("Attempting to compact with %+v\n", cp)
		errCh := make(chan error, 1)
		for i := 0; i < workers; i++ {
			go func() {
				// 175 is an arbitrary compactor id — presumably chosen to be
				// distinct from the regular compactor ids; confirm.
				errCh <- db.lc.doCompact(175, cp)
			}()
		}
		var success int
		var rerr error
		for i := 0; i < workers; i++ {
			err := <-errCh
			if err != nil {
				rerr = err
				db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
			} else {
				success++
			}
		}
		if success == 0 {
			return rerr
		}
		// We could do at least one successful compaction. So, we'll consider this a success.
		db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
			success, cp.level)
		return nil
	}
	hbytes := func(sz int64) string {
		return humanize.Bytes(uint64(sz))
	}
	for {
		db.opt.Infof("\n")
		// Collect the levels that currently hold any data.
		var levels []int
		for i, l := range db.lc.levels {
			sz := l.getTotalSize()
			db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
				i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
			if sz > 0 {
				levels = append(levels, i)
			}
		}
		if len(levels) <= 1 {
			// At most one non-empty level left; stop once nothing is worth compacting.
			prios := db.lc.pickCompactLevels()
			if len(prios) == 0 || prios[0].score <= 1.0 {
				db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
				return nil
			}
			if err := compactAway(prios[0]); err != nil {
				return err
			}
			continue
		}
		// Create an artificial compaction priority, to ensure that we compact the level.
		cp := compactionPriority{level: levels[0], score: 1.71}
		if err := compactAway(cp); err != nil {
			return err
		}
	}
}
// blockWrite atomically flips db.blockWrites and waits for all pending write
// requests to finish (which also closes writeCh). Returns ErrBlockedWrites
// if writes are already blocked.
func (db *DB) blockWrite() error {
	// Stop accepting new writes.
	if !atomic.CompareAndSwapInt32(&db.blockWrites, 0, 1) {
		return ErrBlockedWrites
	}

	// Make all pending writes finish. The following will also close writeCh.
	db.closers.writes.SignalAndWait()
	db.opt.Infof("Writes flushed. Stopping compactions now...")
	return nil
}
// unblockWrite restarts the write goroutine and re-enables writes.
func (db *DB) unblockWrite() {
	db.closers.writes = z.NewCloser(1)
	go db.doWrites(db.closers.writes)

	// Resume writes.
	atomic.StoreInt32(&db.blockWrites, 0)
}
// prepareToDrop blocks writes, drains and persists any requests already
// queued on db.writeCh, and stops memtable flushes, so no entry can slip past
// a subsequent drop. It returns a function that undoes all of this. Panics if
// the DB is read-only.
func (db *DB) prepareToDrop() (func(), error) {
	if db.opt.ReadOnly {
		panic("Attempting to drop data in read-only mode.")
	}
	// In order to prepare for drop, we need to block the incoming writes and
	// write them to db. Then, flush all the pending flush tasks. So that, we
	// don't miss any entries.
	if err := db.blockWrite(); err != nil {
		return nil, err
	}
	reqs := make([]*request, 0, 10)
	for {
		select {
		case r := <-db.writeCh:
			reqs = append(reqs, r)
		default:
			// writeCh drained; persist what was pending and stop flushes.
			if err := db.writeRequests(reqs); err != nil {
				db.opt.Errorf("writeRequests: %v", err)
			}
			db.stopMemoryFlush()
			return func() {
				db.opt.Infof("Resuming writes")
				db.startMemoryFlush()
				db.unblockWrite()
			}, nil
		}
	}
}
// DropAll would drop all the data stored in Badger. It does this in the following way.
// - Stop accepting new writes.
// - Pause memtable flushes and compactions.
// - Pick all tables from all levels, create a changeset to delete all these
// tables and apply it to manifest.
// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
// - Resume memtable flushes and compactions.
//
// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
// writes are paused before running DropAll, and resumed after it is finished.
func (db *DB) DropAll() error {
	resume, err := db.dropAll()
	// Resume normal operation even when the drop itself failed part-way.
	if resume != nil {
		resume()
	}
	return err
}
// dropAll performs the actual drop: it quiesces writes, flushes and
// compactions, deletes all memtables, SSTables and value log files, and
// returns a function that resumes normal operation. The returned function is
// non-nil even on partial failure so the caller can restore the DB.
func (db *DB) dropAll() (func(), error) {
	db.opt.Infof("DropAll called. Blocking writes...")
	f, err := db.prepareToDrop()
	if err != nil {
		return f, err
	}
	// prepareToDrop will stop all the incoming writes and flush any pending
	// flush tasks. Before we drop, we'll stop the compaction because anyways
	// all the data is going to be deleted.
	db.stopCompactions()
	resume := func() {
		db.startCompactions()
		f()
	}
	// Block all foreign interactions with memory tables.
	db.Lock()
	defer db.Unlock()

	// Remove inmemory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
	db.mt.DecrRef()
	for _, mt := range db.imm {
		mt.DecrRef()
	}
	db.imm = db.imm[:0]
	db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.

	num, err := db.lc.dropTree()
	if err != nil {
		return resume, err
	}
	db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)

	num, err = db.vlog.dropAll()
	if err != nil {
		return resume, err
	}
	db.vhead = valuePointer{} // Zero it out.
	db.lc.nextFileID = 1
	db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
	// Caches may still hold blocks/indices of the deleted tables.
	db.blockCache.Clear()
	db.indexCache.Clear()

	return resume, nil
}
// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
// - Stop accepting new writes.
// - Stop memtable flushes before acquiring lock. Because we're acquring lock here
//   and memtable flush stalls for lock, which leads to deadlock
// - Flush out all memtables, skipping over keys with the given prefix, Kp.
// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
//   back after a restart.
// - Stop compaction.
// - Compact L0->L1, skipping over Kp.
// - Compact rest of the levels, Li->Li, picking tables which have Kp.
// - Resume memtable flushes, compactions and writes.
func (db *DB) DropPrefix(prefixes ...[]byte) error {
	db.opt.Infof("DropPrefix Called")
	f, err := db.prepareToDrop()
	if err != nil {
		return err
	}
	defer f()

	// Block all foreign interactions with memory tables.
	db.Lock()
	defer db.Unlock()

	// Flush every memtable (including the active one) here on the caller's
	// goroutine, dropping the prefixes while building the L0 tables.
	db.imm = append(db.imm, db.mt)
	for _, memtable := range db.imm {
		if memtable.Empty() {
			memtable.DecrRef()
			continue
		}
		task := flushTask{
			mt: memtable,
			// Ensure that the head of value log gets persisted to disk.
			vptr:         db.vhead,
			dropPrefixes: prefixes,
		}
		db.opt.Debugf("Flushing memtable")
		if err := db.handleFlushTask(task); err != nil {
			db.opt.Errorf("While trying to flush memtable: %v", err)
			return err
		}
		memtable.DecrRef()
	}
	db.stopCompactions()
	defer db.startCompactions()
	db.imm = db.imm[:0]
	db.mt = skl.NewSkiplist(arenaSize(db.opt))

	// Drop prefixes from the levels.
	if err := db.lc.dropPrefixes(prefixes); err != nil {
		return err
	}
	db.opt.Infof("DropPrefix done")
	return nil
}
// KVList contains a list of key-value pairs. It is an alias of pb.KVList.
type KVList = pb.KVList
// Subscribe can be used to watch key changes for the given key prefixes.
// At least one prefix should be passed, or an error will be returned.
// You can use an empty prefix to monitor all changes to the DB.
// This function blocks until the given context is done or an error occurs.
// The given function will be called with a new KVList containing the modified keys and the
// corresponding values.
func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error {
	if cb == nil {
		return ErrNilCallback
	}

	c := z.NewCloser(1)
	recvCh, id := db.pub.newSubscriber(c, prefixes...)
	// slurp drains everything currently buffered on recvCh into batch, then
	// invokes the callback once if anything was collected.
	slurp := func(batch *pb.KVList) error {
		for {
			select {
			case kvs := <-recvCh:
				batch.Kv = append(batch.Kv, kvs.Kv...)
			default:
				if len(batch.GetKv()) > 0 {
					return cb(batch)
				}
				return nil
			}
		}
	}
	for {
		select {
		case <-c.HasBeenClosed():
			// No need to delete here. Closer will be called only while
			// closing DB. Subscriber will be deleted by cleanSubscribers.
			err := slurp(new(pb.KVList))
			// Drain if any pending updates.
			c.Done()
			return err
		case <-ctx.Done():
			c.Done()
			db.pub.deleteSubscriber(id)
			// Delete the subscriber to avoid further updates.
			return ctx.Err()
		case batch := <-recvCh:
			err := slurp(batch)
			if err != nil {
				c.Done()
				// Delete the subscriber if there is an error by the callback.
				db.pub.deleteSubscriber(id)
				return err
			}
		}
	}
}
// shouldEncrypt reports whether encryption is enabled, i.e. an encryption key
// was provided in the options.
func (db *DB) shouldEncrypt() bool {
	return len(db.opt.EncryptionKey) > 0
}
// syncDir syncs the directory entry to disk; it is a no-op in in-memory mode.
func (db *DB) syncDir(dir string) error {
	if db.opt.InMemory {
		return nil
	}
	return syncDir(dir)
}
// createDirs ensures that both opt.Dir and opt.ValueDir exist. Missing
// directories are created with mode 0700, unless the DB is opened read-only,
// in which case a missing directory is an error.
func createDirs(opt Options) error {
	for _, dir := range []string{opt.Dir, opt.ValueDir} {
		found, err := exists(dir)
		if err != nil {
			return y.Wrapf(err, "Invalid Dir: %q", dir)
		}
		if found {
			continue
		}
		if opt.ReadOnly {
			return errors.Errorf("Cannot find directory %q for read-only open", dir)
		}
		// Try to create the directory, readable only by the owner.
		if err := os.MkdirAll(dir, 0700); err != nil {
			return y.Wrapf(err, "Error Creating Dir: %q", dir)
		}
	}
	return nil
}
// StreamDB streams the contents of this DB to a new DB with options outOptions
// that will be created in outOptions.Dir.
func (db *DB) StreamDB(outOptions Options) error {
	outDir := outOptions.Dir

	// Open output DB.
	outDB, err := OpenManaged(outOptions)
	if err != nil {
		return errors.Wrapf(err, "cannot open out DB at %s", outDir)
	}
	defer outDB.Close()
	writer := outDB.NewStreamWriter()
	if err := writer.Prepare(); err != nil {
		// BUG FIX: the wrapped error was previously discarded, so a failed
		// Prepare let streaming proceed against an unprepared writer.
		return errors.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
	}

	// Stream contents of DB to the output DB.
	stream := db.NewStreamAt(math.MaxUint64)
	stream.LogPrefix = fmt.Sprintf("Streaming DB to new DB at %s", outDir)
	stream.Send = func(kvs *pb.KVList) error {
		return writer.Write(kvs)
	}
	if err := stream.Orchestrate(context.Background()); err != nil {
		return errors.Wrapf(err, "cannot stream DB to out DB at %s", outDir)
	}
	if err := writer.Flush(); err != nil {
		return errors.Wrapf(err, "cannot flush writer")
	}
	return nil
}
chore(head): Dump all head keys on open (#1474)
There have been issues with badger replays, so we now log all
the head keys present in badger when opening it.
Fixes - DGRAPH-2343
/*
* Copyright 2017 Dgraph Labs, Inc. and Contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package badger
import (
"bytes"
"context"
"encoding/binary"
"expvar"
"fmt"
"math"
"os"
"path/filepath"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/dgraph-io/badger/v2/options"
"github.com/dgraph-io/badger/v2/pb"
"github.com/dgraph-io/badger/v2/skl"
"github.com/dgraph-io/badger/v2/table"
"github.com/dgraph-io/badger/v2/y"
"github.com/dgraph-io/ristretto"
"github.com/dgraph-io/ristretto/z"
humanize "github.com/dustin/go-humanize"
"github.com/pkg/errors"
)
// Internal keys used by badger itself; they all share the !badger! prefix.
var (
	badgerPrefix = []byte("!badger!")        // Prefix for internal keys used by badger.
	head         = []byte("!badger!head")    // For storing value offset for replay.
	txnKey       = []byte("!badger!txn")     // For indicating end of entries in txn.
	badgerMove   = []byte("!badger!move")    // For key-value pairs which got moved during GC.
	lfDiscardStatsKey = []byte("!badger!discard") // For storing lfDiscardStats
)
// closers holds the lifecycle handles for the background goroutines a DB runs.
type closers struct {
	updateSize *z.Closer // size-metrics updater (updateSize)
	compactors *z.Closer // level compactors
	memtable   *z.Closer // memtable flusher (flushMemtable)
	writes     *z.Closer // write applier (doWrites)
	valueGC    *z.Closer // value log GC waiter (waitOnGC)
	pub        *z.Closer // publisher update listener (listenForUpdates)
}
// DB provides the various functions required to interact with Badger.
// DB is thread-safe.
type DB struct {
	sync.RWMutex // Guards list of inmemory tables, not individual reads and writes.

	dirLockGuard *directoryLockGuard
	// nil if Dir and ValueDir are the same
	valueDirGuard *directoryLockGuard

	closers   closers        // lifecycle handles for background goroutines
	mt        *skl.Skiplist  // Our latest (actively written) in-memory table
	imm       []*skl.Skiplist // Add here only AFTER pushing to flushChan.

	opt       Options
	manifest  *manifestFile
	lc        *levelsController
	vlog      valueLog
	vhead     valuePointer // less than or equal to a pointer to the last vlog value put into mt
	writeCh   chan *request
	flushChan chan flushTask // For flushing memtables.
	closeOnce sync.Once      // For closing DB only once.

	// Number of log rotates since the last memtable flush. We will access this field via atomic
	// functions. Since we are not going to use any 64bit atomic functions, there is no need for
	// 64 bit alignment of this struct(see #311).
	logRotates int32

	blockWrites int32 // non-zero while writes are blocked (see blockWrite)
	isClosed    uint32

	orc *oracle // transaction timestamp oracle

	pub        *publisher // fan-out of updates to Subscribe()rs
	registry   *KeyRegistry // data-key registry for encryption
	blockCache *ristretto.Cache
	indexCache *ristretto.Cache
}
const (
	// kvWriteChCapacity bounds db.writeCh, the channel feeding doWrites.
	kvWriteChCapacity = 1000
)
// replayFunction returns the callback used while replaying the value log on
// Open. The callback re-applies every replayed entry to the memtable,
// buffering transactional entries until their bitFinTxn marker arrives so
// that incomplete transactions can be discarded.
func (db *DB) replayFunction() func(Entry, valuePointer) error {
	type txnEntry struct {
		nk []byte
		v  y.ValueStruct
	}

	var txn []txnEntry    // entries of the transaction currently being replayed
	var lastCommit uint64 // commit ts of that transaction (0 when none is open)

	// toLSM writes one key/value into the memtable, waiting until there is
	// room when the current memtable is full.
	toLSM := func(nk []byte, vs y.ValueStruct) {
		for err := db.ensureRoomForWrite(); err != nil; err = db.ensureRoomForWrite() {
			db.opt.Debugf("Replay: Making room for writes")
			time.Sleep(10 * time.Millisecond)
		}
		db.mt.Put(nk, vs)
	}

	first := true
	return func(e Entry, vp valuePointer) error { // Function for replaying.
		if first {
			db.opt.Debugf("First key=%q\n", e.Key)
		}
		first = false
		// Keep the oracle's next txn ts ahead of every replayed version.
		db.orc.Lock()
		if db.orc.nextTxnTs < y.ParseTs(e.Key) {
			db.orc.nextTxnTs = y.ParseTs(e.Key)
		}
		db.orc.Unlock()

		nk := make([]byte, len(e.Key))
		copy(nk, e.Key)
		var nv []byte
		meta := e.meta
		if db.shouldWriteValueToLSM(e) {
			nv = make([]byte, len(e.Value))
			copy(nv, e.Value)
		} else {
			// Value stays in the vlog; store only the pointer in the LSM.
			nv = vp.Encode()
			meta = meta | bitValuePointer
		}
		// Update vhead. If the crash happens while replay was in progess
		// and the head is not updated, we will end up replaying all the
		// files starting from file zero, again.
		db.updateHead([]valuePointer{vp})

		v := y.ValueStruct{
			Value:     nv,
			Meta:      meta,
			UserMeta:  e.UserMeta,
			ExpiresAt: e.ExpiresAt,
		}

		switch {
		case e.meta&bitFinTxn > 0:
			txnTs, err := strconv.ParseUint(string(e.Value), 10, 64)
			if err != nil {
				return errors.Wrapf(err, "Unable to parse txn fin: %q", e.Value)
			}
			y.AssertTrue(lastCommit == txnTs)
			y.AssertTrue(len(txn) > 0)
			// Got the end of txn. Now we can store them.
			for _, t := range txn {
				toLSM(t.nk, t.v)
			}
			txn = txn[:0]
			lastCommit = 0
		case e.meta&bitTxn > 0:
			txnTs := y.ParseTs(nk)
			if lastCommit == 0 {
				lastCommit = txnTs
			}
			if lastCommit != txnTs {
				// A new commit ts without a fin marker: the previous txn
				// never completed, so drop its buffered entries.
				db.opt.Warningf("Found an incomplete txn at timestamp %d. Discarding it.\n",
					lastCommit)
				txn = txn[:0]
				lastCommit = txnTs
			}
			te := txnEntry{nk: nk, v: v}
			txn = append(txn, te)
		default:
			// This entry is from a rewrite or via SetEntryAt(..).
			toLSM(nk, v)

			// We shouldn't get this entry in the middle of a transaction.
			y.AssertTrue(lastCommit == 0)
			y.AssertTrue(len(txn) == 0)
		}
		return nil
	}
}
// Open returns a new DB object. It validates the options, acquires directory
// locks, opens the manifest, key registry, caches, levels controller and
// value log, replays the value log, and starts the background goroutines.
// On error, everything started so far is torn down via db.cleanup.
func Open(opt Options) (db *DB, err error) {
	// It's okay to have zero compactors which will disable all compactions but
	// we cannot have just one compactor otherwise we will end up with all data
	// on level 2.
	if opt.NumCompactors == 1 {
		return nil, errors.New("Cannot have 1 compactor. Need at least 2")
	}
	if opt.InMemory && (opt.Dir != "" || opt.ValueDir != "") {
		return nil, errors.New("Cannot use badger in Disk-less mode with Dir or ValueDir set")
	}
	opt.maxBatchSize = (15 * opt.MaxTableSize) / 100
	opt.maxBatchCount = opt.maxBatchSize / int64(skl.MaxNodeSize)

	// We are limiting opt.ValueThreshold to maxValueThreshold for now.
	if opt.ValueThreshold > maxValueThreshold {
		return nil, errors.Errorf("Invalid ValueThreshold, must be less or equal to %d",
			maxValueThreshold)
	}

	// If ValueThreshold is greater than opt.maxBatchSize, we won't be able to push any data using
	// the transaction APIs. Transaction batches entries into batches of size opt.maxBatchSize.
	if int64(opt.ValueThreshold) > opt.maxBatchSize {
		return nil, errors.Errorf("Valuethreshold greater than max batch size of %d. Either "+
			"reduce opt.ValueThreshold or increase opt.MaxTableSize.", opt.maxBatchSize)
	}
	if !(opt.ValueLogFileSize <= 2<<30 && opt.ValueLogFileSize >= 1<<20) {
		return nil, ErrValueLogSize
	}
	if !(opt.ValueLogLoadingMode == options.FileIO ||
		opt.ValueLogLoadingMode == options.MemoryMap) {
		return nil, ErrInvalidLoadingMode
	}

	// Return error if badger is built without cgo and compression is set to ZSTD.
	if opt.Compression == options.ZSTD && !y.CgoEnabled {
		return nil, y.ErrZstdCgo
	}

	// Keep L0 in memory if either KeepL0InMemory is set or if InMemory is set.
	opt.KeepL0InMemory = opt.KeepL0InMemory || opt.InMemory

	// Compact L0 on close if either it is set or if KeepL0InMemory is set. When
	// keepL0InMemory is set we need to compact L0 on close otherwise we might lose data.
	opt.CompactL0OnClose = opt.CompactL0OnClose || opt.KeepL0InMemory

	if opt.ReadOnly {
		// Can't truncate if the DB is read only.
		opt.Truncate = false
		// Do not perform compaction in read only mode.
		opt.CompactL0OnClose = false
	}

	var dirLockGuard, valueDirLockGuard *directoryLockGuard

	// Create directories and acquire lock on it only if badger is not running in InMemory mode.
	// We don't have any directories/files in InMemory mode so we don't need to acquire
	// any locks on them.
	if !opt.InMemory {
		if err := createDirs(opt); err != nil {
			return nil, err
		}
		if !opt.BypassLockGuard {
			dirLockGuard, err = acquireDirectoryLock(opt.Dir, lockFile, opt.ReadOnly)
			if err != nil {
				return nil, err
			}
			// Released on the error path only: on success dirLockGuard is set
			// to nil near the end, turning this deferred call into a no-op.
			defer func() {
				if dirLockGuard != nil {
					_ = dirLockGuard.release()
				}
			}()
			absDir, err := filepath.Abs(opt.Dir)
			if err != nil {
				return nil, err
			}
			absValueDir, err := filepath.Abs(opt.ValueDir)
			if err != nil {
				return nil, err
			}
			if absValueDir != absDir {
				valueDirLockGuard, err = acquireDirectoryLock(opt.ValueDir, lockFile, opt.ReadOnly)
				if err != nil {
					return nil, err
				}
				defer func() {
					if valueDirLockGuard != nil {
						_ = valueDirLockGuard.release()
					}
				}()
			}
		}
	}

	manifestFile, manifest, err := openOrCreateManifestFile(opt)
	if err != nil {
		return nil, err
	}
	defer func() {
		if manifestFile != nil {
			_ = manifestFile.close()
		}
	}()

	db = &DB{
		imm:           make([]*skl.Skiplist, 0, opt.NumMemtables),
		flushChan:     make(chan flushTask, opt.NumMemtables),
		writeCh:       make(chan *request, kvWriteChCapacity),
		opt:           opt,
		manifest:      manifestFile,
		dirLockGuard:  dirLockGuard,
		valueDirGuard: valueDirLockGuard,
		orc:           newOracle(opt),
		pub:           newPublisher(),
	}
	// Cleanup all the goroutines started by badger in case of an error.
	defer func() {
		if err != nil {
			db.cleanup()
			db = nil
		}
	}()

	if opt.BlockCacheSize > 0 {
		config := ristretto.Config{
			// Use 5% of cache memory for storing counters.
			NumCounters: int64(float64(opt.BlockCacheSize) * 0.05 * 2),
			MaxCost:     int64(float64(opt.BlockCacheSize) * 0.95),
			BufferItems: 64,
			Metrics:     true,
			OnExit:      table.BlockEvictHandler,
		}
		db.blockCache, err = ristretto.NewCache(&config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create data cache")
		}
	}

	if opt.IndexCacheSize > 0 {
		config := ristretto.Config{
			// Use 5% of cache memory for storing counters.
			NumCounters: int64(float64(opt.IndexCacheSize) * 0.05 * 2),
			MaxCost:     int64(float64(opt.IndexCacheSize) * 0.95),
			BufferItems: 64,
			Metrics:     true,
		}
		db.indexCache, err = ristretto.NewCache(&config)
		if err != nil {
			return nil, errors.Wrap(err, "failed to create bf cache")
		}
	}

	if db.opt.InMemory {
		db.opt.SyncWrites = false
		// If badger is running in memory mode, push everything into the LSM Tree.
		db.opt.ValueThreshold = math.MaxInt32
	}
	krOpt := KeyRegistryOptions{
		ReadOnly:                      opt.ReadOnly,
		Dir:                           opt.Dir,
		EncryptionKey:                 opt.EncryptionKey,
		EncryptionKeyRotationDuration: opt.EncryptionKeyRotationDuration,
		InMemory:                      opt.InMemory,
	}

	if db.registry, err = OpenKeyRegistry(krOpt); err != nil {
		return db, err
	}
	db.calculateSize()
	db.closers.updateSize = z.NewCloser(1)
	go db.updateSize(db.closers.updateSize)
	db.mt = skl.NewSkiplist(arenaSize(opt))

	// newLevelsController potentially loads files in directory.
	if db.lc, err = newLevelsController(db, &manifest); err != nil {
		return db, err
	}

	// Initialize vlog struct.
	db.vlog.init(db)

	if !opt.ReadOnly {
		db.closers.compactors = z.NewCloser(1)
		db.lc.startCompact(db.closers.compactors)

		db.closers.memtable = z.NewCloser(1)
		go func() {
			_ = db.flushMemtable(db.closers.memtable) // Need levels controller to be up.
		}()
	}

	// Figure out from where to start replaying the value log.
	vptr, version := db.getHead()
	db.orc.nextTxnTs = version

	// Run a temporary writes goroutine so replayed entries can be re-applied.
	replayCloser := z.NewCloser(1)
	go db.doWrites(replayCloser)

	if err = db.vlog.open(db, vptr, db.replayFunction()); err != nil {
		replayCloser.SignalAndWait()
		return db, y.Wrapf(err, "During db.vlog.open")
	}
	replayCloser.SignalAndWait() // Wait for replay to be applied first.

	// Let's advance nextTxnTs to one more than whatever we observed via
	// replaying the logs.
	db.orc.txnMark.Done(db.orc.nextTxnTs)
	// In normal mode, we must update readMark so older versions of keys can be removed during
	// compaction when run in offline mode via the flatten tool.
	db.orc.readMark.Done(db.orc.nextTxnTs)
	db.orc.incrementNextTs()

	db.closers.writes = z.NewCloser(1)
	go db.doWrites(db.closers.writes)

	if !db.opt.InMemory {
		db.closers.valueGC = z.NewCloser(1)
		go db.vlog.waitOnGC(db.closers.valueGC)
	}

	db.closers.pub = z.NewCloser(1)
	go db.pub.listenForUpdates(db.closers.pub)

	// Ownership of the guards and manifest file has passed to db; nil them so
	// the deferred error-path releases above become no-ops.
	valueDirLockGuard = nil
	dirLockGuard = nil
	manifestFile = nil
	return db, nil
}
// getHead logs all the head pointers stored in the DB and returns the last
// one iterated (the one with the biggest version) along with that version.
// Returns a zero valuePointer and version 0 when no head keys exist.
func (db *DB) getHead() (valuePointer, uint64) {
	// This is a hack. If we use newTransaction(..) we'll end up in deadlock
	// since txnmark is not initialized when this function is called.
	txn := Txn{
		db:     db,
		readTs: math.MaxUint64, // Show all versions.
	}
	var vptr valuePointer
	iopt := DefaultIteratorOptions
	iopt.AllVersions = true
	iopt.InternalAccess = true // head is an internal !badger! key.
	// Do not prefetch values. This could cause a race condition since
	// prefetching is done via goroutines.
	iopt.PrefetchValues = false
	iopt.Reverse = true
	it := txn.NewKeyIterator(head, iopt)
	defer it.Close()

	it.Rewind()
	if !it.Valid() {
		db.opt.Infof("No head keys found")
		return vptr, 0
	}

	var maxVersion uint64
	db.opt.Infof("Found the following head pointers")
	for ; it.Valid(); it.Next() {
		item := it.Item()
		err := item.Value(func(val []byte) error {
			vptr.Decode(val)
			db.opt.Infof("Fid: %d Len: %d Offset: %d Version: %d\n",
				vptr.Fid, vptr.Len, vptr.Offset, item.Version())
			return nil
		})
		// This shouldn't happen.
		y.Check(err)
		// We're iterating in the reverse order so the last item would be the
		// one with the biggest version.
		maxVersion = item.Version()
	}
	// If we have reached here it means there were some head key and so the
	// version should never be zero.
	y.AssertTrue(maxVersion != 0)
	return vptr, maxVersion
}
// cleanup stops all the goroutines started by badger. This is used in open to
// cleanup goroutines in case of an error.
func (db *DB) cleanup() {
	db.stopMemoryFlush()
	db.stopCompactions()
	db.blockCache.Close()
	db.indexCache.Close()
	// Only Signal (no wait) below: during a failed open some of these
	// goroutines may not have been started, so blocking on them is avoided.
	if db.closers.updateSize != nil {
		db.closers.updateSize.Signal()
	}
	if db.closers.valueGC != nil {
		db.closers.valueGC.Signal()
	}
	if db.closers.writes != nil {
		db.closers.writes.Signal()
	}
	if db.closers.pub != nil {
		db.closers.pub.Signal()
	}
	db.orc.Stop()
	// Do not use vlog.Close() here. vlog.Close truncates the files. We don't
	// want to truncate files unless the user has specified the truncate flag.
	db.vlog.stopFlushDiscardStats()
}
// BlockCacheMetrics returns the metrics for the underlying block cache.
// Returns nil when the block cache is not configured.
func (db *DB) BlockCacheMetrics() *ristretto.Metrics {
	if db.blockCache == nil {
		return nil
	}
	return db.blockCache.Metrics
}
// IndexCacheMetrics returns the metrics for the underlying index cache.
// Returns nil when the index cache is not configured.
func (db *DB) IndexCacheMetrics() *ristretto.Metrics {
	if db.indexCache == nil {
		return nil
	}
	return db.indexCache.Metrics
}
// Close closes a DB. It's crucial to call it to ensure all the pending updates make their way to
// disk. Calling DB.Close() multiple times would still only close the DB once.
func (db *DB) Close() error {
	var closeErr error
	// closeOnce guarantees the teardown in db.close runs exactly once; later
	// calls are no-ops and return nil.
	db.closeOnce.Do(func() {
		closeErr = db.close()
	})
	return closeErr
}
// IsClosed denotes if the badger DB is closed or not. A DB instance should not
// be used after closing it.
func (db *DB) IsClosed() bool {
	// db.isClosed is set to 1 at the end of db.close().
	closed := atomic.LoadUint32(&db.isClosed)
	return closed == 1
}
// close performs the actual teardown: it blocks new writes, stops background
// goroutines in dependency order, flushes the final memtable, optionally
// force-compacts L0, and releases locks, the manifest, the key registry and
// directory handles. Errors are collected first-error-wins into err.
func (db *DB) close() (err error) {
	db.opt.Debugf("Closing database")
	atomic.StoreInt32(&db.blockWrites, 1)
	if !db.opt.InMemory {
		// Stop value GC first.
		db.closers.valueGC.SignalAndWait()
	}
	// Stop writes next.
	db.closers.writes.SignalAndWait()
	// Don't accept any more write.
	close(db.writeCh)
	db.closers.pub.SignalAndWait()
	// Now close the value log.
	if vlogErr := db.vlog.Close(); vlogErr != nil {
		err = errors.Wrap(vlogErr, "DB.Close")
	}
	// Make sure that block writer is done pushing stuff into memtable!
	// Otherwise, you will have a race condition: we are trying to flush memtables
	// and remove them completely, while the block / memtable writer is still
	// trying to push stuff into the memtable. This will also resolve the value
	// offset problem: as we push into memtable, we update value offsets there.
	if !db.mt.Empty() {
		db.opt.Debugf("Flushing memtable")
		for {
			pushedFlushTask := func() bool {
				db.Lock()
				defer db.Unlock()
				y.AssertTrue(db.mt != nil)
				select {
				case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
					db.imm = append(db.imm, db.mt) // Flusher will attempt to remove this from s.imm.
					db.mt = nil                    // Will segfault if we try writing!
					db.opt.Debugf("pushed to flush chan\n")
					return true
				default:
					// If we fail to push, we need to unlock and wait for a short while.
					// The flushing operation needs to update s.imm. Otherwise, we have a deadlock.
					// TODO: Think about how to do this more cleanly, maybe without any locks.
				}
				return false
			}()
			if pushedFlushTask {
				break
			}
			time.Sleep(10 * time.Millisecond)
		}
	}
	db.stopMemoryFlush()
	db.stopCompactions()
	// Force Compact L0
	// We don't need to care about cstatus since no parallel compaction is running.
	if db.opt.CompactL0OnClose {
		err := db.lc.doCompact(173, compactionPriority{level: 0, score: 1.73})
		switch err {
		case errFillTables:
			// This error only means that there might be enough tables to do a compaction. So, we
			// should not report it to the end user to avoid confusing them.
		case nil:
			db.opt.Infof("Force compaction on level 0 done")
		default:
			db.opt.Warningf("While forcing compaction on level 0: %v", err)
		}
	}
	// Only the first failure is kept from here on: errors.Wrap(nil, ...)
	// returns nil, so a successful step never clobbers an earlier error.
	if lcErr := db.lc.close(); err == nil {
		err = errors.Wrap(lcErr, "DB.Close")
	}
	db.opt.Debugf("Waiting for closer")
	db.closers.updateSize.SignalAndWait()
	db.orc.Stop()
	db.blockCache.Close()
	db.indexCache.Close()
	atomic.StoreUint32(&db.isClosed, 1)
	if db.opt.InMemory {
		return
	}
	if db.dirLockGuard != nil {
		if guardErr := db.dirLockGuard.release(); err == nil {
			err = errors.Wrap(guardErr, "DB.Close")
		}
	}
	if db.valueDirGuard != nil {
		if guardErr := db.valueDirGuard.release(); err == nil {
			err = errors.Wrap(guardErr, "DB.Close")
		}
	}
	if manifestErr := db.manifest.close(); err == nil {
		err = errors.Wrap(manifestErr, "DB.Close")
	}
	if registryErr := db.registry.Close(); err == nil {
		err = errors.Wrap(registryErr, "DB.Close")
	}
	// Fsync directories to ensure that lock file, and any other removed files whose directory
	// we haven't specifically fsynced, are guaranteed to have their directory entry removal
	// persisted to disk.
	if syncErr := db.syncDir(db.opt.Dir); err == nil {
		err = errors.Wrap(syncErr, "DB.Close")
	}
	if syncErr := db.syncDir(db.opt.ValueDir); err == nil {
		err = errors.Wrap(syncErr, "DB.Close")
	}
	return err
}
// VerifyChecksum verifies checksum for all tables on all levels.
// This method can be used to verify checksum, if opt.ChecksumVerificationMode is NoVerification.
func (db *DB) VerifyChecksum() error {
	return db.lc.verifyChecksum()
}
const (
	// lockFile is the name of the file used to guard a Badger directory
	// against concurrent use by multiple processes.
	lockFile = "LOCK"
)
// Sync syncs database content to disk. This function provides
// more control to user to sync data whenever required.
func (db *DB) Sync() error {
	// math.MaxUint32 syncs the value log up to and including its latest file.
	return db.vlog.sync(math.MaxUint32)
}
// getMemTables returns the current mutable memtable followed by the immutable
// memtables (newest first), along with a function that releases the
// references taken on all of them.
func (db *DB) getMemTables() ([]*skl.Skiplist, func()) {
	db.RLock()
	defer db.RUnlock()

	tables := make([]*skl.Skiplist, 0, len(db.imm)+1)

	// The mutable memtable always comes first.
	tables = append(tables, db.mt)
	db.mt.IncrRef()

	// Immutable memtables follow, newest first.
	for i := len(db.imm) - 1; i >= 0; i-- {
		imm := db.imm[i]
		imm.IncrRef()
		tables = append(tables, imm)
	}

	release := func() {
		for _, tbl := range tables {
			tbl.DecrRef()
		}
	}
	return tables, release
}
// get returns the value in memtable or disk for given key.
// Note that value will include meta byte.
//
// IMPORTANT: We should never write an entry with an older timestamp for the same key, We need to
// maintain this invariant to search for the latest value of a key, or else we need to search in all
// tables and find the max version among them. To maintain this invariant, we also need to ensure
// that all versions of a key are always present in the same table from level 1, because compaction
// can push any table down.
//
// Update (Sep 22, 2018): To maintain the above invariant, and to allow keys to be moved from one
// value log to another (while reclaiming space during value log GC), we have logically moved this
// need to write "old versions after new versions" to the badgerMove keyspace. Thus, for normal
// gets, we can stop going down the LSM tree once we find any version of the key (note however that
// we will ALWAYS skip versions with ts greater than the key version). However, if that key has
// been moved, then for the corresponding movekey, we'll look through all the levels of the tree
// to ensure that we pick the highest version of the movekey present.
func (db *DB) get(key []byte) (y.ValueStruct, error) {
	if db.IsClosed() {
		return y.ValueStruct{}, ErrDBClosed
	}
	tables, decr := db.getMemTables() // Lock should be released.
	defer decr()
	// maxVs stays nil for normal keys; non-nil signals "track the best
	// version across all levels" for badgerMove keys.
	var maxVs *y.ValueStruct
	var version uint64
	if bytes.HasPrefix(key, badgerMove) {
		// If we are checking badgerMove key, we should look into all the
		// levels, so we can pick up the newer versions, which might have been
		// compacted down the tree.
		maxVs = &y.ValueStruct{}
		version = y.ParseTs(key)
	}
	y.NumGets.Add(1)
	// Check memtables first, newest to oldest.
	for i := 0; i < len(tables); i++ {
		vs := tables[i].Get(key)
		y.NumMemtableGets.Add(1)
		if vs.Meta == 0 && vs.Value == nil {
			continue
		}
		// Found a version of the key. For user keyspace, return immediately. For move keyspace,
		// continue iterating, unless we found a version == given key version.
		if maxVs == nil || vs.Version == version {
			return vs, nil
		}
		if maxVs.Version < vs.Version {
			*maxVs = vs
		}
	}
	// Fall through to the LSM tree on disk.
	return db.lc.get(key, maxVs, 0)
}
// updateHead records the most recent non-zero value pointer from ptrs as the
// value log head. It must be called with db.Lock() held, since db.vhead is
// shared with the writer goroutines and the memtable-flush goroutine.
func (db *DB) updateHead(ptrs []valuePointer) {
	// Scan backwards for the latest pointer that is actually set.
	var latest valuePointer
	for i := len(ptrs) - 1; i >= 0; i-- {
		if !ptrs[i].IsZero() {
			latest = ptrs[i]
			break
		}
	}
	if latest.IsZero() {
		return
	}
	// The head must never move backwards.
	y.AssertTrue(!latest.Less(db.vhead))
	db.vhead = latest
}
// requestPool recycles request structs to cut allocations on the write path.
var requestPool = sync.Pool{
	New: func() interface{} {
		return new(request)
	},
}
// shouldWriteValueToLSM reports whether the entry's value is small enough to
// be stored inline in the LSM tree rather than in the value log.
func (db *DB) shouldWriteValueToLSM(e Entry) bool {
	return len(e.Value) < db.opt.ValueThreshold
}
// writeToLSM inserts the entries of request b into the mutable memtable.
// Small values go inline; large values are stored as encoded value pointers
// into the value log (b.Ptrs). Transaction-end markers are skipped.
func (db *DB) writeToLSM(b *request) error {
	// We should check the length of b.Prts and b.Entries only when badger is not
	// running in InMemory mode. In InMemory mode, we don't write anything to the
	// value log and that's why the length of b.Ptrs will always be zero.
	if !db.opt.InMemory && len(b.Ptrs) != len(b.Entries) {
		return errors.Errorf("Ptrs and Entries don't match: %+v", b)
	}
	for i, entry := range b.Entries {
		// bitFinTxn marks a transaction-end entry; nothing to store for it.
		if entry.meta&bitFinTxn != 0 {
			continue
		}
		if db.shouldWriteValueToLSM(*entry) { // Will include deletion / tombstone case.
			db.mt.Put(entry.Key,
				y.ValueStruct{
					Value: entry.Value,
					// Ensure value pointer flag is removed. Otherwise, the value will fail
					// to be retrieved during iterator prefetch. `bitValuePointer` is only
					// known to be set in write to LSM when the entry is loaded from a backup
					// with lower ValueThreshold and its value was stored in the value log.
					Meta:      entry.meta &^ bitValuePointer,
					UserMeta:  entry.UserMeta,
					ExpiresAt: entry.ExpiresAt,
				})
		} else {
			// Large value: store the pointer into the value log instead.
			db.mt.Put(entry.Key,
				y.ValueStruct{
					Value:     b.Ptrs[i].Encode(),
					Meta:      entry.meta | bitValuePointer,
					UserMeta:  entry.UserMeta,
					ExpiresAt: entry.ExpiresAt,
				})
		}
	}
	return nil
}
// writeRequests is called serially by only one goroutine. It writes reqs to
// the value log, publishes them to subscribers, then applies them to the
// memtable (waiting for room if needed), and finally completes each request's
// WaitGroup with the resulting error.
func (db *DB) writeRequests(reqs []*request) error {
	if len(reqs) == 0 {
		return nil
	}
	// done marks every request finished with the given error; callers blocked
	// in req.Wait() are released here.
	done := func(err error) {
		for _, r := range reqs {
			r.Err = err
			r.Wg.Done()
		}
	}
	db.opt.Debugf("writeRequests called. Writing to value log")
	err := db.vlog.write(reqs)
	if err != nil {
		done(err)
		return err
	}
	db.opt.Debugf("Sending updates to subscribers")
	db.pub.sendUpdates(reqs)
	db.opt.Debugf("Writing to memtable")
	var count int
	for _, b := range reqs {
		if len(b.Entries) == 0 {
			continue
		}
		count += len(b.Entries)
		var i uint64
		// Poll until the memtable has room; errNoRoom means a flush is pending.
		for err = db.ensureRoomForWrite(); err == errNoRoom; err = db.ensureRoomForWrite() {
			i++
			if i%100 == 0 {
				db.opt.Debugf("Making room for writes")
			}
			// We need to poll a bit because both hasRoomForWrite and the flusher need access to s.imm.
			// When flushChan is full and you are blocked there, and the flusher is trying to update s.imm,
			// you will get a deadlock.
			time.Sleep(10 * time.Millisecond)
		}
		if err != nil {
			done(err)
			return errors.Wrap(err, "writeRequests")
		}
		if err := db.writeToLSM(b); err != nil {
			done(err)
			return errors.Wrap(err, "writeRequests")
		}
		db.Lock()
		db.updateHead(b.Ptrs)
		db.Unlock()
	}
	done(nil)
	db.opt.Debugf("%d entries written", count)
	return nil
}
// sendToWriteCh packages entries into a request and queues it on db.writeCh
// for the doWrites goroutine. It returns ErrBlockedWrites if writes are
// currently blocked, and ErrTxnTooBig if the batch exceeds the configured
// count or size limits. The caller waits on the returned request.
func (db *DB) sendToWriteCh(entries []*Entry) (*request, error) {
	if atomic.LoadInt32(&db.blockWrites) == 1 {
		return nil, ErrBlockedWrites
	}
	var count, size int64
	for _, e := range entries {
		size += int64(e.estimateSize(db.opt.ValueThreshold))
		count++
	}
	if count >= db.opt.maxBatchCount || size >= db.opt.maxBatchSize {
		return nil, ErrTxnTooBig
	}
	// We can only service one request because we need each txn to be stored in a contigous section.
	// Txns should not interleave among other txns or rewrites.
	req := requestPool.Get().(*request)
	req.reset()
	req.Entries = entries
	req.Wg.Add(1)
	req.IncrRef()     // for db write
	db.writeCh <- req // Handled in doWrites.
	y.NumPuts.Add(int64(len(entries)))
	return req, nil
}
// doWrites is the single writer goroutine. It batches requests arriving on
// db.writeCh and hands each batch to writeRequests, allowing at most one
// in-flight write at a time via pendingCh. It drains remaining requests and
// exits when the closer is signalled.
func (db *DB) doWrites(lc *z.Closer) {
	defer lc.Done()
	// pendingCh has capacity 1: holding the token means a write is in flight.
	pendingCh := make(chan struct{}, 1)
	writeRequests := func(reqs []*request) {
		if err := db.writeRequests(reqs); err != nil {
			db.opt.Errorf("writeRequests: %v", err)
		}
		<-pendingCh
	}
	// This variable tracks the number of pending writes.
	reqLen := new(expvar.Int)
	y.PendingWrites.Set(db.opt.Dir, reqLen)
	reqs := make([]*request, 0, 10)
	for {
		var r *request
		select {
		case r = <-db.writeCh:
		case <-lc.HasBeenClosed():
			goto closedCase
		}
		for {
			reqs = append(reqs, r)
			reqLen.Set(int64(len(reqs)))
			// Force a flush of the batch once it grows large enough.
			if len(reqs) >= 3*kvWriteChCapacity {
				pendingCh <- struct{}{} // blocking.
				goto writeCase
			}
			select {
			// Either push to pending, or continue to pick from writeCh.
			case r = <-db.writeCh:
			case pendingCh <- struct{}{}:
				goto writeCase
			case <-lc.HasBeenClosed():
				goto closedCase
			}
		}
	closedCase:
		// All the pending request are drained.
		// Don't close the writeCh, because it has be used in several places.
		for {
			select {
			case r = <-db.writeCh:
				reqs = append(reqs, r)
			default:
				pendingCh <- struct{}{} // Push to pending before doing a write.
				writeRequests(reqs)
				return
			}
		}
	writeCase:
		// Write asynchronously; pendingCh prevents a second concurrent write.
		go writeRequests(reqs)
		reqs = make([]*request, 0, 10)
		reqLen.Set(0)
	}
}
// batchSet applies a list of badger.Entry. If a request level error occurs it
// will be returned. Usage:
//
//	Check(kv.BatchSet(entries))
func (db *DB) batchSet(entries []*Entry) error {
	req, err := db.sendToWriteCh(entries)
	if err != nil {
		return err
	}
	// Block until the write has been applied (or has failed).
	return req.Wait()
}
// batchSetAsync is the asynchronous version of batchSet. It accepts a
// callback function which is called when all the sets are complete. If a
// request level error occurs, it will be passed back via the callback.
//
//	err := kv.BatchSetAsync(entries, func(err error) {
//	    Check(err)
//	})
func (db *DB) batchSetAsync(entries []*Entry, f func(error)) error {
	req, err := db.sendToWriteCh(entries)
	if err != nil {
		return err
	}
	// Wait for the write off the caller's goroutine, then hand the result to
	// the callback.
	go func() {
		f(req.Wait())
	}()
	return nil
}
// errNoRoom signals that the current memtable is full and a flush is pending;
// writeRequests retries the write after a short sleep when it sees this.
var errNoRoom = errors.New("No room for write")
// ensureRoomForWrite is always called serially. It checks whether the current
// memtable must be rotated (full, or enough log rotations happened) and, if
// so, tries to hand it to the flusher and install a fresh memtable. Returns
// errNoRoom when the flush channel is full so the caller can back off.
func (db *DB) ensureRoomForWrite() error {
	var err error
	db.Lock()
	defer db.Unlock()
	// Here we determine if we need to force flush memtable. Given we rotated log file, it would
	// make sense to force flush a memtable, so the updated value head would have a chance to be
	// pushed to L0. Otherwise, it would not go to L0, until the memtable has been fully filled,
	// which can take a lot longer if the write load has fewer keys and larger values. This force
	// flush, thus avoids the need to read through a lot of log files on a crash and restart.
	// Above approach is quite simple with small drawback. We are calling ensureRoomForWrite before
	// inserting every entry in Memtable. We will get latest db.head after all entries for a request
	// are inserted in Memtable. If we have done >= db.logRotates rotations, then while inserting
	// first entry in Memtable, below condition will be true and we will endup flushing old value of
	// db.head. Hence we are limiting no of value log files to be read to db.logRotates only.
	forceFlush := atomic.LoadInt32(&db.logRotates) >= db.opt.LogRotatesToFlush
	if !forceFlush && db.mt.MemSize() < db.opt.MaxTableSize {
		return nil
	}
	y.AssertTrue(db.mt != nil) // A nil mt indicates that DB is being closed.
	select {
	case db.flushChan <- flushTask{mt: db.mt, vptr: db.vhead}:
		// After every memtable flush, let's reset the counter.
		atomic.StoreInt32(&db.logRotates, 0)
		// Ensure value log is synced to disk so this memtable's contents wouldn't be lost.
		err = db.vlog.sync(db.vhead.Fid)
		if err != nil {
			// NOTE(review): the flush task has already been queued at this
			// point but db.imm/db.mt are not updated — confirm the flusher
			// tolerates this on a sync failure.
			return err
		}
		db.opt.Debugf("Flushing memtable, mt.size=%d size of flushChan: %d\n",
			db.mt.MemSize(), len(db.flushChan))
		// We manage to push this task. Let's modify imm.
		db.imm = append(db.imm, db.mt)
		db.mt = skl.NewSkiplist(arenaSize(db.opt))
		// New memtable is empty. We certainly have room.
		return nil
	default:
		// We need to do this to unlock and allow the flusher to modify imm.
		return errNoRoom
	}
}
// arenaSize returns the memtable arena size: the max table size plus enough
// headroom for one maximum-sized write batch and its skiplist nodes.
func arenaSize(opt Options) int64 {
	return opt.MaxTableSize + opt.maxBatchSize + opt.maxBatchCount*int64(skl.MaxNodeSize)
}
// buildL0Table builds a new L0 table from the memtable in ft, skipping any
// keys matching ft.dropPrefixes, and returns the serialized table data.
func buildL0Table(ft flushTask, bopts table.Options) []byte {
	iter := ft.mt.NewIterator()
	defer iter.Close()
	b := table.NewTableBuilder(bopts)
	defer b.Close()
	var vp valuePointer
	for iter.SeekToFirst(); iter.Valid(); iter.Next() {
		if len(ft.dropPrefixes) > 0 && hasAnyPrefixes(iter.Key(), ft.dropPrefixes) {
			continue
		}
		vs := iter.Value()
		if vs.Meta&bitValuePointer > 0 {
			vp.Decode(vs.Value)
		}
		// NOTE(review): vp is only refreshed for value-pointer entries, so for
		// inline values this passes the previously decoded vp.Len — confirm
		// that b.Add's third argument is intended to be best-effort here.
		b.Add(iter.Key(), iter.Value(), vp.Len)
	}
	return b.Finish(true)
}
// flushTask describes a memtable to be flushed to L0, the value log head to
// persist alongside it, and any key prefixes to drop during the flush.
type flushTask struct {
	mt           *skl.Skiplist  // Memtable to flush; nil tasks are skipped by flushMemtable.
	vptr         valuePointer   // Value log head recorded by pushHead.
	dropPrefixes [][]byte       // Keys with these prefixes are omitted from the L0 table.
}
// pushHead writes the value log head pointer into the memtable being flushed,
// keyed with the max commit ts so that a post-crash replay resumes past it.
// Returns an error if the head pointer is zero; no-op in in-memory mode.
func (db *DB) pushHead(ft flushTask) error {
	// We don't need to store head pointer in the in-memory mode since we will
	// never be replay anything.
	if db.opt.InMemory {
		return nil
	}
	// Ensure we never push a zero valued head pointer.
	if ft.vptr.IsZero() {
		return errors.New("Head should not be zero")
	}
	db.opt.Infof("Storing value log head: %+v\n", ft.vptr)
	val := ft.vptr.Encode()
	// Pick the max commit ts, so in case of crash, our read ts would be higher than all the
	// commits.
	headTs := y.KeyWithTs(head, db.orc.nextTs())
	ft.mt.Put(headTs, y.ValueStruct{Value: val})
	return nil
}
// handleFlushTask must be run serially. It records the value log head into
// the memtable, builds an L0 table from it, and hands the table to the level
// controller — either in memory (KeepL0InMemory) or via a synced file on disk.
func (db *DB) handleFlushTask(ft flushTask) error {
	// There can be a scenario, when empty memtable is flushed. For example, memtable is empty and
	// after writing request to value log, rotation count exceeds db.LogRotatesToFlush.
	if ft.mt.Empty() {
		return nil
	}
	if err := db.pushHead(ft); err != nil {
		return err
	}
	dk, err := db.registry.latestDataKey()
	if err != nil {
		return y.Wrapf(err, "failed to get datakey in db.handleFlushTask")
	}
	bopts := buildTableOptions(db.opt)
	bopts.DataKey = dk
	// Builder does not need cache but the same options are used for opening table.
	bopts.BlockCache = db.blockCache
	bopts.IndexCache = db.indexCache
	tableData := buildL0Table(ft, bopts)
	fileID := db.lc.reserveFileID()
	if db.opt.KeepL0InMemory {
		tbl, err := table.OpenInMemoryTable(tableData, fileID, &bopts)
		if err != nil {
			return errors.Wrapf(err, "failed to open table in memory")
		}
		return db.lc.addLevel0Table(tbl)
	}
	fd, err := y.CreateSyncedFile(table.NewFilename(fileID, db.opt.Dir), true)
	if err != nil {
		return y.Wrap(err)
	}
	// Don't block just to sync the directory entry.
	dirSyncCh := make(chan error, 1)
	go func() { dirSyncCh <- db.syncDir(db.opt.Dir) }()
	if _, err = fd.Write(tableData); err != nil {
		db.opt.Errorf("ERROR while writing to level 0: %v", err)
		return err
	}
	if dirSyncErr := <-dirSyncCh; dirSyncErr != nil {
		// Do dir sync as best effort. No need to return due to an error there.
		db.opt.Errorf("ERROR while syncing level directory: %v", dirSyncErr)
	}
	tbl, err := table.OpenTable(fd, bopts)
	if err != nil {
		db.opt.Debugf("ERROR while opening table: %v", err)
		return err
	}
	// We own a ref on tbl.
	err = db.lc.addLevel0Table(tbl) // This will incrRef
	_ = tbl.DecrRef()               // Releases our ref.
	return err
}
// flushMemtable must keep running until we send it an empty flushTask. If there
// are errors during handling the flush task, we'll retry indefinitely.
func (db *DB) flushMemtable(lc *z.Closer) error {
	defer lc.Done()
	for ft := range db.flushChan {
		if ft.mt == nil {
			// We close db.flushChan now, instead of sending a nil ft.mt.
			continue
		}
		for {
			err := db.handleFlushTask(ft)
			if err == nil {
				// Update s.imm. Need a lock.
				db.Lock()
				// This is a single-threaded operation. ft.mt corresponds to the head of
				// db.imm list. Once we flush it, we advance db.imm. The next ft.mt
				// which would arrive here would match db.imm[0], because we acquire a
				// lock over DB when pushing to flushChan.
				// TODO: This logic is dirty AF. Any change and this could easily break.
				y.AssertTrue(ft.mt == db.imm[0])
				db.imm = db.imm[1:]
				ft.mt.DecrRef() // Return memory.
				db.Unlock()
				break
			}
			// Encountered error. Retry indefinitely.
			db.opt.Errorf("Failure while flushing memtable to disk: %v. Retrying...\n", err)
			time.Sleep(time.Second)
		}
	}
	return nil
}
func exists(path string) (bool, error) {
_, err := os.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return true, err
}
// calculateSize walks the DB directories, sums the sizes of .sst and .vlog
// files, and publishes the totals via y.LSMSize and y.VlogSize. It is a
// no-op in in-memory mode.
func (db *DB) calculateSize() {
	if db.opt.InMemory {
		return
	}
	asExpvar := func(val int64) *expvar.Int {
		v := new(expvar.Int)
		v.Add(val)
		return v
	}
	dirSize := func(dir string) (lsm int64, vlog int64) {
		walkErr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			switch filepath.Ext(path) {
			case ".sst":
				lsm += info.Size()
			case ".vlog":
				vlog += info.Size()
			}
			return nil
		})
		if walkErr != nil {
			db.opt.Debugf("Got error while calculating total size of directory: %s", dir)
		}
		return lsm, vlog
	}

	lsmSize, vlogSize := dirSize(db.opt.Dir)
	y.LSMSize.Set(db.opt.Dir, asExpvar(lsmSize))
	// When the value log lives in a separate directory, walk it separately.
	if db.opt.ValueDir != db.opt.Dir {
		_, vlogSize = dirSize(db.opt.ValueDir)
	}
	y.VlogSize.Set(db.opt.ValueDir, asExpvar(vlogSize))
}
// updateSize refreshes the published LSM/vlog size metrics once a minute
// until the closer is signalled. It is a no-op in in-memory mode.
func (db *DB) updateSize(lc *z.Closer) {
	defer lc.Done()
	if db.opt.InMemory {
		return
	}

	ticker := time.NewTicker(time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-lc.HasBeenClosed():
			return
		case <-ticker.C:
			db.calculateSize()
		}
	}
}
// RunValueLogGC triggers a value log garbage collection.
//
// It picks value log files to perform GC based on statistics that are collected
// during compactions. If no such statistics are available, then log files are
// picked in random order. The process stops as soon as the first log file is
// encountered which does not result in garbage collection.
//
// When a log file is picked, it is first sampled. If the sample shows that we
// can discard at least discardRatio space of that file, it would be rewritten.
//
// If a call to RunValueLogGC results in no rewrites, then an ErrNoRewrite is
// thrown indicating that the call resulted in no file rewrites.
//
// We recommend setting discardRatio to 0.5, thus indicating that a file be
// rewritten if half the space can be discarded. This results in a lifetime
// value log write amplification of 2 (1 from original write + 0.5 rewrite +
// 0.25 + 0.125 + ... = 2). Setting it to higher value would result in fewer
// space reclaims, while setting it to a lower value would result in more space
// reclaims at the cost of increased activity on the LSM tree. discardRatio
// must be in the range (0.0, 1.0), both endpoints excluded, otherwise an
// ErrInvalidRequest is returned.
//
// Only one GC is allowed at a time. If another value log GC is running, or DB
// has been closed, this would return an ErrRejected.
//
// Note: Every time GC is run, it would produce a spike of activity on the LSM
// tree.
func (db *DB) RunValueLogGC(discardRatio float64) error {
	if db.opt.InMemory {
		return ErrGCInMemoryMode
	}
	if discardRatio >= 1.0 || discardRatio <= 0.0 {
		return ErrInvalidRequest
	}
	// startLevel is the level from which we should search for the head key. When badger is running
	// with KeepL0InMemory flag, all tables on L0 are kept in memory. This means we should pick head
	// key from Level 1 onwards because if we pick the headkey from Level 0 we might end up losing
	// data. See test TestL0GCBug.
	startLevel := 0
	if db.opt.KeepL0InMemory {
		startLevel = 1
	}
	// Find head on disk
	headKey := y.KeyWithTs(head, math.MaxUint64)
	// Need to pass with timestamp, lsm get removes the last 8 bytes and compares key
	val, err := db.lc.get(headKey, nil, startLevel)
	if err != nil {
		return errors.Wrap(err, "Retrieving head from on-disk LSM")
	}
	// NOTE: this local shadows the package-level `head` key used just above.
	var head valuePointer
	if len(val.Value) > 0 {
		head.Decode(val.Value)
	}
	// Pick a log file and run GC
	return db.vlog.runGC(discardRatio, head)
}
// Size returns the size of lsm and value log files in bytes. It can be used to decide how often to
// call RunValueLogGC. Returns zeros when the size metrics have not been
// published for this DB's directory yet.
func (db *DB) Size() (lsm, vlog int64) {
	lsmVar := y.LSMSize.Get(db.opt.Dir)
	if lsmVar == nil {
		return 0, 0
	}
	lsm = lsmVar.(*expvar.Int).Value()
	vlog = y.VlogSize.Get(db.opt.ValueDir).(*expvar.Int).Value()
	return lsm, vlog
}
// Sequence represents a Badger sequence.
type Sequence struct {
	sync.Mutex
	db        *DB
	key       []byte // Key under which the current lease is persisted.
	next      uint64 // Next value to hand out from the in-memory range.
	leased    uint64 // Exclusive upper bound of the current lease.
	bandwidth uint64 // How many values each lease reserves (see updateLease).
}
// Next would return the next integer in the sequence, updating the lease by
// running a transaction if needed.
func (seq *Sequence) Next() (uint64, error) {
	seq.Lock()
	defer seq.Unlock()
	// Renew the lease once the in-memory range is exhausted.
	if seq.next >= seq.leased {
		if err := seq.updateLease(); err != nil {
			return 0, err
		}
	}
	next := seq.next
	seq.next++
	return next, nil
}
// Release the leased sequence to avoid wasted integers. This should be done right
// before closing the associated DB. However it is valid to use the sequence after
// it was released, causing a new lease with full bandwidth.
func (seq *Sequence) Release() error {
	seq.Lock()
	defer seq.Unlock()
	err := seq.db.Update(func(txn *Txn) error {
		item, err := txn.Get(seq.key)
		if err != nil {
			return err
		}
		var num uint64
		if err := item.Value(func(v []byte) error {
			num = binary.BigEndian.Uint64(v)
			return nil
		}); err != nil {
			return err
		}
		// Only shrink the stored lease if nobody else has taken a newer one
		// in the meantime (compare-and-set on the persisted value).
		if num == seq.leased {
			var buf [8]byte
			binary.BigEndian.PutUint64(buf[:], seq.next)
			return txn.SetEntry(NewEntry(seq.key, buf[:]))
		}
		return nil
	})
	if err != nil {
		return err
	}
	seq.leased = seq.next
	return nil
}
// updateLease reads the persisted sequence value, reserves the next
// `bandwidth` integers by writing the new upper bound back to the DB, and
// updates seq.next/seq.leased accordingly. A missing key starts the sequence
// at zero.
func (seq *Sequence) updateLease() error {
	return seq.db.Update(func(txn *Txn) error {
		item, err := txn.Get(seq.key)
		switch {
		case err == ErrKeyNotFound:
			seq.next = 0
		case err != nil:
			return err
		default:
			var num uint64
			if err := item.Value(func(v []byte) error {
				num = binary.BigEndian.Uint64(v)
				return nil
			}); err != nil {
				return err
			}
			seq.next = num
		}
		lease := seq.next + seq.bandwidth
		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], lease)
		if err = txn.SetEntry(NewEntry(seq.key, buf[:])); err != nil {
			return err
		}
		seq.leased = lease
		return nil
	})
}
// GetSequence would initiate a new sequence object, generating it from the stored lease, if
// available, in the database. Sequence can be used to get a list of monotonically increasing
// integers. Multiple sequences can be created by providing different keys. Bandwidth sets the
// size of the lease, determining how many Next() requests can be served from memory.
//
// GetSequence is not supported on ManagedDB. Calling this would result in a panic.
func (db *DB) GetSequence(key []byte, bandwidth uint64) (*Sequence, error) {
	if db.opt.managedTxns {
		panic("Cannot use GetSequence with managedDB=true.")
	}
	if len(key) == 0 {
		return nil, ErrEmptyKey
	}
	if bandwidth == 0 {
		return nil, ErrZeroBandwidth
	}
	// next and leased start at their zero values; the initial updateLease
	// below loads the persisted state (or starts from zero).
	seq := &Sequence{
		db:        db,
		key:       key,
		bandwidth: bandwidth,
	}
	err := seq.updateLease()
	return seq, err
}
// Tables gets the TableInfo objects from the level controller. If withKeysCount
// is true, TableInfo objects also contain counts of keys for the tables.
func (db *DB) Tables(withKeysCount bool) []TableInfo {
	return db.lc.getTableInfo(withKeysCount)
}
// KeySplits can be used to get rough key ranges to divide up iteration over
// the DB. Only right table boundaries matching prefix are returned, sorted.
func (db *DB) KeySplits(prefix []byte) []string {
	// We just want table ranges here and not keys count.
	tables := db.Tables(false)

	var splits []string
	for _, ti := range tables {
		// We don't use ti.Left, because that has a tendency to store !badger
		// keys.
		if !bytes.HasPrefix(ti.Right, prefix) {
			continue
		}
		splits = append(splits, string(ti.Right))
	}
	sort.Strings(splits)
	return splits
}
// MaxBatchCount returns max possible entries in batch
func (db *DB) MaxBatchCount() int64 {
	return db.opt.maxBatchCount
}
// MaxBatchSize returns max possible batch size
func (db *DB) MaxBatchSize() int64 {
	return db.opt.maxBatchSize
}
// stopMemoryFlush stops the memtable flusher by closing flushChan and waiting
// for the flush goroutine to drain it and exit.
func (db *DB) stopMemoryFlush() {
	// Stop memtable flushes.
	if db.closers.memtable != nil {
		close(db.flushChan)
		db.closers.memtable.SignalAndWait()
	}
}
// stopCompactions signals the compactor goroutines and waits for them to exit.
func (db *DB) stopCompactions() {
	// Stop compactions.
	if db.closers.compactors != nil {
		db.closers.compactors.SignalAndWait()
	}
}
// startCompactions restarts the compactor goroutines after a stopCompactions,
// installing a fresh closer for them.
func (db *DB) startCompactions() {
	// Resume compactions.
	if db.closers.compactors != nil {
		db.closers.compactors = z.NewCloser(1)
		db.lc.startCompact(db.closers.compactors)
	}
}
// startMemoryFlush restarts the memtable flusher after a stopMemoryFlush,
// recreating flushChan (the old one was closed) and its closer.
func (db *DB) startMemoryFlush() {
	// Start memory flusher.
	if db.closers.memtable != nil {
		db.flushChan = make(chan flushTask, db.opt.NumMemtables)
		db.closers.memtable = z.NewCloser(1)
		go func() {
			_ = db.flushMemtable(db.closers.memtable)
		}()
	}
}
// Flatten can be used to force compactions on the LSM tree so all the tables fall on the same
// level. This ensures that all the versions of keys are colocated and not split across multiple
// levels, which is necessary after a restore from backup. During Flatten, live compactions are
// stopped. Ideally, no writes are going on during Flatten. Otherwise, it would create competition
// between flattening the tree and new tables being created at level zero.
func (db *DB) Flatten(workers int) error {
	db.stopCompactions()
	defer db.startCompactions()
	// compactAway runs `workers` parallel compactions for priority cp and
	// succeeds if at least one of them succeeds.
	compactAway := func(cp compactionPriority) error {
		db.opt.Infof("Attempting to compact with %+v\n", cp)
		errCh := make(chan error, 1)
		for i := 0; i < workers; i++ {
			go func() {
				errCh <- db.lc.doCompact(175, cp)
			}()
		}
		var success int
		var rerr error
		for i := 0; i < workers; i++ {
			err := <-errCh
			if err != nil {
				rerr = err
				db.opt.Warningf("While running doCompact with %+v. Error: %v\n", cp, err)
			} else {
				success++
			}
		}
		if success == 0 {
			return rerr
		}
		// We could do at least one successful compaction. So, we'll consider this a success.
		db.opt.Infof("%d compactor(s) succeeded. One or more tables from level %d compacted.\n",
			success, cp.level)
		return nil
	}
	hbytes := func(sz int64) string {
		return humanize.Bytes(uint64(sz))
	}
	// Keep compacting until at most one level has data.
	for {
		db.opt.Infof("\n")
		var levels []int
		for i, l := range db.lc.levels {
			sz := l.getTotalSize()
			db.opt.Infof("Level: %d. %8s Size. %8s Max.\n",
				i, hbytes(l.getTotalSize()), hbytes(l.maxTotalSize))
			if sz > 0 {
				levels = append(levels, i)
			}
		}
		if len(levels) <= 1 {
			prios := db.lc.pickCompactLevels()
			if len(prios) == 0 || prios[0].score <= 1.0 {
				db.opt.Infof("All tables consolidated into one level. Flattening done.\n")
				return nil
			}
			if err := compactAway(prios[0]); err != nil {
				return err
			}
			continue
		}
		// Create an artificial compaction priority, to ensure that we compact the level.
		cp := compactionPriority{level: levels[0], score: 1.71}
		if err := compactAway(cp); err != nil {
			return err
		}
	}
}
// blockWrite atomically flips blockWrites so new writes are rejected, then
// waits for the in-flight writes to drain. Returns ErrBlockedWrites if writes
// were already blocked. Paired with unblockWrite.
func (db *DB) blockWrite() error {
	// Stop accepting new writes.
	if !atomic.CompareAndSwapInt32(&db.blockWrites, 0, 1) {
		return ErrBlockedWrites
	}
	// Make all pending writes finish. The following will also close writeCh.
	db.closers.writes.SignalAndWait()
	db.opt.Infof("Writes flushed. Stopping compactions now...")
	return nil
}
// unblockWrite restarts the doWrites goroutine and re-opens the write gate
// closed by blockWrite.
func (db *DB) unblockWrite() {
	db.closers.writes = z.NewCloser(1)
	go db.doWrites(db.closers.writes)
	// Resume writes.
	atomic.StoreInt32(&db.blockWrites, 0)
}
// prepareToDrop blocks writes, applies any requests still queued on writeCh,
// and stops memtable flushing. It returns a resume function that restarts
// flushing and re-enables writes. Panics in read-only mode.
func (db *DB) prepareToDrop() (func(), error) {
	if db.opt.ReadOnly {
		panic("Attempting to drop data in read-only mode.")
	}
	// In order to prepare for drop, we need to block the incoming writes and
	// write them to db. Then, flush all the pending flushtasks. So that we
	// don't miss any entries.
	if err := db.blockWrite(); err != nil {
		return nil, err
	}
	reqs := make([]*request, 0, 10)
	for {
		select {
		case r := <-db.writeCh:
			reqs = append(reqs, r)
		default:
			// writeCh is drained; apply what we collected and stop flushing.
			if err := db.writeRequests(reqs); err != nil {
				db.opt.Errorf("writeRequests: %v", err)
			}
			db.stopMemoryFlush()
			return func() {
				db.opt.Infof("Resuming writes")
				db.startMemoryFlush()
				db.unblockWrite()
			}, nil
		}
	}
}
// DropAll would drop all the data stored in Badger. It does this in the following way.
// - Stop accepting new writes.
// - Pause memtable flushes and compactions.
// - Pick all tables from all levels, create a changeset to delete all these
// tables and apply it to manifest.
// - Pick all log files from value log, and delete all of them. Restart value log files from zero.
// - Resume memtable flushes and compactions.
//
// NOTE: DropAll is resilient to concurrent writes, but not to reads. It is up to the user to not do
// any reads while DropAll is going on, otherwise they may result in panics. Ideally, both reads and
// writes are paused before running DropAll, and resumed after it is finished.
func (db *DB) DropAll() error {
	f, err := db.dropAll()
	// Always resume normal operation if dropAll handed back a resume func,
	// even when it also returned an error.
	if f != nil {
		f()
	}
	return err
}
// dropAll performs the actual drop: it blocks writes and flushes via
// prepareToDrop, stops compactions, then — while holding the DB lock —
// replaces the memtables, drops the entire LSM tree and all value-log files,
// and clears the caches. It returns a resume function that restarts
// compactions and re-enables flushes/writes; callers must invoke it even on
// error (see DropAll).
func (db *DB) dropAll() (func(), error) {
	db.opt.Infof("DropAll called. Blocking writes...")
	f, err := db.prepareToDrop()
	if err != nil {
		return f, err
	}
	// prepareToDrop will stop all the incomming write and flushes any pending flush tasks.
	// Before we drop, we'll stop the compaction because anyways all the datas are going to
	// be deleted.
	db.stopCompactions()
	resume := func() {
		db.startCompactions()
		f()
	}
	// Block all foreign interactions with memory tables.
	db.Lock()
	defer db.Unlock()
	// Remove inmemory tables. Calling DecrRef for safety. Not sure if they're absolutely needed.
	db.mt.DecrRef()
	for _, mt := range db.imm {
		mt.DecrRef()
	}
	db.imm = db.imm[:0]
	db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Set it up for future writes.
	num, err := db.lc.dropTree()
	if err != nil {
		return resume, err
	}
	db.opt.Infof("Deleted %d SSTables. Now deleting value logs...\n", num)
	num, err = db.vlog.dropAll()
	if err != nil {
		return resume, err
	}
	db.vhead = valuePointer{} // Zero it out.
	db.lc.nextFileID = 1
	db.opt.Infof("Deleted %d value log files. DropAll done.\n", num)
	db.blockCache.Clear()
	db.indexCache.Clear()
	return resume, nil
}
// DropPrefix would drop all the keys with the provided prefix. It does this in the following way:
// - Stop accepting new writes.
// - Stop memtable flushes before acquiring lock. Because we're acquring lock here
//   and memtable flush stalls for lock, which leads to deadlock
// - Flush out all memtables, skipping over keys with the given prefix, Kp.
// - Write out the value log header to memtables when flushing, so we don't accidentally bring Kp
//   back after a restart.
// - Stop compaction.
// - Compact L0->L1, skipping over Kp.
// - Compact rest of the levels, Li->Li, picking tables which have Kp.
// - Resume memtable flushes, compactions and writes.
func (db *DB) DropPrefix(prefixes ...[]byte) error {
	db.opt.Infof("DropPrefix Called")
	f, err := db.prepareToDrop()
	if err != nil {
		return err
	}
	// Resume flushes and writes when done, whether we succeed or fail.
	defer f()
	// Block all foreign interactions with memory tables.
	db.Lock()
	defer db.Unlock()
	// Treat the active memtable like an immutable one so that it gets
	// flushed (with the prefixes dropped) along with the rest.
	db.imm = append(db.imm, db.mt)
	for _, memtable := range db.imm {
		if memtable.Empty() {
			memtable.DecrRef()
			continue
		}
		task := flushTask{
			mt: memtable,
			// Ensure that the head of value log gets persisted to disk.
			vptr: db.vhead,
			dropPrefixes: prefixes,
		}
		db.opt.Debugf("Flushing memtable")
		if err := db.handleFlushTask(task); err != nil {
			db.opt.Errorf("While trying to flush memtable: %v", err)
			return err
		}
		memtable.DecrRef()
	}
	db.stopCompactions()
	defer db.startCompactions()
	db.imm = db.imm[:0]
	db.mt = skl.NewSkiplist(arenaSize(db.opt)) // Fresh memtable for future writes.
	// Drop prefixes from the levels.
	if err := db.lc.dropPrefixes(prefixes); err != nil {
		return err
	}
	db.opt.Infof("DropPrefix done")
	return nil
}
// KVList contains a list of key-value pairs.
type KVList = pb.KVList

// Subscribe can be used to watch key changes for the given key prefixes.
// At least one prefix should be passed, or an error will be returned.
// You can use an empty prefix to monitor all changes to the DB.
// This function blocks until the given context is done or an error occurs.
// The given function will be called with a new KVList containing the modified keys and the
// corresponding values.
func (db *DB) Subscribe(ctx context.Context, cb func(kv *KVList) error, prefixes ...[]byte) error {
	if cb == nil {
		return ErrNilCallback
	}
	c := z.NewCloser(1)
	recvCh, id := db.pub.newSubscriber(c, prefixes...)
	// slurp drains everything currently buffered on recvCh into batch and
	// invokes cb once if anything was collected.
	slurp := func(batch *pb.KVList) error {
		for {
			select {
			case kvs := <-recvCh:
				batch.Kv = append(batch.Kv, kvs.Kv...)
			default:
				if len(batch.GetKv()) > 0 {
					return cb(batch)
				}
				return nil
			}
		}
	}
	for {
		select {
		case <-c.HasBeenClosed():
			// No need to delete here. Closer will be called only while
			// closing DB. Subscriber will be deleted by cleanSubscribers.
			err := slurp(new(pb.KVList))
			// Drain if any pending updates.
			c.Done()
			return err
		case <-ctx.Done():
			c.Done()
			db.pub.deleteSubscriber(id)
			// Delete the subscriber to avoid further updates.
			return ctx.Err()
		case batch := <-recvCh:
			err := slurp(batch)
			if err != nil {
				c.Done()
				// Delete the subscriber if there is an error by the callback.
				db.pub.deleteSubscriber(id)
				return err
			}
		}
	}
}
// shouldEncrypt reports whether an encryption key has been configured,
// i.e. whether data written by this DB should be encrypted.
func (db *DB) shouldEncrypt() bool {
	return len(db.opt.EncryptionKey) != 0
}
// syncDir fsyncs the given directory via the package-level syncDir helper.
// It is a no-op for purely in-memory databases, which have nothing on disk.
func (db *DB) syncDir(dir string) error {
	if !db.opt.InMemory {
		return syncDir(dir)
	}
	return nil
}
// createDirs makes sure both the key directory and the value directory from
// opt exist, creating any missing one (mode 0700). In read-only mode a
// missing directory is an error instead, since we must not create it.
func createDirs(opt Options) error {
	for _, dir := range []string{opt.Dir, opt.ValueDir} {
		found, err := exists(dir)
		if err != nil {
			return y.Wrapf(err, "Invalid Dir: %q", dir)
		}
		if found {
			continue
		}
		if opt.ReadOnly {
			return errors.Errorf("Cannot find directory %q for read-only open", dir)
		}
		// Try to create the directory.
		if err := os.MkdirAll(dir, 0700); err != nil {
			return y.Wrapf(err, "Error Creating Dir: %q", dir)
		}
	}
	return nil
}
// StreamDB streams the contents of this DB into a new managed DB created
// with outOptions (rooted at outOptions.Dir) using the stream framework.
// Returns an error if the output DB cannot be opened, prepared, streamed
// into, or flushed.
func (db *DB) StreamDB(outOptions Options) error {
	outDir := outOptions.Dir
	// Open output DB.
	outDB, err := OpenManaged(outOptions)
	if err != nil {
		return errors.Wrapf(err, "cannot open out DB at %s", outDir)
	}
	defer outDB.Close()
	writer := outDB.NewStreamWriter()
	if err := writer.Prepare(); err != nil {
		// BUG FIX: this wrapped error was previously constructed and
		// silently dropped, so a failed Prepare went unnoticed.
		return errors.Wrapf(err, "cannot create stream writer in out DB at %s", outDir)
	}
	// Stream contents of DB to the output DB.
	stream := db.NewStreamAt(math.MaxUint64)
	stream.LogPrefix = fmt.Sprintf("Streaming DB to new DB at %s", outDir)
	stream.Send = func(kvs *pb.KVList) error {
		return writer.Write(kvs)
	}
	if err := stream.Orchestrate(context.Background()); err != nil {
		return errors.Wrapf(err, "cannot stream DB to out DB at %s", outDir)
	}
	// Flush so all streamed data is durably written before reporting success.
	if err := writer.Flush(); err != nil {
		return errors.Wrapf(err, "cannot flush writer")
	}
	return nil
}
|
package main
import (
"bytes"
"io/ioutil"
"log"
"net/http"
)
// USER models a LINE user document as stored in the mLab _User collection.
type USER struct {
	Name   string
	LineID string
}

// mLab REST endpoints for the _User and Message collections.
//
// NOTE(review): the apiKey query parameter is a credential committed to
// source control; it should be moved to configuration/environment and the
// key rotated.
var userDb_url = "https://api.mlab.com/api/1/databases/heroku_h1g317z7/collections/_User?apiKey=1S26M0Ti2t7gKunYRJiGNg8aeIMXnptN"
var msgDb_url = "https://api.mlab.com/api/1/databases/heroku_h1g317z7/collections/Message?apiKey=1S26M0Ti2t7gKunYRJiGNg8aeIMXnptN"
// getAllUser fetches every document in the mLab _User collection and logs
// the raw JSON response body.
func getAllUser() {
	resp, err := http.Get(userDb_url)
	if err != nil {
		// BUG FIX: previously the error was printed and execution fell
		// through, so the deferred resp.Body.Close() dereferenced a nil
		// resp and panicked whenever the request failed.
		log.Println("getAllUser:", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// BUG FIX: the ReadAll error was previously ignored.
		log.Println("getAllUser: reading body:", err)
		return
	}
	log.Println("mLab User", string(body))
}
// addNewUser POSTs a new user document {"lineID": <ID>} to the mLab _User
// collection. Panics if the HTTP request itself fails (preserved behavior).
//
// NOTE(review): ID is spliced into the JSON body without quoting; unless
// callers pass an already-quoted string this produces invalid JSON —
// confirm with callers before changing the payload format.
func addNewUser(ID string) {
	var sendingMsg = `{"lineID":` + ID + `}`
	var jsonStr = []byte(sendingMsg)
	req, err := http.NewRequest("POST", userDb_url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was previously ignored, so a malformed URL
		// would nil-dereference on req.Header below.
		log.Println("addNewUser:", err)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
// addMessageFromUser POSTs a new message document to the mLab Message
// collection, recording the sender and initializing the reply fields.
// Panics if the HTTP request itself fails (preserved behavior).
//
// NOTE(review): msg and fromUserId are spliced into the JSON body without
// quoting — confirm callers pass already-quoted strings.
func addMessageFromUser(msg string, fromUserId string) {
	// BUG FIX: the "fromUserId" key was missing its closing quote
	// (`"fromUserId:`), which made the JSON key malformed.
	var sendingMsg = `{"msg":` + msg + `,"fromUserId":` + fromUserId + `,"replyBool":false,"replyMsg":""}`
	log.Println(sendingMsg)
	var jsonStr = []byte(sendingMsg)
	req, err := http.NewRequest("POST", msgDb_url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was previously ignored, leading to a nil
		// dereference on req.Header below.
		log.Println("addMessageFromUser:", err)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
v.3.6.6
package main
import (
"bytes"
"io/ioutil"
"log"
"net/http"
)
type USER struct {
Name string
LineID string
}
var userDb_url = "https://api.mlab.com/api/1/databases/heroku_h1g317z7/collections/_User?apiKey=1S26M0Ti2t7gKunYRJiGNg8aeIMXnptN"
var msgDb_url = "https://api.mlab.com/api/1/databases/heroku_h1g317z7/collections/Message?apiKey=1S26M0Ti2t7gKunYRJiGNg8aeIMXnptN"
// getAllUser fetches every document in the mLab _User collection and logs
// the raw JSON response body.
func getAllUser() {
	resp, err := http.Get(userDb_url)
	if err != nil {
		// BUG FIX: previously the error was printed and execution fell
		// through, so the deferred resp.Body.Close() dereferenced a nil
		// resp and panicked whenever the request failed.
		log.Println("getAllUser:", err)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// BUG FIX: the ReadAll error was previously ignored.
		log.Println("getAllUser: reading body:", err)
		return
	}
	log.Println("mLab User", string(body))
}
// addNewUser POSTs a new user document {"lineID": <ID>} to the mLab _User
// collection. Panics if the HTTP request itself fails (preserved behavior).
//
// NOTE(review): ID is spliced into the JSON body without quoting — confirm
// callers pass an already-quoted string.
func addNewUser(ID string) {
	var sendingMsg = `{"lineID":` + ID + `}`
	var jsonStr = []byte(sendingMsg)
	req, err := http.NewRequest("POST", userDb_url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was previously ignored, so a malformed URL
		// would nil-dereference on req.Header below.
		log.Println("addNewUser:", err)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
// addMessageFromUser POSTs a new message document to the mLab Message
// collection, recording the sender and initializing the reply fields.
// Panics if the HTTP request itself fails (preserved behavior).
//
// NOTE(review): msg and fromUserId are spliced into the JSON body without
// quoting — confirm callers pass already-quoted strings.
func addMessageFromUser(msg string, fromUserId string) {
	var sendingMsg = `{"msg":` + msg + `,"fromUserId":` + fromUserId + `,"replyBool":false,"replyMsg":""}`
	log.Println(sendingMsg)
	var jsonStr = []byte(sendingMsg)
	req, err := http.NewRequest("POST", msgDb_url, bytes.NewBuffer(jsonStr))
	if err != nil {
		// BUG FIX: this error was previously ignored, leading to a nil
		// dereference on req.Header below.
		log.Println("addMessageFromUser:", err)
		return
	}
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
|
package meter
import (
"net/url"
"strconv"
"sync"
"time"
"github.com/go-redis/redis"
)
// DefaultKeyPrefix is prepended to every Redis key built by AppendKey.
//
// NOTE(review): AppendKey already inserts a LabelSeparator after the prefix,
// so the trailing ':' here ends up inside the key; a later revision of this
// package changes the value to "meter" (no colon).
const DefaultKeyPrefix = "meter:"

// DB bundles a Redis client with the key prefix used for all meter keys.
type DB struct {
	Redis redis.UniversalClient
	// *Registry
	KeyPrefix string
}

// NewDB returns a DB that uses r and the default key prefix.
func NewDB(r redis.UniversalClient) *DB {
	db := new(DB)
	db.Redis = r
	// db.Registry = defaultRegistry
	db.KeyPrefix = DefaultKeyPrefix
	return db
}
// MatchingQueries filters q down to the parameters whose key is one of the
// descriptor's labels. It returns nil when either the descriptor or the
// query is nil.
func (e *Desc) MatchingQueries(q url.Values) url.Values {
	if e == nil || q == nil {
		return nil
	}
	matched := make(url.Values, len(q))
	for key, values := range q {
		if e.HasLabel(key) {
			matched[key] = values
		}
	}
	return matched
}
// LabelSeparator delimits the components of serialized keys and fields.
const LabelSeparator = '\x1f'

// FieldTerminator marks the end of a serialized field.
const FieldTerminator = '\x1e'

// Key renders the Redis key for an event at resolution r and time t.
func (db DB) Key(r Resolution, event string, t time.Time) (k string) {
	return string(db.AppendKey(nil, r, event, t))
}

// AppendKey appends the Redis key for (r, event, t) to data and returns the
// extended slice. Layout: [prefix SEP] resolution SEP marshaled-time SEP event.
func (db DB) AppendKey(data []byte, r Resolution, event string, t time.Time) []byte {
	if db.KeyPrefix != "" {
		data = append(data, db.KeyPrefix...)
		data = append(data, LabelSeparator)
	}
	data = append(data, r.Name()...)
	data = append(data, LabelSeparator)
	data = append(data, r.MarshalTime(t)...)
	data = append(data, LabelSeparator)
	data = append(data, event...)
	return data
}
// NilByte is the placeholder stored for a label that has no value.
const NilByte byte = 0
const sNilByte = "\x00"

// AppendField serializes a (labels, values) pairing into data and returns
// the extended slice. Each label is followed by its value (or NilByte when
// values is shorter than labels); entries are joined with LabelSeparator and
// the whole field ends with FieldTerminator.
func AppendField(data []byte, labels, values []string) []byte {
	for i, label := range labels {
		if i > 0 {
			data = append(data, LabelSeparator)
		}
		data = append(data, label...)
		data = append(data, LabelSeparator)
		if i < len(values) {
			data = append(data, values[i]...)
		} else {
			data = append(data, NilByte)
		}
	}
	return append(data, FieldTerminator)
}
// func (db *DB) Sync() error {
// return db.SyncAt(time.Now())
// }
// func (db *DB) SyncAt(tm time.Time) error {
// return db.Registry.Sync(db, tm)
// }
// Gather drains all metrics from col into a single Redis pipeline keyed at
// time tm and executes it. It returns the number of commands queued on the
// pipeline and any execution error.
//
// A helper goroutine consumes the Metric channel: for each metric with a
// non-zero value it queues one hash command per resolution (HINCRBY, HSETNX
// or HSET, depending on the metric type), then one EXPIRE per touched key.
func (db *DB) Gather(col Collector, tm time.Time) (pipelineSize int64, err error) {
	pipeline := db.Redis.Pipeline()
	defer pipeline.Close()
	ch := make(chan Metric)
	size := make(chan int64)
	go func() {
		var psize int64
		data := []byte{}
		// Remember each key's TTL so a single EXPIRE per key is issued
		// after all hash commands are queued.
		keysTTL := make(map[string]time.Duration)
		for m := range ch {
			if m == nil {
				continue
			}
			// Set(0) appears to swap the metric's counter with zero and
			// return the previous value — confirm against the Metric
			// implementation.
			n := m.Set(0)
			if n == 0 {
				continue
			}
			values := m.Values()
			desc := m.Describe()
			name := desc.Name()
			labels := desc.Labels()
			data = AppendField(data[:0], labels, values)
			field := string(data)
			t := desc.Type()
			for _, res := range desc.Resolutions() {
				data = db.AppendKey(data[:0], res, name, tm)
				key := string(data)
				keysTTL[key] = res.TTL()
				switch t {
				case MetricTypeIncrement:
					pipeline.HIncrBy(key, field, n)
				case MetricTypeUpdateOnce:
					pipeline.HSetNX(key, field, n)
				case MetricTypeUpdate:
					pipeline.HSet(key, field, n)
				default:
					// Unknown metric type: skip without counting.
					continue
				}
				psize++
			}
		}
		for key, ttl := range keysTTL {
			pipeline.Expire(key, ttl)
			psize++
		}
		size <- psize
	}()
	col.Collect(ch)
	close(ch)
	pipelineSize = <-size
	if pipelineSize != 0 {
		_, err = pipeline.Exec()
	}
	return
}
// ScanResult is a single data point produced by a query: the counter value
// for one event at one time step, with the label values it was stored under.
type ScanResult struct {
	Name   string      // event name
	Time   time.Time   // time step this value belongs to
	Group  []string    // grouping labels requested by the query
	Values LabelValues // label values parsed from the Redis hash field
	err    error       // per-point retrieval/parse error
	count  int64       // counter value
}
// AppendMatch appends s to data, backslash-escaping the characters that are
// special in Redis MATCH glob patterns (*, [, ], ?, ^), and returns the
// extended slice.
func AppendMatch(data []byte, s string) []byte {
	for _, b := range []byte(s) {
		switch b {
		case '*', '[', ']', '?', '^':
			data = append(data, '\\', b)
		default:
			data = append(data, b)
		}
	}
	return data
}
// AppendMatchField builds a Redis MATCH glob that selects hash fields
// produced by AppendField. For each label: grouped labels match any
// non-NilByte value ("[^\x00]*"), labels constrained in q match their exact
// (escaped) value, and all others match anything. With no group and no
// constraints the pattern is simply "*".
func AppendMatchField(data []byte, labels []string, group []string, q map[string]string) []byte {
	if len(group) == 0 && len(q) == 0 {
		return append(data, '*')
	}
	for i := 0; i < len(labels); i++ {
		if i != 0 {
			data = append(data, LabelSeparator)
		}
		label := labels[i]
		data = AppendMatch(data, label)
		data = append(data, LabelSeparator)
		if indexOf(group, label) >= 0 {
			// Grouped label: require a real (non-nil) value.
			data = append(data, '[', '^', NilByte, ']', '*')
			continue
		}
		if q != nil {
			if v, ok := q[label]; ok {
				// Constrained label: match the exact requested value.
				data = AppendMatch(data, v)
				continue
			}
		}
		data = append(data, '*')
	}
	data = append(data, FieldTerminator)
	return data
}
// Query runs each query concurrently (exact or scan mode, per q.Mode) and
// collects their ScanResults into a Results value.
//
// NOTE(review): the error returns of ExactQuery/ScanQuery are discarded
// here; failures only surface through the per-point err field of each
// ScanResult.
func (db *DB) Query(queries ...Query) (Results, error) {
	if len(queries) == 0 {
		return Results{}, nil
	}
	scan := make(chan ScanResult, len(queries))
	results := CollectResults(scan)
	wg := new(sync.WaitGroup)
	for _, q := range queries {
		wg.Add(1)
		go func(q Query) {
			switch q.Mode {
			case QueryModeExact:
				db.ExactQuery(scan, q)
			case QueryModeScan:
				db.ScanQuery(scan, q)
			}
			wg.Done()
		}(q)
	}
	wg.Wait()
	close(scan)
	if r := <-results; r != nil {
		return r, nil
	}
	return Results{}, nil
}
// ExactQuery looks up exact counter values with pipelined HGETs for every
// permutation of the query's label values at every time step of the query's
// resolution, emitting one ScanResult per (values, time) pair on results.
func (db *DB) ExactQuery(results chan<- ScanResult, q Query) error {
	if err := q.Error(); err != nil {
		// Report the invalid query downstream and stop.
		// BUG FIX: previously the error was sent but execution fell through
		// and the query was executed anyway.
		results <- ScanResult{err: err}
		return err
	}
	var replies []*redis.StringCmd
	// Field/Key buffer, reused for both serializations.
	data := []byte{}
	res := q.Resolution
	ts := res.TimeSequence(q.Start, q.End)
	desc := q.Event.Describe()
	labels := desc.Labels()
	qValues := QueryPermutations(q.Values)
	pipeline := db.Redis.Pipeline()
	defer pipeline.Close()
	// Queue one HGET per (values, time) pair. replies lines up with the
	// qValues x ts iteration order used again below.
	for _, values := range qValues {
		data = AppendField(data[:0], labels, LabelValues(values).Values(labels))
		field := string(data)
		for _, tm := range ts {
			data = db.AppendKey(data[:0], res, desc.Name(), tm)
			key := string(data)
			replies = append(replies, pipeline.HGet(key, field))
		}
	}
	if len(replies) == 0 {
		return nil
	}
	// redis.Nil only means some fields were absent; that is not a failure.
	if _, err := pipeline.Exec(); err != nil && err != redis.Nil {
		return err
	}
	for i, values := range qValues {
		for j, tm := range ts {
			reply := replies[i*len(ts)+j]
			n, err := reply.Int64()
			results <- ScanResult{
				Name:   desc.Name(),
				Time:   tm,
				Values: values,
				count:  n,
				err:    err,
			}
		}
	}
	return nil
}
// ScanQuery answers a query by HSCANning the relevant keys: for each label
// value permutation it builds a MATCH pattern, then scans every time step's
// key concurrently, streaming ScanResults to results.
//
// NOTE(review): the error return of db.Scan inside the goroutines is
// discarded; only per-point errors inside ScanResult reach the consumer.
func (db *DB) ScanQuery(results chan<- ScanResult, q Query) (err error) {
	if e := q.Error(); e != nil {
		results <- ScanResult{err: e}
		return
	}
	desc := q.Event.Describe()
	result := ScanResult{
		Name:  desc.Name(),
		Group: q.Group,
	}
	res := q.Resolution
	ts := res.TimeSequence(q.Start, q.End)
	if len(ts) == 0 {
		return
	}
	qValues := QueryPermutations(q.Values)
	if len(qValues) == 0 {
		// No constraints: scan with a single empty permutation.
		qValues = append(qValues, map[string]string{})
	}
	wg := &sync.WaitGroup{}
	for _, values := range qValues {
		result.Values = values
		// data = AppendField(data, desc.Labels(), m.Values())
		data := AppendMatchField(nil, desc.Labels(), q.Group, values)
		match := string(data)
		// Let redis client pool size determine parallel request blocking
		for _, tm := range ts {
			result.Time = tm
			data = db.AppendKey(data[:0], res, desc.Name(), tm)
			key := string(data)
			wg.Add(1)
			// result is passed by value, so each goroutine gets its own copy.
			go func(r ScanResult, key string) {
				db.Scan(key, match, r, results)
				wg.Done()
			}(result, key)
		}
	}
	wg.Wait()
	return nil
}
// parseField splits a field serialized by AppendField into its raw tokens,
// appending them to values and returning the extended slice. Tokens are
// delimited by LabelSeparator; a FieldTerminator also ends the current
// token. Any bytes after the last delimiter form a final token.
func parseField(values []string, field string) []string {
	offset := 0
	for i := 0; i < len(field); i++ {
		switch field[i] {
		// BUG FIX: the FieldTerminator case previously ended with a bare
		// `break`, which in Go exits only the switch (a no-op here), not the
		// loop — misleading dead code. Both cases do the same thing, so they
		// are merged; behavior is unchanged.
		case LabelSeparator, FieldTerminator:
			values = append(values, field[offset:i])
			offset = i + 1
		}
	}
	if offset < len(field) {
		values = append(values, field[offset:])
	}
	return values
}
// func fieldIndexOf(field []string, v string) int {
// n := len(field)
// n -= n % 2
// for i := 0; i < n; i += 2 {
// if field[i] == v {
// return i
// }
// }
// return -1
// }
// func parseGroup(pairs []string, group string) LabelValues {
// pairs = parseField(pairs, group)
// n := len(pairs)
// n -= n % 2
// values := make(map[string]string, n/2)
// for i := 0; i < n; i += 2 {
// if v := pairs[i+1]; v != sNilByte {
// values[pairs[i]] = pairs[i+1]
// }
// }
// return LabelValues(values)
// }
// func scanField(val []byte, field []string, group []string) ([]byte, bool) {
// for i := 0; i < len(group); i++ {
// if j := fieldIndexOf(field, group[i]); j < 0 || field[j+1] == sNilByte {
// return val, false
// } else {
// if i != 0 {
// val = append(val, LabelSeparator)
// }
// val = append(val, group[i]...)
// val = append(val, LabelSeparator)
// val = append(val, field[j+1]...)
// }
// }
// val = append(val, FieldTerminator)
// return val, true
// }
// const sLabelSeparator = "\x1f"
// Scan iterates the Redis hash at key with HSCAN using the given match
// pattern and emits one ScanResult per field/value pair on results. When
// the template result r carries grouping labels, each field is parsed back
// into label values first.
func (db *DB) Scan(key, match string, r ScanResult, results chan<- ScanResult) error {
	iter := db.Redis.HScan(key, 0, match, -1).Iterator()
	withGroup := len(r.Group) != 0
	var tokens []string
	for i := 0; iter.Next(); i++ {
		if i%2 == 0 {
			// Even positions are hash fields; recover their label values
			// only when grouping asked for them.
			if withGroup {
				tokens = parseField(tokens[:0], iter.Val())
				r.Values = FieldLabels(tokens)
			}
			continue
		}
		// Odd positions are counter values stored as decimal strings.
		r.count, r.err = strconv.ParseInt(iter.Val(), 10, 64)
		results <- r
	}
	return iter.Err()
}
// pair is one observed (label, value) occurrence with its counter total.
type pair struct {
	Label, Value string
	Count int64
}

// FrequencyMap maps label -> value -> accumulated count.
type FrequencyMap map[string]map[string]int64

// ValueScan return a frequency map of event label values
func (db *DB) ValueScan(event Descriptor, res Resolution, start, end time.Time) FrequencyMap {
	desc := event.Describe()
	labels := desc.Labels()
	ch := make(chan pair, len(labels))
	result := make(chan FrequencyMap)
	// Single aggregator goroutine: sums counts per (label, value) so the
	// scanning goroutines below never touch the map concurrently.
	go func() {
		results := FrequencyMap{}
		for i := 0; i < len(labels); i++ {
			results[labels[i]] = make(map[string]int64)
		}
		for p := range ch {
			results[p.Label][p.Value] += p.Count
		}
		result <- results
	}()
	wg := new(sync.WaitGroup)
	data := []byte{}
	ts := TimeSequence(start, end, res.Step())
	// One scanning goroutine per time step's key.
	for i := range ts {
		wg.Add(1)
		data = db.AppendKey(data[:0], res, desc.Name(), ts[i])
		key := string(data)
		go func(key string) {
			var n int64
			field := make([]string, len(labels))
			scan := db.Redis.HScan(key, 0, "*", -1).Iterator()
			i := 0
			for scan.Next() {
				if i%2 == 0 {
					// Even elements are hash fields: parse into label/value tokens.
					field = parseField(field[:0], scan.Val())
				} else if n, _ = strconv.ParseInt(scan.Val(), 10, 64); n != 0 {
					// Odd elements are counts; report each non-nil label value.
					for j := 0; j < len(field); j += 2 {
						if label, val := field[j], field[j+1]; val != sNilByte {
							ch <- pair{label, val, n}
						}
					}
				}
				i++
			}
			wg.Done()
		}(key)
	}
	wg.Wait()
	close(ch)
	return <-result
}
Fix default key prefix
package meter
import (
"net/url"
"strconv"
"sync"
"time"
"github.com/go-redis/redis"
)
const DefaultKeyPrefix = "meter"
type DB struct {
Redis redis.UniversalClient
// *Registry
KeyPrefix string
}
func NewDB(r redis.UniversalClient) *DB {
db := new(DB)
db.Redis = r
// db.Registry = defaultRegistry
db.KeyPrefix = DefaultKeyPrefix
return db
}
func (e *Desc) MatchingQueries(q url.Values) url.Values {
if e == nil || q == nil {
return nil
}
m := make(map[string][]string, len(q))
for key, values := range q {
if e.HasLabel(key) {
m[key] = values
}
}
return m
}
const LabelSeparator = '\x1f'
const FieldTerminator = '\x1e'
func (db DB) Key(r Resolution, event string, t time.Time) (k string) {
return string(db.AppendKey(nil, r, event, t))
}
func (db DB) AppendKey(data []byte, r Resolution, event string, t time.Time) []byte {
if db.KeyPrefix != "" {
data = append(data, db.KeyPrefix...)
data = append(data, LabelSeparator)
}
data = append(data, r.Name()...)
data = append(data, LabelSeparator)
data = append(data, r.MarshalTime(t)...)
data = append(data, LabelSeparator)
data = append(data, event...)
return data
}
const NilByte byte = 0
const sNilByte = "\x00"
func AppendField(data []byte, labels, values []string) []byte {
n := len(values)
for i := 0; i < len(labels); i++ {
label := labels[i]
if i != 0 {
data = append(data, LabelSeparator)
}
data = append(data, label...)
data = append(data, LabelSeparator)
if i < n {
value := values[i]
data = append(data, value...)
} else {
data = append(data, NilByte)
}
}
data = append(data, FieldTerminator)
return data
}
// func (db *DB) Sync() error {
// return db.SyncAt(time.Now())
// }
// func (db *DB) SyncAt(tm time.Time) error {
// return db.Registry.Sync(db, tm)
// }
func (db *DB) Gather(col Collector, tm time.Time) (pipelineSize int64, err error) {
pipeline := db.Redis.Pipeline()
defer pipeline.Close()
ch := make(chan Metric)
size := make(chan int64)
go func() {
var psize int64
data := []byte{}
keysTTL := make(map[string]time.Duration)
for m := range ch {
if m == nil {
continue
}
n := m.Set(0)
if n == 0 {
continue
}
values := m.Values()
desc := m.Describe()
name := desc.Name()
labels := desc.Labels()
data = AppendField(data[:0], labels, values)
field := string(data)
t := desc.Type()
for _, res := range desc.Resolutions() {
data = db.AppendKey(data[:0], res, name, tm)
key := string(data)
keysTTL[key] = res.TTL()
switch t {
case MetricTypeIncrement:
pipeline.HIncrBy(key, field, n)
case MetricTypeUpdateOnce:
pipeline.HSetNX(key, field, n)
case MetricTypeUpdate:
pipeline.HSet(key, field, n)
default:
continue
}
psize++
}
}
for key, ttl := range keysTTL {
pipeline.Expire(key, ttl)
psize++
}
size <- psize
}()
col.Collect(ch)
close(ch)
pipelineSize = <-size
if pipelineSize != 0 {
_, err = pipeline.Exec()
}
return
}
type ScanResult struct {
Name string
Time time.Time
Group []string
Values LabelValues
err error
count int64
}
func AppendMatch(data []byte, s string) []byte {
for i := 0; i < len(s); i++ {
switch b := s[i]; b {
case '*', '[', ']', '?', '^':
data = append(data, '\\', b)
default:
data = append(data, b)
}
}
return data
}
func AppendMatchField(data []byte, labels []string, group []string, q map[string]string) []byte {
if len(group) == 0 && len(q) == 0 {
return append(data, '*')
}
for i := 0; i < len(labels); i++ {
if i != 0 {
data = append(data, LabelSeparator)
}
label := labels[i]
data = AppendMatch(data, label)
data = append(data, LabelSeparator)
if indexOf(group, label) >= 0 {
data = append(data, '[', '^', NilByte, ']', '*')
continue
}
if q != nil {
if v, ok := q[label]; ok {
data = AppendMatch(data, v)
continue
}
}
data = append(data, '*')
}
data = append(data, FieldTerminator)
return data
}
func (db *DB) Query(queries ...Query) (Results, error) {
if len(queries) == 0 {
return Results{}, nil
}
scan := make(chan ScanResult, len(queries))
results := CollectResults(scan)
wg := new(sync.WaitGroup)
for _, q := range queries {
wg.Add(1)
go func(q Query) {
switch q.Mode {
case QueryModeExact:
db.ExactQuery(scan, q)
case QueryModeScan:
db.ScanQuery(scan, q)
}
wg.Done()
}(q)
}
wg.Wait()
close(scan)
if r := <-results; r != nil {
return r, nil
}
return Results{}, nil
}
// ExactQuery looks up exact counter values with pipelined HGETs for every
// permutation of the query's label values at every time step of the query's
// resolution, emitting one ScanResult per (values, time) pair on results.
func (db *DB) ExactQuery(results chan<- ScanResult, q Query) error {
	if err := q.Error(); err != nil {
		// Report the invalid query downstream and stop.
		// BUG FIX: previously the error was sent but execution fell through
		// and the query was executed anyway.
		results <- ScanResult{err: err}
		return err
	}
	var replies []*redis.StringCmd
	// Field/Key buffer, reused for both serializations.
	data := []byte{}
	res := q.Resolution
	ts := res.TimeSequence(q.Start, q.End)
	desc := q.Event.Describe()
	labels := desc.Labels()
	qValues := QueryPermutations(q.Values)
	pipeline := db.Redis.Pipeline()
	defer pipeline.Close()
	// Queue one HGET per (values, time) pair. replies lines up with the
	// qValues x ts iteration order used again below.
	for _, values := range qValues {
		data = AppendField(data[:0], labels, LabelValues(values).Values(labels))
		field := string(data)
		for _, tm := range ts {
			data = db.AppendKey(data[:0], res, desc.Name(), tm)
			key := string(data)
			replies = append(replies, pipeline.HGet(key, field))
		}
	}
	if len(replies) == 0 {
		return nil
	}
	// redis.Nil only means some fields were absent; that is not a failure.
	if _, err := pipeline.Exec(); err != nil && err != redis.Nil {
		return err
	}
	for i, values := range qValues {
		for j, tm := range ts {
			reply := replies[i*len(ts)+j]
			n, err := reply.Int64()
			results <- ScanResult{
				Name:   desc.Name(),
				Time:   tm,
				Values: values,
				count:  n,
				err:    err,
			}
		}
	}
	return nil
}
func (db *DB) ScanQuery(results chan<- ScanResult, q Query) (err error) {
if e := q.Error(); e != nil {
results <- ScanResult{err: e}
return
}
desc := q.Event.Describe()
result := ScanResult{
Name: desc.Name(),
Group: q.Group,
}
res := q.Resolution
ts := res.TimeSequence(q.Start, q.End)
if len(ts) == 0 {
return
}
qValues := QueryPermutations(q.Values)
if len(qValues) == 0 {
qValues = append(qValues, map[string]string{})
}
wg := &sync.WaitGroup{}
for _, values := range qValues {
result.Values = values
// data = AppendField(data, desc.Labels(), m.Values())
data := AppendMatchField(nil, desc.Labels(), q.Group, values)
match := string(data)
// Let redis client pool size determine parallel request blocking
for _, tm := range ts {
result.Time = tm
data = db.AppendKey(data[:0], res, desc.Name(), tm)
key := string(data)
wg.Add(1)
go func(r ScanResult, key string) {
db.Scan(key, match, r, results)
wg.Done()
}(result, key)
}
}
wg.Wait()
return nil
}
// parseField splits a field serialized by AppendField into its raw tokens,
// appending them to values and returning the extended slice. Tokens are
// delimited by LabelSeparator; a FieldTerminator also ends the current
// token. Any bytes after the last delimiter form a final token.
func parseField(values []string, field string) []string {
	offset := 0
	for i := 0; i < len(field); i++ {
		switch field[i] {
		// BUG FIX: the FieldTerminator case previously ended with a bare
		// `break`, which in Go exits only the switch (a no-op here), not the
		// loop — misleading dead code. Both cases do the same thing, so they
		// are merged; behavior is unchanged.
		case LabelSeparator, FieldTerminator:
			values = append(values, field[offset:i])
			offset = i + 1
		}
	}
	if offset < len(field) {
		values = append(values, field[offset:])
	}
	return values
}
// func fieldIndexOf(field []string, v string) int {
// n := len(field)
// n -= n % 2
// for i := 0; i < n; i += 2 {
// if field[i] == v {
// return i
// }
// }
// return -1
// }
// func parseGroup(pairs []string, group string) LabelValues {
// pairs = parseField(pairs, group)
// n := len(pairs)
// n -= n % 2
// values := make(map[string]string, n/2)
// for i := 0; i < n; i += 2 {
// if v := pairs[i+1]; v != sNilByte {
// values[pairs[i]] = pairs[i+1]
// }
// }
// return LabelValues(values)
// }
// func scanField(val []byte, field []string, group []string) ([]byte, bool) {
// for i := 0; i < len(group); i++ {
// if j := fieldIndexOf(field, group[i]); j < 0 || field[j+1] == sNilByte {
// return val, false
// } else {
// if i != 0 {
// val = append(val, LabelSeparator)
// }
// val = append(val, group[i]...)
// val = append(val, LabelSeparator)
// val = append(val, field[j+1]...)
// }
// }
// val = append(val, FieldTerminator)
// return val, true
// }
// const sLabelSeparator = "\x1f"
func (db *DB) Scan(key, match string, r ScanResult, results chan<- ScanResult) (err error) {
scan := db.Redis.HScan(key, 0, match, -1).Iterator()
i := 0
var pairs []string
group := len(r.Group) != 0
for scan.Next() {
if i%2 == 0 {
if group {
pairs = parseField(pairs[:0], scan.Val())
r.Values = FieldLabels(pairs)
}
} else {
r.count, r.err = strconv.ParseInt(scan.Val(), 10, 64)
results <- r
}
i++
}
if err = scan.Err(); err != nil {
return
}
return
}
type pair struct {
Label, Value string
Count int64
}
type FrequencyMap map[string]map[string]int64
// ValueScan return a frequency map of event label values
func (db *DB) ValueScan(event Descriptor, res Resolution, start, end time.Time) FrequencyMap {
desc := event.Describe()
labels := desc.Labels()
ch := make(chan pair, len(labels))
result := make(chan FrequencyMap)
go func() {
results := FrequencyMap{}
for i := 0; i < len(labels); i++ {
results[labels[i]] = make(map[string]int64)
}
for p := range ch {
results[p.Label][p.Value] += p.Count
}
result <- results
}()
wg := new(sync.WaitGroup)
data := []byte{}
ts := TimeSequence(start, end, res.Step())
for i := range ts {
wg.Add(1)
data = db.AppendKey(data[:0], res, desc.Name(), ts[i])
key := string(data)
go func(key string) {
var n int64
field := make([]string, len(labels))
scan := db.Redis.HScan(key, 0, "*", -1).Iterator()
i := 0
for scan.Next() {
if i%2 == 0 {
field = parseField(field[:0], scan.Val())
} else if n, _ = strconv.ParseInt(scan.Val(), 10, 64); n != 0 {
for j := 0; j < len(field); j += 2 {
if label, val := field[j], field[j+1]; val != sNilByte {
ch <- pair{label, val, n}
}
}
}
i++
}
wg.Done()
}(key)
}
wg.Wait()
close(ch)
return <-result
}
|
// Copyright 2014 The Cayley Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graph
// Defines one of the base iterators, the Fixed iterator. A fixed iterator is quite simple; it
// contains an explicit fixed array of values.
//
// A fixed iterator requires an Equality function to be passed to it, by reason that TSVal, the
// opaque Triple store value, may not answer to ==.
import (
"fmt"
"strings"
)
// FixedIterator is one of the base iterators: it holds an explicit, fixed
// list of values, a cursor recording how far Next() has advanced, and an
// equality function (TSVal is opaque and may not support ==).
type FixedIterator struct {
	BaseIterator
	values []TSVal      // the fixed set of values, in insertion order
	lastIndex int       // index of the next value Next() will return
	cmp Equality        // equality predicate used by Check()
}
// Equality is the signature of an equality function over triple-store values.
type Equality func(a, b TSVal) bool

// BasicEquality is an Equality of purely ==, which works for native types.
func BasicEquality(a, b TSVal) bool {
	// Simplified from `if a == b { return true }; return false` (staticcheck S1008).
	return a == b
}
// newFixedIterator builds a Fixed iterator that compares values with ==.
func newFixedIterator() *FixedIterator {
	return NewFixedIteratorWithCompare(BasicEquality)
}

// NewFixedIteratorWithCompare builds a Fixed iterator around a caller
// supplied comparison function.
func NewFixedIteratorWithCompare(compareFn Equality) *FixedIterator {
	it := new(FixedIterator)
	BaseIteratorInit(&it.BaseIterator)
	it.values = make([]TSVal, 0, 20)
	it.cmp = compareFn
	return it
}
// Reset rewinds the iterator so Next() starts from the first value again.
func (f *FixedIterator) Reset() {
	f.lastIndex = 0
}

// Close releases resources; a Fixed iterator holds none.
func (f *FixedIterator) Close() {
}

// Clone returns an independent copy with the same values, comparator and tags.
func (f *FixedIterator) Clone() Iterator {
	out := NewFixedIteratorWithCompare(f.cmp)
	for _, val := range f.values {
		out.AddValue(val)
	}
	out.CopyTagsFrom(f)
	return out
}

// Add a value to the iterator. The array now contains this value.
// TODO(barakmich): This ought to be a set someday, disallowing repeated values.
func (f *FixedIterator) AddValue(v TSVal) {
	f.values = append(f.values, v)
}
// DebugString prints some information about the iterator.
func (f *FixedIterator) DebugString(indent int) string {
	// BUG FIX: f.values[0] was indexed unconditionally, which crashed on an
	// empty fixed iterator (Fixes #3).
	var first TSVal
	if len(f.values) > 0 {
		first = f.values[0]
	}
	return fmt.Sprintf("%s(%s tags: %s Size: %d id0: %d)",
		strings.Repeat(" ", indent),
		f.Type(),
		f.FixedTags(),
		len(f.values),
		first)
}
// Type registers this iterator as a Fixed iterator; the returned string is
// its type name.
func (f *FixedIterator) Type() string {
	return "fixed"
}
// Check reports whether v equals (per f.cmp) one of the stored values,
// recording the matching value in f.Last on success.
func (f *FixedIterator) Check(v TSVal) bool {
	// Could be optimized by keeping it sorted or using a better datastructure.
	// However, for fixed iterators, which are by definition kind of tiny, this
	// isn't a big issue.
	CheckLogIn(f, v)
	for _, x := range f.values {
		if f.cmp(x, v) {
			f.Last = x
			return CheckLogOut(f, v, true)
		}
	}
	return CheckLogOut(f, v, false)
}
// Next returns the next stored value and true, or nil and false once all
// values have been produced. It advances the cursor and records the value
// in f.Last.
func (f *FixedIterator) Next() (TSVal, bool) {
	NextLogIn(f)
	if f.lastIndex == len(f.values) {
		return NextLogOut(f, nil, false)
	}
	out := f.values[f.lastIndex]
	f.Last = out
	f.lastIndex++
	return NextLogOut(f, out, true)
}
// Optimize() for a Fixed iterator is simple. Returns a Null iterator if it's empty
// (so that other iterators upstream can treat this as null) or there is no
// optimization.
//
// NOTE(review): the emptiness test is `len == 1 && values[0] == nil`, i.e. a
// single nil entry — a zero-length iterator is not rewritten; confirm that
// is intentional.
func (f *FixedIterator) Optimize() (Iterator, bool) {
	if len(f.values) == 1 && f.values[0] == nil {
		return &NullIterator{}, true
	}
	return f, false
}

// Size is the number of values stored; the count is exact.
func (f *FixedIterator) Size() (int64, bool) {
	return int64(len(f.values)), true
}
// GetStats reports the iterator's costs. As we right now have to scan the
// entire list, Next and Check are linear with the size. However, a better
// data structure could remove these limits.
//
// Receiver renamed from `a` to `f` for consistency with every other method
// on FixedIterator.
func (f *FixedIterator) GetStats() *IteratorStats {
	return &IteratorStats{
		CheckCost: int64(len(f.values)),
		NextCost:  int64(len(f.values)),
		Size:      int64(len(f.values)),
	}
}
Indexing into an empty fixed iterator for logging purposes no longer crashes. Fixes #3.
// Copyright 2014 The Cayley Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package graph
// Defines one of the base iterators, the Fixed iterator. A fixed iterator is quite simple; it
// contains an explicit fixed array of values.
//
// A fixed iterator requires an Equality function to be passed to it, by reason that TSVal, the
// opaque Triple store value, may not answer to ==.
import (
"fmt"
"strings"
)
// A Fixed iterator consists of its values, an index (where it is in the
// process of Next()ing) and an equality function.
type FixedIterator struct {
	BaseIterator
	values    []TSVal  // the explicit set of values to iterate over
	lastIndex int      // index of the next value Next() will return
	cmp       Equality // how two TSVals are compared
}

// Equality defines the signature of an equality function over TSVals.
type Equality func(a, b TSVal) bool
// BasicEquality is an equality function of purely ==, which works for
// native (comparable) types.
func BasicEquality(a, b TSVal) bool {
	// Return the comparison directly rather than branching to
	// `return true` / `return false`.
	return a == b
}
// newFixedIterator creates a new Fixed iterator based around == equality.
func newFixedIterator() *FixedIterator {
	return NewFixedIteratorWithCompare(BasicEquality)
}
// NewFixedIteratorWithCompare creates a new Fixed iterator with a custom
// comparator used to match stored values.
func NewFixedIteratorWithCompare(compareFn Equality) *FixedIterator {
	it := new(FixedIterator)
	BaseIteratorInit(&it.BaseIterator)
	it.cmp = compareFn
	it.lastIndex = 0
	it.values = make([]TSVal, 0, 20)
	return it
}
// Reset rewinds the iterator so Next() starts at the first value again.
func (f *FixedIterator) Reset() {
	f.lastIndex = 0
}
// Close is a no-op; a fixed iterator holds no external resources.
func (f *FixedIterator) Close() {
}
// Clone builds an independent copy with the same comparator, values and tags.
func (f *FixedIterator) Clone() Iterator {
	out := NewFixedIteratorWithCompare(f.cmp)
	for _, val := range f.values {
		out.AddValue(val)
	}
	out.CopyTagsFrom(f)
	return out
}
// AddValue appends v to the iterator. The array now contains this value.
// TODO(barakmich): This ought to be a set someday, disallowing repeated values.
func (f *FixedIterator) AddValue(v TSVal) {
	f.values = append(f.values, v)
}
// DebugString prints some information about the iterator. Only the first
// stored value (if any) is shown, to keep the output short.
func (f *FixedIterator) DebugString(indent int) string {
	value := ""
	if len(f.values) > 0 {
		value = fmt.Sprint(f.values[0])
	}
	// value is a string, so the verb must be %s; the original %d would
	// render as `%!d(string=...)`.
	return fmt.Sprintf("%s(%s tags: %s Size: %d id0: %s)",
		strings.Repeat(" ", indent),
		f.Type(),
		f.FixedTags(),
		len(f.values),
		value,
	)
}
// Type registers this iterator as a Fixed iterator.
func (f *FixedIterator) Type() string {
	return "fixed"
}
// Check reports whether v equals one of the stored values, recording the
// matching stored value in Last on a hit.
func (f *FixedIterator) Check(v TSVal) bool {
	// Could be optimized by keeping it sorted or using a better datastructure.
	// However, for fixed iterators, which are by definition kind of tiny, this
	// isn't a big issue.
	CheckLogIn(f, v)
	for _, x := range f.values {
		if f.cmp(x, v) {
			f.Last = x
			return CheckLogOut(f, v, true)
		}
	}
	return CheckLogOut(f, v, false)
}
// Next returns the next stored value and advances the internal index; the
// boolean result is false once all values have been produced.
func (f *FixedIterator) Next() (TSVal, bool) {
	NextLogIn(f)
	if f.lastIndex == len(f.values) {
		return NextLogOut(f, nil, false)
	}
	out := f.values[f.lastIndex]
	f.Last = out
	f.lastIndex++
	return NextLogOut(f, out, true)
}
// Optimize for a Fixed iterator is simple. Returns a Null iterator if the
// iterator holds only a single nil value (so that other iterators upstream
// can treat this as null); otherwise there is no optimization.
func (f *FixedIterator) Optimize() (Iterator, bool) {
	if len(f.values) == 1 && f.values[0] == nil {
		return &NullIterator{}, true
	}
	return f, false
}
// Size returns the exact number of values stored; the boolean indicates the
// count is exact.
func (f *FixedIterator) Size() (int64, bool) {
	return int64(len(f.values)), true
}
// GetStats reports the iterator's cost profile. As we right now have to
// scan the entire list, Next and Check are linear with the size; a better
// data structure could remove these limits.
//
// Receiver renamed from `a` to `f` for consistency with every other
// method on FixedIterator.
func (f *FixedIterator) GetStats() *IteratorStats {
	return &IteratorStats{
		CheckCost: int64(len(f.values)),
		NextCost:  int64(len(f.values)),
		Size:      int64(len(f.values)),
	}
}
|
package allthingstalk
import (
"github.com/gillesdemey/All-Things-Go/lib/allthingstalk/io"
"log"
)
// Device bundles the AllThingsTalk credentials for a device together with
// the IO devices attached to it.
type Device struct {
	DeviceId  string
	ClientId  string
	ClientKey string
	IODevices []*io.IODevice
}
/**
 * NewDevice is the constructor for a Device: it runs Setup (connecting to
 * the broker) and returns the same device.
 */
func NewDevice(device *Device) *Device {
	device.Setup()
	return device
}
/**
 * AddIODevice appends a generic IODevice to the device's list and
 * registers it as an asset; it returns the same IODevice.
 */
func (device *Device) AddIODevice(ioDevice *io.IODevice) *io.IODevice {
	device.IODevices = append(device.IODevices, ioDevice)
	device.RegisterAsset(ioDevice)
	return ioDevice
}
/**
 * NewLED adds an LED built from config to the device configuration.
 */
func (device *Device) NewLED(config *io.Config) *io.IODevice {
	led := io.NewLED(config)
	device.AddIODevice(led)
	return led
}
/**
 * NewButton adds a Button built from config to the device configuration.
 */
func (device *Device) NewButton(config *io.Config) *io.IODevice {
	button := io.NewButton(config)
	device.AddIODevice(button)
	return button
}
/**
 * Setup establishes the Broker socket connection for this device. It
 * aborts the whole process (log.Fatalf) when the broker is unreachable.
 */
func (device *Device) Setup() {
	_, err := NewBroker(device)
	if err != nil {
		log.Fatalf("Could not connect to broker: %s\n", err)
	}
	// Fixed typo in the log output ("Successfuly" -> "Successfully").
	log.Printf("Successfully connected to broker")
}
Added GetIODeviceById function with appropriate error handling
package allthingstalk
import (
"errors"
"github.com/gillesdemey/All-Things-Go/lib/allthingstalk/io"
"log"
)
/**
 * predefined errors
 */
// NOTFOUND is returned when a lookup matches no registered IO device.
var NOTFOUND = errors.New("NOTFOUND")

/**
 * Device bundles the AllThingsTalk credentials for a device together with
 * the IO devices attached to it.
 */
type Device struct {
	DeviceId  string
	ClientId  string
	ClientKey string
	IODevices []*io.IODevice
}
/**
 * NewDevice is the constructor for a Device: it runs Setup (connecting to
 * the broker) and returns the same device.
 */
func NewDevice(device *Device) *Device {
	device.Setup()
	return device
}
/**
 * AddIODevice appends a generic IODevice to the device's list and
 * registers it as an asset; it returns the same IODevice.
 */
func (device *Device) AddIODevice(ioDevice *io.IODevice) *io.IODevice {
	device.IODevices = append(device.IODevices, ioDevice)
	device.RegisterAsset(ioDevice)
	return ioDevice
}
/**
 * NewLED adds an LED built from config to the device configuration.
 */
func (device *Device) NewLED(config *io.Config) *io.IODevice {
	led := io.NewLED(config)
	device.AddIODevice(led)
	return led
}
/**
 * NewButton adds a Button built from config to the device configuration.
 */
func (device *Device) NewButton(config *io.Config) *io.IODevice {
	button := io.NewButton(config)
	device.AddIODevice(button)
	return button
}
/**
 * GetIODeviceById searches the devices' list for the IO device with the
 * specified unique id; it returns NOTFOUND when no device matches.
 */
func (device *Device) GetIODeviceById(id string) (*io.IODevice, error) {
	for i := range device.IODevices {
		if candidate := device.IODevices[i]; candidate.Id == id {
			return candidate, nil
		}
	}
	return nil, NOTFOUND
}
/**
 * Setup establishes the Broker socket connection for this device. It
 * aborts the whole process (log.Fatalf) when the broker is unreachable.
 */
func (device *Device) Setup() {
	_, err := NewBroker(device)
	if err != nil {
		log.Fatalf("Could not connect to broker: %s\n", err)
	}
	// Fixed typo in the log output ("Successfuly" -> "Successfully").
	log.Printf("Successfully connected to broker")
}
|
package backend
import (
"bytes"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/pkg/sftp"
workerctx "github.com/travis-ci/worker/lib/context"
"github.com/travis-ci/worker/lib/metrics"
"golang.org/x/crypto/ssh"
"golang.org/x/net/context"
)
// nonAlphaNumRegexp matches runs of characters outside [a-zA-Z0-9_]; used
// to normalize image alias names into config key fragments.
var nonAlphaNumRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)

// wrapperSh is uploaded next to build.sh. On Linux it execs build.sh
// directly; otherwise it waits for a connection on port 15782, then for
// build.sh.exit to appear, and exits with the recorded status.
const wrapperSh = `#!/bin/bash
[[ $(uname) = Linux ]] && exec bash ~/build.sh
[[ -f ~/build.sh.exit ]] && rm ~/build.sh.exit
until nc 127.0.0.1 15782; do sleep 1; done
until [[ -f ~/build.sh.exit ]]; do sleep 1; done
exit $(cat ~/build.sh.exit)
`
// JupiterBrainProvider creates and manages VM instances through the
// Jupiter Brain HTTP API.
type JupiterBrainProvider struct {
	client           *http.Client
	baseURL          *url.URL
	imageAliases     map[string]string
	sshKeyPath       string
	sshKeyPassphrase string
	keychainPassword string
}

// JupiterBrainInstance is one booted VM plus the provider that owns it.
type JupiterBrainInstance struct {
	payload  jupiterBrainInstancePayload
	provider *JupiterBrainProvider
}

// jupiterBrainInstancePayload mirrors a single instance object in API
// responses.
type jupiterBrainInstancePayload struct {
	ID          string   `json:"id"`
	IpAddresses []string `json:"ip-addresses"`
	State       string   `json:"state"`
	BaseImage   string   `json:"base-image,omitempty"`
	Type        string   `json:"type,omitempty"`
}

// jupiterBrainDataResponse is the top-level {"data": [...]} envelope.
type jupiterBrainDataResponse struct {
	Data []jupiterBrainInstancePayload `json:"data"`
}
// NewJupiterBrainProvider builds a provider from a flat string config map.
// Required keys: endpoint, image_aliases (comma-separated), one
// image_alias_<normalized name> entry per alias, ssh_key_path,
// ssh_key_passphrase and keychain_password. Any missing key is an error.
func NewJupiterBrainProvider(config map[string]string) (*JupiterBrainProvider, error) {
	endpoint, ok := config["endpoint"]
	if !ok {
		return nil, fmt.Errorf("expected endpoint config key")
	}
	baseURL, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	aliasNames, ok := config["image_aliases"]
	if !ok {
		return nil, fmt.Errorf("expected image_aliases config key")
	}
	// NOTE(review): len(aliasNames) is the byte length of the raw string,
	// not the number of aliases; it only serves as a capacity hint.
	imageAliases := make(map[string]string, len(aliasNames))
	for _, aliasName := range strings.Split(aliasNames, ",") {
		// Alias names are normalized to [a-zA-Z0-9_] to form the config key.
		normalizedAliasName := string(nonAlphaNumRegexp.ReplaceAll([]byte(aliasName), []byte("_")))
		imageName, ok := config[fmt.Sprintf("image_alias_%s", normalizedAliasName)]
		if !ok {
			return nil, fmt.Errorf("expected image alias %q", aliasName)
		}
		imageAliases[aliasName] = imageName
	}
	sshKeyPath, ok := config["ssh_key_path"]
	if !ok {
		return nil, fmt.Errorf("expected ssh_key_path config key")
	}
	sshKeyPassphrase, ok := config["ssh_key_passphrase"]
	if !ok {
		return nil, fmt.Errorf("expected ssh_key_passphrase config key")
	}
	keychainPassword, ok := config["keychain_password"]
	if !ok {
		return nil, fmt.Errorf("expected keychain_password config key")
	}
	return &JupiterBrainProvider{
		client:           http.DefaultClient,
		baseURL:          baseURL,
		imageAliases:     imageAliases,
		sshKeyPath:       sshKeyPath,
		sshKeyPassphrase: sshKeyPassphrase,
		keychainPassword: keychainPassword,
	}, nil
}
// Start boots a new instance through the Jupiter Brain API and polls it in
// a goroutine until it has an IPv4 address accepting TCP connections on
// port 22, the poller reports an error, or ctx is done.
//
// Fixes: the polling loop spun without any delay (hammering the API) when
// the instance had no IPv4 address yet, and both dataPayload.Data[0]
// accesses would panic on an empty data array.
func (p *JupiterBrainProvider) Start(ctx context.Context, startAttributes StartAttributes) (Instance, error) {
	u, err := p.baseURL.Parse("instances")
	if err != nil {
		return nil, err
	}
	imageName, ok := p.imageAliases[startAttributes.OsxImage]
	if !ok {
		// Fall back to the "default" alias for unknown images.
		imageName, _ = p.imageAliases["default"]
	}
	if imageName == "" {
		return nil, fmt.Errorf("no image alias for %s", startAttributes.OsxImage)
	}
	startBooting := time.Now()
	bodyPayload := map[string]map[string]string{
		"data": {
			"type":       "instances",
			"base-image": imageName,
		},
	}
	jsonBody, err := json.Marshal(bodyPayload)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", u.String(), bytes.NewReader(jsonBody))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/vnd.api+json")
	resp, err := p.httpDo(req)
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the transport can reuse the connection.
	defer io.Copy(ioutil.Discard, resp.Body)
	defer resp.Body.Close()
	if c := resp.StatusCode; c < 200 || c >= 300 {
		body, _ := ioutil.ReadAll(resp.Body)
		return nil, fmt.Errorf("expected 2xx from Jupiter Brain API, got %d (error: %s)", c, body)
	}
	dataPayload := &jupiterBrainDataResponse{}
	err = json.NewDecoder(resp.Body).Decode(dataPayload)
	if err != nil {
		workerctx.LoggerFromContext(ctx).WithFields(logrus.Fields{
			"err":     err,
			"payload": dataPayload,
			"body":    resp.Body,
		}).Error("couldn't decode created payload")
		return nil, fmt.Errorf("couldn't decode created payload: %s", err)
	}
	// Guard the index below; an empty data array would otherwise panic.
	if len(dataPayload.Data) == 0 {
		return nil, fmt.Errorf("no instance data in created payload")
	}
	payload := dataPayload.Data[0]
	instanceReady := make(chan jupiterBrainInstancePayload, 1)
	errChan := make(chan error, 1)
	go func(id string) {
		u, err := p.baseURL.Parse(fmt.Sprintf("instances/%s", url.QueryEscape(id)))
		if err != nil {
			errChan <- err
			return
		}
		req, err := http.NewRequest("GET", u.String(), nil)
		if err != nil {
			errChan <- err
			return
		}
		for {
			resp, err := p.httpDo(req)
			if err != nil {
				errChan <- err
				return
			}
			if resp.StatusCode != 200 {
				body, _ := ioutil.ReadAll(resp.Body)
				errChan <- fmt.Errorf("unknown status code: %d, expected 200 (body: %q)", resp.StatusCode, string(body))
				return
			}
			dataPayload := &jupiterBrainDataResponse{}
			err = json.NewDecoder(resp.Body).Decode(dataPayload)
			if err != nil {
				errChan <- fmt.Errorf("couldn't decode refresh payload: %s", err)
				return
			}
			if len(dataPayload.Data) == 0 {
				errChan <- fmt.Errorf("no instance data in refresh payload")
				return
			}
			payload := dataPayload.Data[0]
			_, _ = io.Copy(ioutil.Discard, resp.Body)
			_ = resp.Body.Close()
			var ip net.IP
			for _, ipString := range payload.IpAddresses {
				curIp := net.ParseIP(ipString)
				if curIp.To4() != nil {
					ip = curIp
					break
				}
			}
			if ip == nil {
				// No IPv4 address yet: back off briefly instead of
				// re-polling the API in a tight loop.
				time.Sleep(100 * time.Millisecond)
				continue
			}
			conn, err := net.Dial("tcp", fmt.Sprintf("%s:22", ip.String()))
			if conn != nil {
				conn.Close()
			}
			if err == nil {
				instanceReady <- payload
				return
			}
			time.Sleep(time.Second)
		}
	}(payload.ID)
	select {
	case payload := <-instanceReady:
		metrics.TimeSince("worker.vm.provider.jupiterbrain.boot", startBooting)
		workerctx.LoggerFromContext(ctx).WithField("instance_uuid", payload.ID).Info("booted instance")
		return &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}, nil
	case err := <-errChan:
		instance := &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}
		instance.Stop(ctx)
		return nil, err
	case <-ctx.Done():
		if ctx.Err() == context.DeadlineExceeded {
			metrics.Mark("worker.vm.provider.jupiterbrain.boot.timeout")
		}
		instance := &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}
		instance.Stop(ctx)
		return nil, ctx.Err()
	}
}
// httpDo strips any userinfo from the request URL and turns its username
// into a "token ..." Authorization header before sending the request.
func (p *JupiterBrainProvider) httpDo(req *http.Request) (*http.Response, error) {
	if req.URL.User != nil {
		token := req.URL.User.Username()
		req.URL.User = nil
		req.Header.Set("Authorization", "token "+token)
	}
	return p.client.Do(req)
}
// UploadScript copies the build script (build.sh) and the wrapper script
// (wrapper.sh) onto the instance over SFTP.
func (i *JupiterBrainInstance) UploadScript(ctx context.Context, script []byte) error {
	client, err := i.sshClient()
	if err != nil {
		return err
	}
	defer client.Close()
	sftp, err := sftp.NewClient(client)
	if err != nil {
		return err
	}
	defer sftp.Close()
	f, err := sftp.Create("build.sh")
	if err != nil {
		return err
	}
	_, err = f.Write(script)
	if err != nil {
		return err
	}
	f, err = sftp.Create("wrapper.sh")
	if err != nil {
		return err
	}
	// The original used fmt.Fprintf(f, wrapperSh), which treats the script
	// as a printf format string; any '%' added to wrapperSh would corrupt
	// the upload. Write it verbatim instead.
	_, err = io.WriteString(f, wrapperSh)
	return err
}
// RunScript executes the previously uploaded wrapper script over SSH,
// streaming combined stdout/stderr into output. An *ssh.ExitError still
// counts as a completed run; its status becomes the ExitCode.
func (i *JupiterBrainInstance) RunScript(ctx context.Context, output io.WriteCloser) (RunResult, error) {
	client, err := i.sshClient()
	if err != nil {
		return RunResult{Completed: false}, err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return RunResult{Completed: false}, err
	}
	defer session.Close()
	err = session.RequestPty("xterm", 80, 40, ssh.TerminalModes{})
	if err != nil {
		return RunResult{Completed: false}, err
	}
	session.Stdout = output
	session.Stderr = output
	err = session.Run("bash ~/wrapper.sh")
	// NOTE(review): output is only closed once Run has returned; the
	// earlier error paths return without closing it — confirm callers
	// handle that.
	defer output.Close()
	if err == nil {
		return RunResult{Completed: true, ExitCode: 0}, nil
	}
	switch err := err.(type) {
	case *ssh.ExitError:
		return RunResult{Completed: true, ExitCode: uint8(err.ExitStatus())}, nil
	default:
		return RunResult{Completed: false}, err
	}
}
// Stop deletes the instance through the Jupiter Brain API.
func (i *JupiterBrainInstance) Stop(ctx context.Context) error {
	u, err := i.provider.baseURL.Parse(fmt.Sprintf("instances/%s", url.QueryEscape(i.payload.ID)))
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", u.String(), nil)
	if err != nil {
		return err
	}
	resp, err := i.provider.httpDo(req)
	if err != nil {
		// resp is nil on transport error; the original dereferenced it
		// unconditionally and would panic here.
		return err
	}
	// Drain and close so the connection can be reused.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
	return nil
}
// sshClient dials an SSH connection to the instance's first IPv4 address
// as user "travis", authenticating with the provider's
// passphrase-encrypted RSA private key.
func (i *JupiterBrainInstance) sshClient() (*ssh.Client, error) {
	file, err := ioutil.ReadFile(i.provider.sshKeyPath)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(file)
	if block == nil {
		return nil, fmt.Errorf("ssh key does not contain a valid PEM block")
	}
	// The key is stored encrypted; decrypt with the configured passphrase.
	der, err := x509.DecryptPEMBlock(block, []byte(i.provider.sshKeyPassphrase))
	if err != nil {
		return nil, err
	}
	key, err := x509.ParsePKCS1PrivateKey(der)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		return nil, err
	}
	// Pick the first IPv4 address reported for the instance.
	var ip net.IP
	for _, ipString := range i.payload.IpAddresses {
		curIp := net.ParseIP(ipString)
		if curIp.To4() != nil {
			ip = curIp
			break
		}
	}
	if ip == nil {
		return nil, fmt.Errorf("no valid IPv4 address")
	}
	return ssh.Dial("tcp", fmt.Sprintf("%s:22", ip.String()), &ssh.ClientConfig{
		User: "travis",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
	})
}
Sleep a small amount between loops if IP isn't available yet
package backend
import (
"bytes"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
"regexp"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/pkg/sftp"
workerctx "github.com/travis-ci/worker/lib/context"
"github.com/travis-ci/worker/lib/metrics"
"golang.org/x/crypto/ssh"
"golang.org/x/net/context"
)
// nonAlphaNumRegexp matches runs of characters outside [a-zA-Z0-9_]; used
// to normalize image alias names into config key fragments.
var nonAlphaNumRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)

// wrapperSh is uploaded next to build.sh. On Linux it execs build.sh
// directly; otherwise it waits for a connection on port 15782, then for
// build.sh.exit to appear, and exits with the recorded status.
const wrapperSh = `#!/bin/bash
[[ $(uname) = Linux ]] && exec bash ~/build.sh
[[ -f ~/build.sh.exit ]] && rm ~/build.sh.exit
until nc 127.0.0.1 15782; do sleep 1; done
until [[ -f ~/build.sh.exit ]]; do sleep 1; done
exit $(cat ~/build.sh.exit)
`
// JupiterBrainProvider creates and manages VM instances through the
// Jupiter Brain HTTP API.
type JupiterBrainProvider struct {
	client           *http.Client
	baseURL          *url.URL
	imageAliases     map[string]string
	sshKeyPath       string
	sshKeyPassphrase string
	keychainPassword string
}

// JupiterBrainInstance is one booted VM plus the provider that owns it.
type JupiterBrainInstance struct {
	payload  jupiterBrainInstancePayload
	provider *JupiterBrainProvider
}

// jupiterBrainInstancePayload mirrors a single instance object in API
// responses.
type jupiterBrainInstancePayload struct {
	ID          string   `json:"id"`
	IpAddresses []string `json:"ip-addresses"`
	State       string   `json:"state"`
	BaseImage   string   `json:"base-image,omitempty"`
	Type        string   `json:"type,omitempty"`
}

// jupiterBrainDataResponse is the top-level {"data": [...]} envelope.
type jupiterBrainDataResponse struct {
	Data []jupiterBrainInstancePayload `json:"data"`
}
// NewJupiterBrainProvider builds a provider from a flat string config map.
// Required keys: endpoint, image_aliases (comma-separated), one
// image_alias_<normalized name> entry per alias, ssh_key_path,
// ssh_key_passphrase and keychain_password. Any missing key is an error.
func NewJupiterBrainProvider(config map[string]string) (*JupiterBrainProvider, error) {
	endpoint, ok := config["endpoint"]
	if !ok {
		return nil, fmt.Errorf("expected endpoint config key")
	}
	baseURL, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	aliasNames, ok := config["image_aliases"]
	if !ok {
		return nil, fmt.Errorf("expected image_aliases config key")
	}
	// NOTE(review): len(aliasNames) is the byte length of the raw string,
	// not the number of aliases; it only serves as a capacity hint.
	imageAliases := make(map[string]string, len(aliasNames))
	for _, aliasName := range strings.Split(aliasNames, ",") {
		// Alias names are normalized to [a-zA-Z0-9_] to form the config key.
		normalizedAliasName := string(nonAlphaNumRegexp.ReplaceAll([]byte(aliasName), []byte("_")))
		imageName, ok := config[fmt.Sprintf("image_alias_%s", normalizedAliasName)]
		if !ok {
			return nil, fmt.Errorf("expected image alias %q", aliasName)
		}
		imageAliases[aliasName] = imageName
	}
	sshKeyPath, ok := config["ssh_key_path"]
	if !ok {
		return nil, fmt.Errorf("expected ssh_key_path config key")
	}
	sshKeyPassphrase, ok := config["ssh_key_passphrase"]
	if !ok {
		return nil, fmt.Errorf("expected ssh_key_passphrase config key")
	}
	keychainPassword, ok := config["keychain_password"]
	if !ok {
		return nil, fmt.Errorf("expected keychain_password config key")
	}
	return &JupiterBrainProvider{
		client:           http.DefaultClient,
		baseURL:          baseURL,
		imageAliases:     imageAliases,
		sshKeyPath:       sshKeyPath,
		sshKeyPassphrase: sshKeyPassphrase,
		keychainPassword: keychainPassword,
	}, nil
}
// Start boots a new instance through the Jupiter Brain API and polls it in
// a goroutine until it has an IPv4 address accepting TCP connections on
// port 22, the poller reports an error, or ctx is done.
//
// Fixes: both dataPayload.Data[0] accesses would panic if the API ever
// returned an empty data array; they are now guarded.
func (p *JupiterBrainProvider) Start(ctx context.Context, startAttributes StartAttributes) (Instance, error) {
	u, err := p.baseURL.Parse("instances")
	if err != nil {
		return nil, err
	}
	imageName, ok := p.imageAliases[startAttributes.OsxImage]
	if !ok {
		// Fall back to the "default" alias for unknown images.
		imageName, _ = p.imageAliases["default"]
	}
	if imageName == "" {
		return nil, fmt.Errorf("no image alias for %s", startAttributes.OsxImage)
	}
	startBooting := time.Now()
	bodyPayload := map[string]map[string]string{
		"data": {
			"type":       "instances",
			"base-image": imageName,
		},
	}
	jsonBody, err := json.Marshal(bodyPayload)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("POST", u.String(), bytes.NewReader(jsonBody))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/vnd.api+json")
	resp, err := p.httpDo(req)
	if err != nil {
		return nil, err
	}
	// Drain and close the body so the transport can reuse the connection.
	defer io.Copy(ioutil.Discard, resp.Body)
	defer resp.Body.Close()
	if c := resp.StatusCode; c < 200 || c >= 300 {
		body, _ := ioutil.ReadAll(resp.Body)
		return nil, fmt.Errorf("expected 2xx from Jupiter Brain API, got %d (error: %s)", c, body)
	}
	dataPayload := &jupiterBrainDataResponse{}
	err = json.NewDecoder(resp.Body).Decode(dataPayload)
	if err != nil {
		workerctx.LoggerFromContext(ctx).WithFields(logrus.Fields{
			"err":     err,
			"payload": dataPayload,
			"body":    resp.Body,
		}).Error("couldn't decode created payload")
		return nil, fmt.Errorf("couldn't decode created payload: %s", err)
	}
	// Guard the index below; an empty data array would otherwise panic.
	if len(dataPayload.Data) == 0 {
		return nil, fmt.Errorf("no instance data in created payload")
	}
	payload := dataPayload.Data[0]
	instanceReady := make(chan jupiterBrainInstancePayload, 1)
	errChan := make(chan error, 1)
	go func(id string) {
		u, err := p.baseURL.Parse(fmt.Sprintf("instances/%s", url.QueryEscape(id)))
		if err != nil {
			errChan <- err
			return
		}
		req, err := http.NewRequest("GET", u.String(), nil)
		if err != nil {
			errChan <- err
			return
		}
		for {
			resp, err := p.httpDo(req)
			if err != nil {
				errChan <- err
				return
			}
			if resp.StatusCode != 200 {
				body, _ := ioutil.ReadAll(resp.Body)
				errChan <- fmt.Errorf("unknown status code: %d, expected 200 (body: %q)", resp.StatusCode, string(body))
				return
			}
			dataPayload := &jupiterBrainDataResponse{}
			err = json.NewDecoder(resp.Body).Decode(dataPayload)
			if err != nil {
				errChan <- fmt.Errorf("couldn't decode refresh payload: %s", err)
				return
			}
			if len(dataPayload.Data) == 0 {
				errChan <- fmt.Errorf("no instance data in refresh payload")
				return
			}
			payload := dataPayload.Data[0]
			_, _ = io.Copy(ioutil.Discard, resp.Body)
			_ = resp.Body.Close()
			var ip net.IP
			for _, ipString := range payload.IpAddresses {
				curIp := net.ParseIP(ipString)
				if curIp.To4() != nil {
					ip = curIp
					break
				}
			}
			if ip == nil {
				// No IPv4 address yet: back off briefly instead of
				// re-polling the API in a tight loop.
				time.Sleep(100 * time.Millisecond)
				continue
			}
			conn, err := net.Dial("tcp", fmt.Sprintf("%s:22", ip.String()))
			if conn != nil {
				conn.Close()
			}
			if err == nil {
				instanceReady <- payload
				return
			}
			time.Sleep(time.Second)
		}
	}(payload.ID)
	select {
	case payload := <-instanceReady:
		metrics.TimeSince("worker.vm.provider.jupiterbrain.boot", startBooting)
		workerctx.LoggerFromContext(ctx).WithField("instance_uuid", payload.ID).Info("booted instance")
		return &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}, nil
	case err := <-errChan:
		instance := &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}
		instance.Stop(ctx)
		return nil, err
	case <-ctx.Done():
		if ctx.Err() == context.DeadlineExceeded {
			metrics.Mark("worker.vm.provider.jupiterbrain.boot.timeout")
		}
		instance := &JupiterBrainInstance{
			payload:  payload,
			provider: p,
		}
		instance.Stop(ctx)
		return nil, ctx.Err()
	}
}
// httpDo strips any userinfo from the request URL and turns its username
// into a "token ..." Authorization header before sending the request.
func (p *JupiterBrainProvider) httpDo(req *http.Request) (*http.Response, error) {
	if req.URL.User != nil {
		token := req.URL.User.Username()
		req.URL.User = nil
		req.Header.Set("Authorization", "token "+token)
	}
	return p.client.Do(req)
}
// UploadScript copies the build script (build.sh) and the wrapper script
// (wrapper.sh) onto the instance over SFTP.
func (i *JupiterBrainInstance) UploadScript(ctx context.Context, script []byte) error {
	client, err := i.sshClient()
	if err != nil {
		return err
	}
	defer client.Close()
	sftp, err := sftp.NewClient(client)
	if err != nil {
		return err
	}
	defer sftp.Close()
	f, err := sftp.Create("build.sh")
	if err != nil {
		return err
	}
	_, err = f.Write(script)
	if err != nil {
		return err
	}
	f, err = sftp.Create("wrapper.sh")
	if err != nil {
		return err
	}
	// The original used fmt.Fprintf(f, wrapperSh), which treats the script
	// as a printf format string; any '%' added to wrapperSh would corrupt
	// the upload. Write it verbatim instead.
	_, err = io.WriteString(f, wrapperSh)
	return err
}
// RunScript executes the previously uploaded wrapper script over SSH,
// streaming combined stdout/stderr into output. An *ssh.ExitError still
// counts as a completed run; its status becomes the ExitCode.
func (i *JupiterBrainInstance) RunScript(ctx context.Context, output io.WriteCloser) (RunResult, error) {
	client, err := i.sshClient()
	if err != nil {
		return RunResult{Completed: false}, err
	}
	defer client.Close()
	session, err := client.NewSession()
	if err != nil {
		return RunResult{Completed: false}, err
	}
	defer session.Close()
	err = session.RequestPty("xterm", 80, 40, ssh.TerminalModes{})
	if err != nil {
		return RunResult{Completed: false}, err
	}
	session.Stdout = output
	session.Stderr = output
	err = session.Run("bash ~/wrapper.sh")
	// NOTE(review): output is only closed once Run has returned; the
	// earlier error paths return without closing it — confirm callers
	// handle that.
	defer output.Close()
	if err == nil {
		return RunResult{Completed: true, ExitCode: 0}, nil
	}
	switch err := err.(type) {
	case *ssh.ExitError:
		return RunResult{Completed: true, ExitCode: uint8(err.ExitStatus())}, nil
	default:
		return RunResult{Completed: false}, err
	}
}
// Stop deletes the instance through the Jupiter Brain API.
func (i *JupiterBrainInstance) Stop(ctx context.Context) error {
	u, err := i.provider.baseURL.Parse(fmt.Sprintf("instances/%s", url.QueryEscape(i.payload.ID)))
	if err != nil {
		return err
	}
	req, err := http.NewRequest("DELETE", u.String(), nil)
	if err != nil {
		return err
	}
	resp, err := i.provider.httpDo(req)
	if err != nil {
		// resp is nil on transport error; the original dereferenced it
		// unconditionally and would panic here.
		return err
	}
	// Drain and close so the connection can be reused.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
	return nil
}
// sshClient dials an SSH connection to the instance's first IPv4 address
// as user "travis", authenticating with the provider's
// passphrase-encrypted RSA private key.
func (i *JupiterBrainInstance) sshClient() (*ssh.Client, error) {
	file, err := ioutil.ReadFile(i.provider.sshKeyPath)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(file)
	if block == nil {
		return nil, fmt.Errorf("ssh key does not contain a valid PEM block")
	}
	// The key is stored encrypted; decrypt with the configured passphrase.
	der, err := x509.DecryptPEMBlock(block, []byte(i.provider.sshKeyPassphrase))
	if err != nil {
		return nil, err
	}
	key, err := x509.ParsePKCS1PrivateKey(der)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		return nil, err
	}
	// Pick the first IPv4 address reported for the instance.
	var ip net.IP
	for _, ipString := range i.payload.IpAddresses {
		curIp := net.ParseIP(ipString)
		if curIp.To4() != nil {
			ip = curIp
			break
		}
	}
	if ip == nil {
		return nil, fmt.Errorf("no valid IPv4 address")
	}
	return ssh.Dial("tcp", fmt.Sprintf("%s:22", ip.String()), &ssh.ClientConfig{
		User: "travis",
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
	})
}
|
// Copyright 2013 Péter Surányi. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pathlist
import (
"runtime"
"strings"
"testing"
"gopkg.in/pathlist.v0/internal"
)
// Compile-time check that internal.Error satisfies the public Error interface.
var _ Error = internal.Error{}

// constTests pairs each exported error constant with its internal source.
var constTests = []struct {
	exprStr, expr, want string
}{
	{exprStr: "ErrQuote", expr: ErrQuote, want: internal.ErrQuote},
	{exprStr: "ErrSep", expr: ErrSep, want: internal.ErrSep},
}
// TestConst ensures that the exported error constants don't diverge from
// the internal package's values.
func TestConst(t *testing.T) {
	for _, tt := range constTests {
		if tt.expr != tt.want {
			t.Errorf("%s = %q; want %q", tt.exprStr, tt.expr, tt.want)
		} else {
			t.Logf("%s = %q", tt.exprStr, tt.expr)
		}
	}
}
// colonToSep rewrites every ':' in a test-table list to the platform's
// ListSeparator so tables can be written portably.
func colonToSep(list List) List {
	return List(strings.Replace(string(list), ":", string(ListSeparator), -1))
}
// equiv reports whether two element lists denote the same path list,
// treating any run of consecutive empty strings as a single empty string.
func equiv(a, b []string) bool {
	// skipEmpties advances i to the last element of a run of empty
	// strings in l (a no-op when l[i] is non-empty or the run ends).
	skipEmpties := func(l []string, i int) int {
		for i < len(l)-1 && l[i] == "" && l[i+1] == "" {
			i++
		}
		return i
	}
	ia, ib := 0, 0
	for ia < len(a) && ib < len(b) {
		ia = skipEmpties(a, ia)
		ib = skipEmpties(b, ib)
		if a[ia] != b[ib] {
			return false
		}
		ia++
		ib++
	}
	// Both lists must be fully consumed for equivalence.
	return ia == len(a) && ib == len(b)
}
// newTest describes one New call: the inputs, whether it should succeed,
// and the expected list written with ':' as the separator.
type newTest struct {
	filepaths []string
	ok        bool
	list      List
}

var newTests = []newTest{
	{[]string{}, true, ""},
	{[]string{""}, true, ":"},
	{[]string{"a"}, true, "a"},
	{[]string{"", ""}, true, ":"},
	{[]string{"a", ""}, true, "a:"},
	{[]string{"", "b"}, true, ":b"},
	{[]string{"a", "b"}, true, "a:b"},
}

// On Unix/Plan 9, a filepath containing the separator is rejected.
var newTestsUnixPlan9 = []newTest{
	{[]string{":"}, false, ""},
}

// On Windows, separators are quoted; a literal '"' is invalid.
var newTestsWindows = []newTest{
	{[]string{`a;b`}, true, `"a;b"`},
	{[]string{`"a"`}, false, ""},
}
// TestNew exercises New across the shared table plus the OS-specific
// tables selected by runtime.GOOS.
func TestNew(t *testing.T) {
	tests := newTests
	switch runtime.GOOS {
	case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd",
		"plan9", "solaris":
		tests = append(tests, newTestsUnixPlan9...)
	case "windows":
		tests = append(tests, newTestsWindows...)
	}
	for _, tt := range tests {
		exp := colonToSep(tt.list)
		l, err := New(tt.filepaths...)
		switch {
		case tt.ok && err != nil:
			t.Errorf("New(%q) = %v, %v; want %#q, nil",
				tt.filepaths, l, err, exp)
		case !tt.ok && err == nil:
			t.Errorf("New(%q) = %#q, %v; want error", tt.filepaths, l, err)
		case l != exp:
			t.Errorf("New(%q) = %#q, %v; want %#q, nil",
				tt.filepaths, l, err, exp)
		default:
			t.Logf("New(%q) = %q, %v", tt.filepaths, l, err)
		}
	}
}
// appendToTest describes one input pair and the expected results of
// AppendTo ("appended") and PrependTo ("prepended"), written with ':' in
// place of the platform separator.
type appendToTest struct {
	list      List
	filepaths []string
	appended  List
	prepended List
}

var appendToTests = []appendToTest{
	{"", []string{}, "", ""},
	{"", []string{""}, ":", ":"},
	{"", []string{"c"}, "c", "c"},
	{"", []string{"c", "d"}, "c:d", "c:d"},
	{"a", []string{}, "a", "a"},
	{"a", []string{""}, "a:", ":a"},
	{"a", []string{"c"}, "a:c", "c:a"},
	{"a", []string{"c", "d"}, "a:c:d", "c:d:a"},
	{":", []string{""}, ":", ":"},
	{":", []string{"c"}, ":c", "c:"},
	{":", []string{"c", "d"}, ":c:d", "c:d:"},
	{"a:b", []string{}, "a:b", "a:b"},
	{"a:b", []string{""}, "a:b:", ":a:b"},
	{"a:b", []string{"c"}, "a:b:c", "c:a:b"},
	{"a:b", []string{"c", "d"}, "a:b:c:d", "c:d:a:b"},
	{"a:", []string{}, "a:", "a:"},
	{"a:", []string{""}, "a:", ":a:"},
	{"a:", []string{"c"}, "a::c", "c:a:"},
	{"a:", []string{"c", "d"}, "a::c:d", "c:d:a:"},
	{":b", []string{}, ":b", ":b"},
	{":b", []string{""}, ":b:", ":b"},
	{":b", []string{"c"}, ":b:c", "c::b"},
	{":b", []string{"c", "d"}, ":b:c:d", "c:d::b"},
	{"::", []string{}, "::", "::"},
	{"::", []string{""}, "::", "::"},
	{"::", []string{"c"}, "::c", "c::"},
	{"::", []string{"c", "d"}, "::c:d", "c:d::"},
	{"a::", []string{}, "a::", "a::"},
	{"a::", []string{""}, "a::", ":a::"},
	{"a::", []string{"c"}, "a::c", "c:a::"},
	{"a::", []string{"c", "d"}, "a::c:d", "c:d:a::"},
	{"::b", []string{}, "::b", "::b"},
	{"::b", []string{""}, "::b:", "::b"},
	{"::b", []string{"c"}, "::b:c", "c::b"},
	{"::b", []string{"c", "d"}, "::b:c:d", "c:d::b"},
}
// TestAppendTo runs every table case through both AppendTo and PrependTo.
func TestAppendTo(t *testing.T) {
	for _, tt := range appendToTests {
		testAppendToCase(t, tt)
	}
}

// testAppendToCase checks one table entry; results are compared with equiv
// so equivalent-but-not-identical separator runs still pass.
func testAppendToCase(t *testing.T, tt appendToTest) {
	appended, aerr := AppendTo(tt.list, tt.filepaths...)
	switch {
	case aerr != nil:
		t.Errorf("AppendTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
			tt.list, tt.filepaths, appended, aerr, tt.appended)
	case !equiv(Split(tt.appended), Split(appended)):
		t.Errorf("AppendTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
			tt.list, tt.filepaths, appended, aerr, tt.appended)
	default:
		t.Logf("AppendTo(%q, %q) = %#q, %v",
			tt.list, tt.filepaths, appended, aerr)
	}
	prepended, perr := PrependTo(tt.list, tt.filepaths...)
	switch {
	case perr != nil:
		t.Errorf("PrependTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
			tt.list, tt.filepaths, prepended, perr, tt.prepended)
	case !equiv(Split(tt.prepended), Split(prepended)):
		t.Errorf("PrependTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
			tt.list, tt.filepaths, prepended, perr, tt.prepended)
	default:
		t.Logf("PrependTo(%q, %q) = %#q, %v",
			tt.list, tt.filepaths, prepended, perr)
	}
}
// TestAppendToCloseQuote (Windows only) checks that an unbalanced quote in
// the existing list is closed before new filepaths are attached.
func TestAppendToCloseQuote(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.Skip("windows-only test")
	}
	l := List(`a"a`)
	fp := "b"
	{ // AppendTo
		want := List(`a"a";b`)
		got, err := AppendTo(l, fp)
		if err != nil || got != want {
			t.Errorf("AppendTo(%#q, %q) = %#q, %v; want %#q, nil", l, fp, got, err, want)
		} else {
			t.Logf("AppendTo(%#q, %q) = %#q, %v", l, fp, got, err)
		}
	}
	{ // PrependTo
		want := List(`b;a"a"`)
		got, err := PrependTo(l, fp)
		if err != nil || got != want {
			t.Errorf("PrependTo(%#q, %q) = %#q, %v; want %#q, nil", l, fp, got, err, want)
		} else {
			t.Logf("PrependTo(%#q, %q) = %#q, %v", l, fp, got, err)
		}
	}
}
// TestAppendToInvalidFilepath checks that AppendTo and PrependTo reject a
// platform-invalid filepath; skipped where no such filepath exists.
func TestAppendToInvalidFilepath(t *testing.T) {
	if invalidFilepath == "" {
		t.Skip("no invalid filepath on this OS")
	}
	{
		got, err := AppendTo(colonToSep("a:b"), invalidFilepath)
		if err == nil {
			t.Errorf("AppendTo(%#q, %q) = %#q, %v, want error", colonToSep("a:b"),
				invalidFilepath, got, err)
		} else {
			t.Logf("AppendTo(%#q, %q) = %#q, %v", colonToSep("a:b"),
				invalidFilepath, got, err)
		}
	}
	{
		got, err := PrependTo(colonToSep("a:b"), invalidFilepath)
		if err == nil {
			t.Errorf("PrependTo(%#q, %q) = %#q, %v, want error", colonToSep("a:b"),
				invalidFilepath, got, err)
		} else {
			t.Logf("PrependTo(%#q, %q) = %#q, %v", colonToSep("a:b"),
				invalidFilepath, got, err)
		}
	}
}
// TestMustOK checks that Must passes a successful result through.
func TestMustOK(t *testing.T) {
	want := colonToSep("a:b:c")
	got := Must(AppendTo(colonToSep("a:b"), "c"))
	if equiv(Split(got), Split(want)) {
		t.Logf("Must(AppendTo(%#q, %q)) = %#q", colonToSep("a:b"), "c", got)
	} else {
		// The original used t.Logf here, so a mismatch could never fail
		// the test; report it as an error.
		t.Errorf("Must(AppendTo(%#q, %q)) = %#q, want equivalent to %#q",
			colonToSep("a:b"), "c", got, want)
	}
}
// TestMustPanic checks that Must panics on an error result; skipped where
// no invalid filepath exists for this OS.
func TestMustPanic(t *testing.T) {
	if invalidFilepath == "" {
		t.Skip("no invalid filepath on this OS")
	}
	defer func() {
		if r := recover(); r != nil {
			t.Logf("Must(AppendTo(%#q, %q)): panic %v", colonToSep("a:b"),
				invalidFilepath, r)
		}
	}()
	got := Must(AppendTo(colonToSep("a:b"), invalidFilepath))
	// Reaching this line means Must did not panic. The original used
	// t.Logf here, so the test could never fail; report it as an error.
	t.Errorf("Must(AppendTo(%#q, %q)) = %v, want panic", colonToSep("a:b"),
		invalidFilepath, got)
}
Fix Windows tests
// Copyright 2013 Péter Surányi. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pathlist
import (
"runtime"
"strings"
"testing"
"gopkg.in/pathlist.v0/internal"
)
// Compile-time check that internal.Error satisfies the public Error interface.
var _ Error = internal.Error{}

// constTests pairs each exported error constant with its internal source.
var constTests = []struct {
	exprStr, expr, want string
}{
	{exprStr: "ErrQuote", expr: ErrQuote, want: internal.ErrQuote},
	{exprStr: "ErrSep", expr: ErrSep, want: internal.ErrSep},
}
// TestConst ensures that the exported error constants don't diverge from
// the internal package's values.
func TestConst(t *testing.T) {
	for _, tt := range constTests {
		if tt.expr != tt.want {
			t.Errorf("%s = %q; want %q", tt.exprStr, tt.expr, tt.want)
		} else {
			t.Logf("%s = %q", tt.exprStr, tt.expr)
		}
	}
}
// colonToSep rewrites every ':' in a test-table list to the platform's
// ListSeparator so tables can be written portably.
func colonToSep(list List) List {
	return List(strings.Replace(string(list), ":", string(ListSeparator), -1))
}
// equiv reports whether l1 and l2 denote the same path list, where any run
// of consecutive empty strings counts the same as a single empty string.
func equiv(l1, l2 []string) bool {
	i1, i2 := 0, 0
	for ; i1 < len(l1) && i2 < len(l2); i1, i2 = i1+1, i2+1 {
		// treat sequences of empty strings as equivalent.
		for i1 < len(l1)-1 && l1[i1] == "" && l1[i1+1] == "" {
			i1++
		}
		for i2 < len(l2)-1 && l2[i2] == "" && l2[i2+1] == "" {
			i2++
		}
		if l1[i1] != l2[i2] {
			return false
		}
	}
	// Both lists must be fully consumed for equivalence.
	return i1 == len(l1) && i2 == len(l2)
}
type newTest struct {
filepaths []string
ok bool
list List
}
var newTests = []newTest{
{[]string{}, true, ""},
{[]string{""}, true, ":"},
{[]string{"a"}, true, "a"},
{[]string{"", ""}, true, ":"},
{[]string{"a", ""}, true, "a:"},
{[]string{"", "b"}, true, ":b"},
{[]string{"a", "b"}, true, "a:b"},
}
var newTestsUnixPlan9 = []newTest{
{[]string{":"}, false, ""},
}
var newTestsWindows = []newTest{
{[]string{`a;b`}, true, `"a;b"`},
{[]string{`"a"`}, false, ""},
}
// TestNew exercises New over the shared table plus OS-specific cases
// (separator-in-path rejection on Unix/Plan 9, quoting on Windows).
func TestNew(t *testing.T) {
tests := newTests
switch runtime.GOOS {
case "darwin", "dragonfly", "freebsd", "linux", "netbsd", "openbsd",
"plan9", "solaris":
tests = append(tests, newTestsUnixPlan9...)
case "windows":
tests = append(tests, newTestsWindows...)
}
for _, tt := range tests {
// Fixtures use ':' as a portable stand-in for ListSeparator.
exp := colonToSep(tt.list)
l, err := New(tt.filepaths...)
switch {
case tt.ok && err != nil:
t.Errorf("New(%q) = %v, %v; want %#q, nil",
tt.filepaths, l, err, exp)
case !tt.ok && err == nil:
t.Errorf("New(%q) = %#q, %v; want error", tt.filepaths, l, err)
case l != exp:
t.Errorf("New(%q) = %#q, %v; want %#q, nil",
tt.filepaths, l, err, exp)
default:
t.Logf("New(%q) = %q, %v", tt.filepaths, l, err)
}
}
}
type appendToTest struct {
list List
filepaths []string
appended List
prepended List
}
var appendToTests = []appendToTest{
{"", []string{}, "", ""},
{"", []string{""}, ":", ":"},
{"", []string{"c"}, "c", "c"},
{"", []string{"c", "d"}, "c:d", "c:d"},
{"a", []string{}, "a", "a"},
{"a", []string{""}, "a:", ":a"},
{"a", []string{"c"}, "a:c", "c:a"},
{"a", []string{"c", "d"}, "a:c:d", "c:d:a"},
{":", []string{""}, ":", ":"},
{":", []string{"c"}, ":c", "c:"},
{":", []string{"c", "d"}, ":c:d", "c:d:"},
{"a:b", []string{}, "a:b", "a:b"},
{"a:b", []string{""}, "a:b:", ":a:b"},
{"a:b", []string{"c"}, "a:b:c", "c:a:b"},
{"a:b", []string{"c", "d"}, "a:b:c:d", "c:d:a:b"},
{"a:", []string{}, "a:", "a:"},
{"a:", []string{""}, "a:", ":a:"},
{"a:", []string{"c"}, "a::c", "c:a:"},
{"a:", []string{"c", "d"}, "a::c:d", "c:d:a:"},
{":b", []string{}, ":b", ":b"},
{":b", []string{""}, ":b:", ":b"},
{":b", []string{"c"}, ":b:c", "c::b"},
{":b", []string{"c", "d"}, ":b:c:d", "c:d::b"},
{"::", []string{}, "::", "::"},
{"::", []string{""}, "::", "::"},
{"::", []string{"c"}, "::c", "c::"},
{"::", []string{"c", "d"}, "::c:d", "c:d::"},
{"a::", []string{}, "a::", "a::"},
{"a::", []string{""}, "a::", ":a::"},
{"a::", []string{"c"}, "a::c", "c:a::"},
{"a::", []string{"c", "d"}, "a::c:d", "c:d:a::"},
{"::b", []string{}, "::b", "::b"},
{"::b", []string{""}, "::b:", "::b"},
{"::b", []string{"c"}, "::b:c", "c::b"},
{"::b", []string{"c", "d"}, "::b:c:d", "c:d::b"},
}
// TestAppendTo runs every entry of the appendToTests table through
// testAppendToCase, which checks both AppendTo and PrependTo.
func TestAppendTo(t *testing.T) {
	for i := range appendToTests {
		testAppendToCase(t, appendToTests[i])
	}
}
// testAppendToCase checks one table entry against both AppendTo and
// PrependTo, comparing results with equiv (runs of empty entries are
// considered equal).
func testAppendToCase(t *testing.T, tt appendToTest) {
appended, aerr := AppendTo(colonToSep(tt.list), tt.filepaths...)
switch {
case aerr != nil:
t.Errorf("AppendTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
tt.list, tt.filepaths, appended, aerr, tt.appended)
case !equiv(Split(colonToSep(tt.appended)), Split(appended)):
t.Errorf("AppendTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
tt.list, tt.filepaths, appended, aerr, tt.appended)
default:
t.Logf("AppendTo(%q, %q) = %#q, %v",
tt.list, tt.filepaths, appended, aerr)
}
prepended, perr := PrependTo(colonToSep(tt.list), tt.filepaths...)
switch {
case perr != nil:
t.Errorf("PrependTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
tt.list, tt.filepaths, prepended, perr, tt.prepended)
case !equiv(Split(colonToSep(tt.prepended)), Split(prepended)):
t.Errorf("PrependTo(%q, %q) = %#q, %v; want equivalent to %q, nil",
tt.list, tt.filepaths, prepended, perr, tt.prepended)
default:
t.Logf("PrependTo(%q, %q) = %#q, %v",
tt.list, tt.filepaths, prepended, perr)
}
}
// TestAppendToCloseQuote checks Windows-specific behavior: an unclosed
// double quote in an existing entry must be closed before a new
// semicolon-separated entry is appended/prepended.
func TestAppendToCloseQuote(t *testing.T) {
if runtime.GOOS != "windows" {
t.Skip("windows-only test")
}
l := List(`a"a`)
fp := "b"
{ // AppendTo
want := List(`a"a";b`)
got, err := AppendTo(l, fp)
if err != nil || got != want {
t.Errorf("AppendTo(%#q, %q) = %#q, %v; want %#q, nil", l, fp, got, err, want)
} else {
t.Logf("AppendTo(%#q, %q) = %#q, %v", l, fp, got, err)
}
}
{ // PrependTo
want := List(`b;a"a"`)
got, err := PrependTo(l, fp)
if err != nil || got != want {
t.Errorf("PrependTo(%#q, %q) = %#q, %v; want %#q, nil", l, fp, got, err, want)
} else {
t.Logf("PrependTo(%#q, %q) = %#q, %v", l, fp, got, err)
}
}
}
// TestAppendToInvalidFilepath verifies that AppendTo and PrependTo
// reject a filepath the current OS cannot represent in a list.
// invalidFilepath is defined per-OS elsewhere; empty means "none".
func TestAppendToInvalidFilepath(t *testing.T) {
if invalidFilepath == "" {
t.Skip("no invalid filepath on this OS")
}
{
got, err := AppendTo(colonToSep("a:b"), invalidFilepath)
if err == nil {
t.Errorf("AppendTo(%#q, %q) = %#q, %v, want error", colonToSep("a:b"),
invalidFilepath, got, err)
} else {
t.Logf("AppendTo(%#q, %q) = %#q, %v", colonToSep("a:b"),
invalidFilepath, got, err)
}
}
{
got, err := PrependTo(colonToSep("a:b"), invalidFilepath)
if err == nil {
t.Errorf("PrependTo(%#q, %q) = %#q, %v, want error", colonToSep("a:b"),
invalidFilepath, got, err)
} else {
t.Logf("PrependTo(%#q, %q) = %#q, %v", colonToSep("a:b"),
invalidFilepath, got, err)
}
}
}
// TestMustOK verifies that Must passes through the result of a
// successful AppendTo call unchanged.
func TestMustOK(t *testing.T) {
	want := colonToSep("a:b:c")
	got := Must(AppendTo(colonToSep("a:b"), "c"))
	if equiv(Split(got), Split(want)) {
		t.Logf("Must(AppendTo(%#q, %q)) = %#q", colonToSep("a:b"), "c", got)
	} else {
		// BUG FIX: this branch used t.Logf, so a wrong result never
		// failed the test; report it as an error.
		t.Errorf("Must(AppendTo(%#q, %q)) = %#q, want equivalent to %#q",
			colonToSep("a:b"), "c", got, want)
	}
}
// TestMustPanic verifies that Must panics when AppendTo returns an error.
func TestMustPanic(t *testing.T) {
	if invalidFilepath == "" {
		t.Skip("no invalid filepath on this OS")
	}
	defer func() {
		if r := recover(); r != nil {
			t.Logf("Must(AppendTo(%#q, %q)): panic %v", colonToSep("a:b"),
				invalidFilepath, r)
		}
	}()
	got := Must(AppendTo(colonToSep("a:b"), invalidFilepath))
	// BUG FIX: reaching this line means Must did not panic, which is a
	// failure; the original only logged it with t.Logf.
	t.Errorf("Must(AppendTo(%#q, %q)) = %v, want panic", colonToSep("a:b"),
		invalidFilepath, got)
}
|
package main
import (
. "github.com/gorilla/feeds"
"github.com/gorilla/mux"
"github.com/sourcegraph/sitemap"
"html/template"
"log"
"net/http"
"strconv"
"strings"
"time"
)
const (
itemsPerPage = 10
)
type Listboard struct {
config *Config
db *Database
}
type TemplateData map[string]interface{}
type ValidationErrors []string
var helperFuncs = template.FuncMap{
"lang": hfLang,
"time": hfTime,
"slug": hfSlug,
}
// NewListboard returns an empty Listboard; config and db are populated
// later by Run.
func NewListboard() *Listboard {
return &Listboard{}
}
// NewTemplateData builds the base template context with defaults that
// individual handlers override.
// NOTE(review): the sc parameter is currently unused — presumably the
// site config was meant to seed Title etc.; confirm before removing.
func NewTemplateData(sc *SiteConfig) TemplateData {
td := make(TemplateData)
td["Title"] = "Title is not defined"
td["ShowVote"] = false
return td
}
// render parses the given template files and executes "layout.html"
// with data, writing the result to w. Errors are reported as HTTP 500.
func render(data *TemplateData, w http.ResponseWriter, r *http.Request, filenames ...string) {
	t := template.New("layout.html")
	t.Funcs(helperFuncs)
	// BUG FIX: the original wrapped ParseFiles in template.Must, which
	// panics the whole server on a template parse error at request time.
	t, err := t.ParseFiles(filenames...)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// Run loads configuration, opens the database, registers all HTTP
// routes and blocks serving on the configured address.
func (l *Listboard) Run() {
l.config = NewConfig()
l.db = NewDatabase(l.config)
r := mux.NewRouter()
r.HandleFunc("/", http.HandlerFunc(l.indexHandler)).Methods("GET")
r.HandleFunc("/feed.xml", http.HandlerFunc(l.feedHandler)).Methods("GET")
r.HandleFunc("/all.xml", http.HandlerFunc(l.feedAlllHandler)).Methods("GET")
r.HandleFunc("/sitemap.xml", http.HandlerFunc(l.sitemapHandler)).Methods("GET")
r.HandleFunc("/add.html", http.HandlerFunc(l.addFormHandler)).Methods("GET", "POST")
r.HandleFunc("/list/{listId}/{slug}", http.HandlerFunc(l.listHandler)).Methods("GET", "POST")
r.HandleFunc("/list/{listId}/{itemId}/vote.html", http.HandlerFunc(l.voteHandler)).Methods("GET", "POST")
http.Handle("/", r)
// Fatal on listen error: the process cannot do anything useful without it.
if err := http.ListenAndServe(l.config.Server, nil); err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
// indexHandler renders the front page: a paginated list of lists,
// newest-updated first. The page number comes from the "page" query
// parameter and falls back to 0 on parse failure.
func (l *Listboard) indexHandler(w http.ResponseWriter, r *http.Request) {
	// BUG FIX: the page number was read from the "hostname" query
	// parameter, so ?page=N could never take effect.
	pageStr := r.URL.Query().Get("page")
	page := 0
	if len(pageStr) != 0 {
		var err error
		page, err = strconv.Atoi(pageStr)
		if err != nil {
			log.Printf("%q is not a valid page number", pageStr)
			page = 0
		}
	}
	sc := l.db.getSiteConfig("token")
	data := NewTemplateData(sc)
	data["Lists"] = l.db.getChildNodes(0, itemsPerPage, page, "updated")
	render(&data, w, r, "templates/layout.html", "templates/index.html")
}
// addFormHandler shows the "new list" form (GET) and stores a new
// top-level node (POST). Submissions from bots that fill the honeypot
// "name" field are silently dropped.
func (l *Listboard) addFormHandler(w http.ResponseWriter, r *http.Request) {
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, 0)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so the original `errors == nil` check never passed
			// and valid submissions were never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				// BUG FIX: stop here instead of also rendering the form
				// into the redirect response.
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["Errors"] = errors
	data["Form"] = node
	render(&data, w, r, "templates/layout.html", "templates/add.html", "templates/form.html")
}
// listHandler shows a single list and its items (GET) and stores a new
// item in the list (POST).
func (l *Listboard) listHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	listId, err := strconv.Atoi(vars["listId"])
	if err != nil {
		// BUG FIX: the original logged the int listId with %s; log the
		// raw path segment that failed to parse instead.
		log.Printf("%q is not a valid list number", vars["listId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		// Honeypot field filled in => assume a bot, drop silently.
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, listId)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so `errors == nil` never passed and submissions
			// were never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["Errors"] = errors
	data["Form"] = node
	data["List"] = l.db.getNode(listId)
	data["Items"] = l.db.getChildNodes(listId, itemsPerPage, 0, "votes")
	render(&data, w, r, "templates/layout.html", "templates/list.html", "templates/form.html")
}
// voteHandler shows a single item with its votes (GET) and stores a new
// vote/comment under that item (POST).
func (l *Listboard) voteHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	listId, err := strconv.Atoi(vars["listId"])
	if err != nil {
		// BUG FIX: log the raw path segment, not the int with %s.
		log.Printf("%q is not a valid list number", vars["listId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	itemId, err := strconv.Atoi(vars["itemId"])
	if err != nil {
		// BUG FIX: the original logged listId here instead of the item
		// value that actually failed to parse.
		log.Printf("%q is not a valid item number", vars["itemId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		// Honeypot field filled in => assume a bot, drop silently.
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, itemId)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so `errors == nil` never passed and votes were
			// never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["ShowVote"] = true
	data["Errors"] = errors
	data["Form"] = node
	data["List"] = l.db.getNode(listId)
	data["Item"] = l.db.getNode(itemId)
	data["Items"] = l.db.getChildNodes(itemId, itemsPerPage, 0, "created")
	render(&data, w, r, "templates/layout.html", "templates/vote.html", "templates/form.html")
}
// feedHandler serves an RSS feed of the 20 most recently created lists.
func (l *Listboard) feedHandler(w http.ResponseWriter, r *http.Request) {
sc := l.db.getSiteConfig("token")
feed := &Feed{
Title: sc.Title,
Link: &Link{Href: "http://" + r.Host + "/"},
Description: sc.Description,
Author: &Author{sc.AuthorName, sc.AuthorEmail},
Created: time.Now(),
}
nodes := l.db.getChildNodes(0, 20, 0, "created")
for _, node := range *nodes {
feed.Items = append(feed.Items, &Item{
Title: node.Title,
// Links mirror the /list/{listId}/{slug} route registered in Run.
Link: &Link{Href: "http://" + r.Host + "/list/" + strconv.Itoa(node.Id) + "/" + hfSlug(node.Title)},
Description: string(node.Rendered),
Created: node.Created,
})
}
w.Header().Set("Content-Type", "application/rss+xml")
err := feed.WriteRss(w)
if err != nil {
// Headers may already be flushed at this point; best effort only.
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// feedAlllHandler is a stub for the /all.xml feed; it answers 200 OK
// with a plain-text placeholder.
// NOTE(review): the name has a typo ("Alll"); renaming requires also
// updating the route registration in Run.
func (l *Listboard) feedAlllHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("NOT IMPLEMENTED"))
}
// sitemapHandler emits an XML sitemap of the 1000 most recently created
// lists.
func (l *Listboard) sitemapHandler(w http.ResponseWriter, r *http.Request) {
	nodes := l.db.getChildNodes(0, 1000, 0, "created")
	var urlSet sitemap.URLSet
	for _, node := range *nodes {
		// BUG FIX: take the address of a per-iteration copy. The
		// original took &node.Created on the shared loop variable, so
		// every URL's LastMod aliased the final node's timestamp.
		node := node
		urlSet.URLs = append(urlSet.URLs, sitemap.URL{
			Loc:        "http://" + r.Host + "/list/" + strconv.Itoa(node.Id) + "/" + hfSlug(node.Title),
			LastMod:    &node.Created,
			ChangeFreq: sitemap.Daily,
			Priority:   0.7,
		})
	}
	xml, err := sitemap.Marshal(&urlSet)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/xml")
	w.Write(xml)
}
// validateForm builds a Node from the posted form fields and validates
// it. On success it returns the node with its body rendered and a nil
// error slice; otherwise the slice holds translated messages.
func validateForm(r *http.Request, parentId int) (Node, ValidationErrors) {
	node := Node{
		ParentId: parentId,
		Title:    strings.TrimSpace(r.FormValue("title")),
		Vote:     getVote(r.FormValue("vote")),
		Tripcode: tripcode(r.FormValue("password")),
		Body:     r.FormValue("body"),
	}
	// BUG FIX: start from a nil slice so a clean form yields nil and
	// callers that compare against nil behave correctly (the original
	// always returned a non-nil ValidationErrors{}).
	var errors ValidationErrors
	if len(node.Title) < 3 {
		errors = append(errors, hfLang("Title must be at least 3 characters long"))
	}
	// BUG FIX: this check validated Title a second time although the
	// "Please, write something" message refers to the post body.
	if len(node.Body) < 10 {
		errors = append(errors, hfLang("Please, write something"))
	}
	if len(errors) == 0 {
		node.Rendered = renderText(node.Body)
	}
	return node, errors
}
// renderText converts a raw post body into HTML for the templates.
// WARNING: the input is user-supplied and is returned as template.HTML
// without any escaping or sanitization, so html/template will emit it
// verbatim — a stored-XSS risk. Sanitize before trusting this output.
func renderText(t string) template.HTML {
return template.HTML(t)
}
Fixed sprintf-s
package main
import (
. "github.com/gorilla/feeds"
"github.com/gorilla/mux"
"github.com/sourcegraph/sitemap"
"html/template"
"log"
"net/http"
"strconv"
"strings"
"time"
)
const (
itemsPerPage = 10
)
type Listboard struct {
config *Config
db *Database
}
type TemplateData map[string]interface{}
type ValidationErrors []string
var helperFuncs = template.FuncMap{
"lang": hfLang,
"time": hfTime,
"slug": hfSlug,
}
// NewListboard returns an empty Listboard; config and db are populated
// later by Run.
func NewListboard() *Listboard {
return &Listboard{}
}
// NewTemplateData builds the base template context with defaults that
// individual handlers override.
// NOTE(review): the sc parameter is currently unused — presumably the
// site config was meant to seed Title etc.; confirm before removing.
func NewTemplateData(sc *SiteConfig) TemplateData {
td := make(TemplateData)
td["Title"] = "Title is not defined"
td["ShowVote"] = false
return td
}
// render parses the given template files and executes "layout.html"
// with data, writing the result to w. Errors are reported as HTTP 500.
func render(data *TemplateData, w http.ResponseWriter, r *http.Request, filenames ...string) {
	t := template.New("layout.html")
	t.Funcs(helperFuncs)
	// BUG FIX: the original wrapped ParseFiles in template.Must, which
	// panics the whole server on a template parse error at request time.
	t, err := t.ParseFiles(filenames...)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
// Run loads configuration, opens the database, registers all HTTP
// routes and blocks serving on the configured address.
func (l *Listboard) Run() {
l.config = NewConfig()
l.db = NewDatabase(l.config)
r := mux.NewRouter()
r.HandleFunc("/", http.HandlerFunc(l.indexHandler)).Methods("GET")
r.HandleFunc("/feed.xml", http.HandlerFunc(l.feedHandler)).Methods("GET")
r.HandleFunc("/all.xml", http.HandlerFunc(l.feedAlllHandler)).Methods("GET")
r.HandleFunc("/sitemap.xml", http.HandlerFunc(l.sitemapHandler)).Methods("GET")
r.HandleFunc("/add.html", http.HandlerFunc(l.addFormHandler)).Methods("GET", "POST")
r.HandleFunc("/list/{listId}/{slug}", http.HandlerFunc(l.listHandler)).Methods("GET", "POST")
r.HandleFunc("/list/{listId}/{itemId}/vote.html", http.HandlerFunc(l.voteHandler)).Methods("GET", "POST")
http.Handle("/", r)
// Fatal on listen error: the process cannot do anything useful without it.
if err := http.ListenAndServe(l.config.Server, nil); err != nil {
log.Fatal("ListenAndServe: ", err)
}
}
// indexHandler renders the front page: a paginated list of lists,
// newest-updated first. The page number comes from the "page" query
// parameter and falls back to 0 on parse failure.
func (l *Listboard) indexHandler(w http.ResponseWriter, r *http.Request) {
	// BUG FIX: the page number was read from the "hostname" query
	// parameter, so ?page=N could never take effect.
	pageStr := r.URL.Query().Get("page")
	page := 0
	if len(pageStr) != 0 {
		var err error
		page, err = strconv.Atoi(pageStr)
		if err != nil {
			log.Printf("%q is not a valid page number", pageStr)
			page = 0
		}
	}
	sc := l.db.getSiteConfig("token")
	data := NewTemplateData(sc)
	data["Lists"] = l.db.getChildNodes(0, itemsPerPage, page, "updated")
	render(&data, w, r, "templates/layout.html", "templates/index.html")
}
// addFormHandler shows the "new list" form (GET) and stores a new
// top-level node (POST). Submissions from bots that fill the honeypot
// "name" field are silently dropped.
func (l *Listboard) addFormHandler(w http.ResponseWriter, r *http.Request) {
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, 0)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so the original `errors == nil` check never passed
			// and valid submissions were never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				// BUG FIX: stop here instead of also rendering the form
				// into the redirect response.
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["Errors"] = errors
	data["Form"] = node
	render(&data, w, r, "templates/layout.html", "templates/add.html", "templates/form.html")
}
// listHandler shows a single list and its items (GET) and stores a new
// item in the list (POST).
func (l *Listboard) listHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	listId, err := strconv.Atoi(vars["listId"])
	if err != nil {
		// BUG FIX: on parse failure listId is 0, so the original %d log
		// line always printed 0; log the raw path segment instead.
		log.Printf("%q is not a valid list number", vars["listId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		// Honeypot field filled in => assume a bot, drop silently.
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, listId)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so `errors == nil` never passed and submissions
			// were never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["Errors"] = errors
	data["Form"] = node
	data["List"] = l.db.getNode(listId)
	data["Items"] = l.db.getChildNodes(listId, itemsPerPage, 0, "votes")
	render(&data, w, r, "templates/layout.html", "templates/list.html", "templates/form.html")
}
// voteHandler shows a single item with its votes (GET) and stores a new
// vote/comment under that item (POST).
func (l *Listboard) voteHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	listId, err := strconv.Atoi(vars["listId"])
	if err != nil {
		// BUG FIX: on parse failure listId is 0; log the raw segment.
		log.Printf("%q is not a valid list number", vars["listId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	itemId, err := strconv.Atoi(vars["itemId"])
	if err != nil {
		// BUG FIX: the original logged listId here instead of the item
		// value that actually failed to parse.
		log.Printf("%q is not a valid item number", vars["itemId"])
		http.Error(w, "Not found", http.StatusNotFound)
		return
	}
	sc := l.db.getSiteConfig("token")
	var errors ValidationErrors
	var node Node
	if r.Method == "POST" {
		// Honeypot field filled in => assume a bot, drop silently.
		if !inHoneypot(r.FormValue("name")) {
			node, errors = validateForm(r, itemId)
			// BUG FIX: validateForm returns a non-nil (possibly empty)
			// slice, so `errors == nil` never passed and votes were
			// never saved.
			if len(errors) == 0 {
				l.db.addNode(&node)
				http.Redirect(w, r, "/", http.StatusFound)
				return
			}
		}
	}
	data := NewTemplateData(sc)
	data["ShowVote"] = true
	data["Errors"] = errors
	data["Form"] = node
	data["List"] = l.db.getNode(listId)
	data["Item"] = l.db.getNode(itemId)
	data["Items"] = l.db.getChildNodes(itemId, itemsPerPage, 0, "created")
	render(&data, w, r, "templates/layout.html", "templates/vote.html", "templates/form.html")
}
// feedHandler serves an RSS feed of the 20 most recently created lists.
func (l *Listboard) feedHandler(w http.ResponseWriter, r *http.Request) {
sc := l.db.getSiteConfig("token")
feed := &Feed{
Title: sc.Title,
Link: &Link{Href: "http://" + r.Host + "/"},
Description: sc.Description,
Author: &Author{sc.AuthorName, sc.AuthorEmail},
Created: time.Now(),
}
nodes := l.db.getChildNodes(0, 20, 0, "created")
for _, node := range *nodes {
feed.Items = append(feed.Items, &Item{
Title: node.Title,
// Links mirror the /list/{listId}/{slug} route registered in Run.
Link: &Link{Href: "http://" + r.Host + "/list/" + strconv.Itoa(node.Id) + "/" + hfSlug(node.Title)},
Description: string(node.Rendered),
Created: node.Created,
})
}
w.Header().Set("Content-Type", "application/rss+xml")
err := feed.WriteRss(w)
if err != nil {
// Headers may already be flushed at this point; best effort only.
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
// feedAlllHandler is a stub for the /all.xml feed; it answers 200 OK
// with a plain-text placeholder.
// NOTE(review): the name has a typo ("Alll"); renaming requires also
// updating the route registration in Run.
func (l *Listboard) feedAlllHandler(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("NOT IMPLEMENTED"))
}
// sitemapHandler emits an XML sitemap of the 1000 most recently created
// lists.
func (l *Listboard) sitemapHandler(w http.ResponseWriter, r *http.Request) {
	nodes := l.db.getChildNodes(0, 1000, 0, "created")
	var urlSet sitemap.URLSet
	for _, node := range *nodes {
		// BUG FIX: take the address of a per-iteration copy. The
		// original took &node.Created on the shared loop variable, so
		// every URL's LastMod aliased the final node's timestamp.
		node := node
		urlSet.URLs = append(urlSet.URLs, sitemap.URL{
			Loc:        "http://" + r.Host + "/list/" + strconv.Itoa(node.Id) + "/" + hfSlug(node.Title),
			LastMod:    &node.Created,
			ChangeFreq: sitemap.Daily,
			Priority:   0.7,
		})
	}
	xml, err := sitemap.Marshal(&urlSet)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/xml")
	w.Write(xml)
}
// validateForm builds a Node from the posted form fields and validates
// it. On success it returns the node with its body rendered and a nil
// error slice; otherwise the slice holds translated messages.
func validateForm(r *http.Request, parentId int) (Node, ValidationErrors) {
	node := Node{
		ParentId: parentId,
		Title:    strings.TrimSpace(r.FormValue("title")),
		Vote:     getVote(r.FormValue("vote")),
		Tripcode: tripcode(r.FormValue("password")),
		Body:     r.FormValue("body"),
	}
	// BUG FIX: start from a nil slice so a clean form yields nil and
	// callers that compare against nil behave correctly (the original
	// always returned a non-nil ValidationErrors{}).
	var errors ValidationErrors
	if len(node.Title) < 3 {
		errors = append(errors, hfLang("Title must be at least 3 characters long"))
	}
	// BUG FIX: this check validated Title a second time although the
	// "Please, write something" message refers to the post body.
	if len(node.Body) < 10 {
		errors = append(errors, hfLang("Please, write something"))
	}
	if len(errors) == 0 {
		node.Rendered = renderText(node.Body)
	}
	return node, errors
}
// renderText converts a raw post body into HTML for the templates.
// WARNING: the input is user-supplied and is returned as template.HTML
// without any escaping or sanitization, so html/template will emit it
// verbatim — a stored-XSS risk. Sanitize before trusting this output.
func renderText(t string) template.HTML {
return template.HTML(t)
}
|
package dawa
import (
"bufio"
"github.com/ugorji/go/codec"
"io"
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strconv"
)
// ListQuery returns query item for searching DAWA for specific list types.
// Use dawa.NewListQuery(type string, autocomplete bool) to create a new query.
//
// See 'examples/query-list.go' for a usage example.
//
// See documentation at http://dawa.aws.dk/listerdok
type ListQuery struct {
queryGeoJSON
listType string
}
// NewListQuery returns a query item for searching DAWA for a specific list type.
//
// Supported list types are "regioner","sogne","retskredse","politikredse","opstillingskredse","valglandsdele","ejerlav".
// Use the corresponding iterator function, for instance i.NextRegion() to get typed results.
//
// When autoComplete is true the request goes to the "/<type>/autocomplete"
// endpoint instead of "/<type>".
//
// See 'examples/query-list.go' for a usage example.
//
// See documentation at http://dawa.aws.dk/listerdok
func NewListQuery(listType string, autoComplete bool) *ListQuery {
path := "/" + listType
if autoComplete {
path += "/autocomplete"
}
q := &ListQuery{listType: listType, queryGeoJSON: queryGeoJSON{query: query{host: DefaultHost, path: path}}}
return q
}
// Q will add a parameter for 'q' to the ListQuery.
//
// Search text: both code and name are searched; every word in the text
// must match, and a trailing '*' wildcard is allowed on each word.
// (Original Danish: "Søgetekst. Der søges i kode og navn. Alle ord i
// søgeteksten skal matche. Wildcard * er tilladt i slutningen af hvert ord.")
//
// See http://dawa.aws.dk/listerdok
func (q *ListQuery) Q(s string) *ListQuery {
q.add(textQuery{Name: "q", Values: []string{s}, Multi: true, Null: false})
return q
}
// Kode will add a parameter for 'kode' to the ListQuery.
//
// The code of the entity being searched for; multiple values are allowed.
// (Original Danish: "Kode for det der søges.")
func (q *ListQuery) Kode(s ...string) *ListQuery {
q.add(textQuery{Name: "kode", Values: s, Multi: true, Null: false})
return q
}
// Navn will add a parameter for 'navn' to the ListQuery.
//
// The name of the entity being searched for.
// (Original Danish: "Navn for det der søges.")
func (q *ListQuery) Navn(s string) *ListQuery {
q.add(textQuery{Name: "navn", Values: []string{s}, Multi: true, Null: false})
return q
}
// NoFormat will disable extra whitespace in the response ("noformat"
// parameter, sent with no value). Always enabled when querying via Iter.
func (q *ListQuery) NoFormat() *ListQuery {
q.add(textQuery{Name: "noformat", Multi: false, Null: true})
return q
}
// ListIter is an Iterator that enable you to get individual entries.
type ListIter struct {
closer
a reflect.Value // Channel of *T elements fed by the decode goroutine
eType reflect.Type // Type of the element (pointer type, e.g. *Region)
err error // Set by the decode goroutine before the channel closes; io.EOF on normal end
}
func makeChannel(t reflect.Type, chanDir reflect.ChanDir, buffer int) reflect.Value {
ctype := reflect.ChanOf(chanDir, t)
return reflect.MakeChan(ctype, buffer)
}
// Iter creates a list iterator that will allow you to get the items one by one.
//
// The HTTP response is decoded as a JSON array on a background goroutine
// and streamed through a typed channel; consume it with Next or the
// typed Next* methods. Close the iterator when done.
func (q ListQuery) Iter() (*ListIter, error) {
	resp, err := q.NoFormat().Request()
	if err != nil {
		return nil, err
	}
	typ := q.Type()
	if typ == nil {
		// BUG FIX: close the response before bailing out so the
		// connection is not leaked on an unknown list type.
		resp.Close()
		return nil, fmt.Errorf("Unknown list type: %s", q.listType)
	}
	var h codec.JsonHandle
	h.DecodeOptions.ErrorIfNoField = JSONStrictFieldCheck
	// use a buffered reader for efficiency
	in := bufio.NewReader(resp)
	ret := &ListIter{}
	ret.eType = reflect.TypeOf(typ)
	// We create a channel with the expected type
	ret.a = makeChannel(ret.eType, reflect.BothDir, 100)
	go func() {
		// The deferred close runs after ret.err is assigned, so readers
		// that see the channel closed always observe the final error.
		defer ret.a.Close()
		dec := codec.NewDecoder(in, &h)
		channel := ret.a.Interface()
		ret.err = dec.Decode(&channel)
		if ret.err == nil {
			// Normal termination: Next reports io.EOF when drained.
			ret.err = io.EOF
		}
	}()
	// NOTE: a dead `if err != nil` re-check that sat here was removed;
	// err cannot have changed since the check after Request above.
	ret.AddCloser(resp)
	return ret, nil
}
// Type returns a pointer to a zero value of the element type matching
// q.listType (e.g. *Region for "regioner"), or nil when the list type
// is unknown. Iter uses it to build the typed result channel.
// NOTE(review): "kommuner" is accepted here although the NewListQuery
// doc comment does not list it — confirm which is authoritative.
func (q ListQuery) Type() interface{} {
switch q.listType {
case "regioner":
return &Region{}
case "kommuner":
return &Kommune{}
case "sogne":
return &Sogn{}
case "retskredse":
return &Retskreds{}
case "politikredse":
return &Politikreds{}
case "opstillingskredse":
return &Opstillingskreds{}
case "valglandsdele":
return &Valglandsdel{}
case "ejerlav":
return &Ejerlav{}
}
return nil
}
// Next will return the next item untyped.
// It will return an error if that has been encountered.
// When there are not more entries nil, io.EOF will be returned.
// Reading a.err here is safe once Recv reports the channel closed: the
// decode goroutine in Iter assigns a.err before its deferred close runs.
func (a *ListIter) Next() (interface{}, error) {
v, ok := a.a.Recv()
if ok {
return v.Interface(), nil
}
return nil, a.err
}
// NextKommune will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextKommune() (*Kommune, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Kommune{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next rather than
		// re-reading a.err (identical today, but fragile if Next ever
		// returns a different error).
		return nil, err
	}
	return item.(*Kommune), nil
}
// NextRegion will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextRegion() (*Region, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Region{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Region), nil
}
// NextSogn will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextSogn() (*Sogn, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Sogn{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Sogn), nil
}
// NextRetskreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextRetskreds() (*Retskreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Retskreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Retskreds), nil
}
// NextPolitikreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextPolitikreds() (*Politikreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Politikreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Politikreds), nil
}
// NextOpstillingskreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextOpstillingskreds() (*Opstillingskreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Opstillingskreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Opstillingskreds), nil
}
// NextValglandsdel will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextValglandsdel() (*Valglandsdel, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Valglandsdel{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Valglandsdel), nil
}
// NextEjerlav will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextEjerlav() (*Ejerlav, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Ejerlav{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// FIX: return the error received from Next, not a.err.
		return nil, err
	}
	return item.(*Ejerlav), nil
}
// NewReverseQuery will create a reverse location to item lookup. Parameters are:
//
// * listType: See NewListQuery() for valid options.
// * x: X coordinate. (With ETRS89/UTM32 this is the easting.) With WGS84/geographic it is the latitude value.
// * y: Y coordinate. (With ETRS89/UTM32 this is the northing.) With WGS84/geographic it is the longitude value.
// * srid: SRID of the coordinate system the geospatial parameters use. Default is 4326 (WGS84); leave empty for the default.
//
// See examples/query-list-reverse.go for usage example
//
// An iterator will be returned, but it will only contain zero or one values.
func NewReverseQuery(listType string, x, y float64, srid string) (*ListIter, error) {
path := "/" + listType + "/reverse"
q := &ListQuery{listType: listType, queryGeoJSON: queryGeoJSON{query: query{host: DefaultHost, path: path}}}
typ := q.Type()
if typ == nil {
return nil, fmt.Errorf("unknown list type '%s'", listType)
}
q.add(textQuery{Name: "x", Values: []string{strconv.FormatFloat(x, 'f', -1, 64)}, Multi: false, Null: false})
q.add(textQuery{Name: "y", Values: []string{strconv.FormatFloat(y, 'f', -1, 64)}, Multi: false, Null: false})
if srid != "" {
q.add(textQuery{Name: "srid", Values: []string{srid}, Multi: false, Null: false})
}
// Execute request
resp, err := q.NoFormat().Request()
if err != nil {
return nil, err
}
defer resp.Close()
// We create the iterator and fill it with data.
ret := &ListIter{}
ret.eType = reflect.TypeOf(typ)
// Buffer of one: the single decoded result is sent before any Recv.
ret.a = makeChannel(ret.eType, reflect.BothDir, 1)
all, err := ioutil.ReadAll(resp)
if err != nil {
return nil, err
}
// Decode to typ
err = json.Unmarshal(all, typ)
if err != nil {
return nil, err
}
ret.a.Send(reflect.ValueOf(typ))
// Close immediately and mark EOF so Next yields the value, then io.EOF.
ret.a.Close()
ret.err = io.EOF
return ret, nil
}
Add all types that support reverse geocoding.
package dawa
import (
"bufio"
"encoding/json"
"fmt"
"github.com/ugorji/go/codec"
"io"
"io/ioutil"
"reflect"
"strconv"
)
// ListQuery returns query item for searching DAWA for specific list types.
// Use dawa.NewListQuery(type string, autocomplete bool) to create a new query.
//
// See 'examples/query-list.go' for a usage example.
//
// See documentation at http://dawa.aws.dk/listerdok
type ListQuery struct {
queryGeoJSON
listType string
}
// NewListQuery returns a query item for searching DAWA for a specific list type.
//
// Supported list types are "regioner","sogne","retskredse","politikredse","opstillingskredse","valglandsdele","ejerlav", "adgangsadresser", "adresser" or "postnumre".
// Use the corresponding iterator function, for instance i.NextRegion() to get typed results.
//
// When autoComplete is true the request goes to the "/<type>/autocomplete"
// endpoint instead of "/<type>".
//
// See 'examples/query-list.go' for a usage example.
//
// See documentation at http://dawa.aws.dk/listerdok
func NewListQuery(listType string, autoComplete bool) *ListQuery {
path := "/" + listType
if autoComplete {
path += "/autocomplete"
}
q := &ListQuery{listType: listType, queryGeoJSON: queryGeoJSON{query: query{host: DefaultHost, path: path}}}
return q
}
// Q will add a parameter for 'q' to the ListQuery.
//
// Search text: both code and name are searched; every word in the text
// must match, and a trailing '*' wildcard is allowed on each word.
// (Original Danish: "Søgetekst. Der søges i kode og navn. Alle ord i
// søgeteksten skal matche. Wildcard * er tilladt i slutningen af hvert ord.")
//
// See http://dawa.aws.dk/listerdok
func (q *ListQuery) Q(s string) *ListQuery {
q.add(textQuery{Name: "q", Values: []string{s}, Multi: true, Null: false})
return q
}
// Kode will add a parameter for 'kode' to the ListQuery.
//
// The code of the entity being searched for; multiple values are allowed.
// (Original Danish: "Kode for det der søges.")
func (q *ListQuery) Kode(s ...string) *ListQuery {
q.add(textQuery{Name: "kode", Values: s, Multi: true, Null: false})
return q
}
// Navn will add a parameter for 'navn' to the ListQuery.
//
// The name of the entity being searched for.
// (Original Danish: "Navn for det der søges.")
func (q *ListQuery) Navn(s string) *ListQuery {
q.add(textQuery{Name: "navn", Values: []string{s}, Multi: true, Null: false})
return q
}
// NoFormat will disable extra whitespace in the response ("noformat"
// parameter, sent with no value). Always enabled when querying via Iter.
func (q *ListQuery) NoFormat() *ListQuery {
q.add(textQuery{Name: "noformat", Multi: false, Null: true})
return q
}
// ListIter is an Iterator that enable you to get individual entries.
type ListIter struct {
closer
a reflect.Value // Channel of *T elements fed by the decode goroutine
eType reflect.Type // Type of the element (pointer type, e.g. *Region)
err error // Set by the decode goroutine before the channel closes; io.EOF on normal end
}
func makeChannel(t reflect.Type, chanDir reflect.ChanDir, buffer int) reflect.Value {
ctype := reflect.ChanOf(chanDir, t)
return reflect.MakeChan(ctype, buffer)
}
// Iter creates a list iterator that will allow you to get the items one by one.
//
// The HTTP response is decoded as a JSON array on a background goroutine
// and streamed through a typed channel; consume it with Next or the
// typed Next* methods. Close the iterator when done.
func (q ListQuery) Iter() (*ListIter, error) {
	resp, err := q.NoFormat().Request()
	if err != nil {
		return nil, err
	}
	typ := q.Type()
	if typ == nil {
		// BUG FIX: close the response before bailing out so the
		// connection is not leaked on an unknown list type.
		resp.Close()
		return nil, fmt.Errorf("Unknown list type: %s", q.listType)
	}
	var h codec.JsonHandle
	h.DecodeOptions.ErrorIfNoField = JSONStrictFieldCheck
	// use a buffered reader for efficiency
	in := bufio.NewReader(resp)
	ret := &ListIter{}
	ret.eType = reflect.TypeOf(typ)
	// We create a channel with the expected type
	ret.a = makeChannel(ret.eType, reflect.BothDir, 100)
	go func() {
		// The deferred close runs after ret.err is assigned, so readers
		// that see the channel closed always observe the final error.
		defer ret.a.Close()
		dec := codec.NewDecoder(in, &h)
		channel := ret.a.Interface()
		ret.err = dec.Decode(&channel)
		if ret.err == nil {
			// Normal termination: Next reports io.EOF when drained.
			ret.err = io.EOF
		}
	}()
	// NOTE: a dead `if err != nil` re-check that sat here was removed;
	// err cannot have changed since the check after Request above.
	ret.AddCloser(resp)
	return ret, nil
}
// Type returns a pointer to a zero value of the element type matching
// q.listType (e.g. *Region for "regioner"), or nil when the list type
// is unknown. Iter uses it to build the typed result channel.
// NOTE(review): "kommuner" is accepted here although the NewListQuery
// doc comment does not list it — confirm which is authoritative.
func (q ListQuery) Type() interface{} {
switch q.listType {
case "regioner":
return &Region{}
case "kommuner":
return &Kommune{}
case "sogne":
return &Sogn{}
case "retskredse":
return &Retskreds{}
case "politikredse":
return &Politikreds{}
case "opstillingskredse":
return &Opstillingskreds{}
case "valglandsdele":
return &Valglandsdel{}
case "ejerlav":
return &Ejerlav{}
case "adgangsadresser":
return &AdgangsAdresse{}
case "adresser":
return &Adresse{}
case "postnumre":
return &Postnummer{}
}
return nil
}
// Next will return the next item untyped.
// It will return an error if one has been encountered.
// When there are no more entries, (nil, io.EOF) will be returned.
func (a *ListIter) Next() (interface{}, error) {
	v, ok := a.a.Recv()
	if !ok {
		// Channel closed: report the decoder's error (io.EOF on a
		// clean finish).
		return nil, a.err
	}
	return v.Interface(), nil
}
// NextKommune will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextKommune() (*Kommune, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Kommune{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err,
		// a field written by the decoder goroutine.
		return nil, err
	}
	return item.(*Kommune), nil
}
// NextRegion will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextRegion() (*Region, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Region{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Region), nil
}
// NextSogn will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextSogn() (*Sogn, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Sogn{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Sogn), nil
}
// NextRetskreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextRetskreds() (*Retskreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Retskreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Retskreds), nil
}
// NextPolitikreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextPolitikreds() (*Politikreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Politikreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Politikreds), nil
}
// NextOpstillingskreds will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextOpstillingskreds() (*Opstillingskreds, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Opstillingskreds{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Opstillingskreds), nil
}
// NextValglandsdel will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextValglandsdel() (*Valglandsdel, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Valglandsdel{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Valglandsdel), nil
}
// NextEjerlav will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextEjerlav() (*Ejerlav, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Ejerlav{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Ejerlav), nil
}
// NextAdgangsAdresse will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextAdgangsAdresse() (*AdgangsAdresse, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&AdgangsAdresse{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*AdgangsAdresse), nil
}
// NextAdresse will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextAdresse() (*Adresse, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Adresse{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Adresse), nil
}
// NextPostnummer will return the next item.
// The query must be built using the corresponding type. See NewListQuery() function.
func (a *ListIter) NextPostnummer() (*Postnummer, error) {
	if !a.eType.ConvertibleTo(reflect.TypeOf(&Postnummer{})) {
		return nil, fmt.Errorf("Wrong type requested from iterator. Expected %s", a.eType.String())
	}
	item, err := a.Next()
	if err != nil {
		// Return the error Next reported rather than re-reading a.err.
		return nil, err
	}
	return item.(*Postnummer), nil
}
// NewReverseQuery will create a reverse location to item lookup. Parameters are:
//
// * listType: See NewListQuery() for valid options.
// * x: X coordinate. (If ETRS89/UTM32 is used, give the east value.) If WGS84/geographic is used, give the latitude value.
// * y: Y coordinate. (If ETRS89/UTM32 is used, give the north value.) If WGS84/geographic is used, give the longitude value.
// * srid: SRID of the coordinate system the geospatial parameters are given in. Default is 4326 (WGS84). Leave this empty for the default value.
//
// See examples/query-list-reverse.go for usage example
//
// An iterator will be returned, but it will only contain zero or one values.
func NewReverseQuery(listType string, x, y float64, srid string) (*ListIter, error) {
	path := "/" + listType + "/reverse"
	q := &ListQuery{listType: listType, queryGeoJSON: queryGeoJSON{query: query{host: DefaultHost, path: path}}}
	typ := q.Type()
	if typ == nil {
		return nil, fmt.Errorf("unknown list type '%s'", listType)
	}
	// Coordinates are formatted with minimal digits ('f', precision -1).
	q.add(textQuery{Name: "x", Values: []string{strconv.FormatFloat(x, 'f', -1, 64)}, Multi: false, Null: false})
	q.add(textQuery{Name: "y", Values: []string{strconv.FormatFloat(y, 'f', -1, 64)}, Multi: false, Null: false})
	if srid != "" {
		q.add(textQuery{Name: "srid", Values: []string{srid}, Multi: false, Null: false})
	}
	// Execute request
	resp, err := q.NoFormat().Request()
	if err != nil {
		return nil, err
	}
	defer resp.Close()
	// We create the iterator and fill it with data.
	ret := &ListIter{}
	ret.eType = reflect.TypeOf(typ)
	// Buffer of 1 is enough: the server returns at most one item.
	ret.a = makeChannel(ret.eType, reflect.BothDir, 1)
	all, err := ioutil.ReadAll(resp)
	if err != nil {
		return nil, err
	}
	// Decode to typ
	err = json.Unmarshal(all, typ)
	if err != nil {
		return nil, err
	}
	// Pre-load the single decoded value and close the channel so Next()
	// yields it once and then io.EOF.
	ret.a.Send(reflect.ValueOf(typ))
	ret.a.Close()
	ret.err = io.EOF
	return ret, nil
}
|
// Copyright (c) 2010 AFP Authors
// This source code is released under the terms of the
// MIT license. Please see the file LICENSE for license details.
package util
import (
"afp"
"runtime"
)
//Buffer will buffer n frames except in the case that the stream
//is closed before n frames have been read.
//For performance reasons, This should be used only where n >> CHAN_BUF_LEN
//due to the necessity of buffering samples in a second channel.
//For n <= CHAN_BUF_LEN Buffer will delegate to FastBuffer.
func Buffer(n int, source <-chan [][]float32) <-chan [][]float32 {
	if n <= afp.CHAN_BUF_LEN {
		FastBuffer(n, source)
		return source
	}
	buff := make(chan [][]float32, n)
	buffered := 0
	for s := range source {
		buff <- s
		// BUG FIX: buffered was never incremented, so the break
		// condition below could never fire and this loop only ended
		// when source was closed — defeating the point of buffering
		// exactly n frames.
		buffered++
		if buffered >= n {
			break
		}
	}
	//We need to copy all subsequent frames
	//sent into the chan the caller will
	//now be reading from.
	go func() {
		for s := range source {
			buff <- s
		}
	}()
	return buff
}
//Will buffer at least n and at most CHAN_BUF_LEN
//frames before returning.
//
// NOTE(review): this uses the pre-Go 1 closed() builtin, which was
// removed from the language — the file only builds on an ancient
// toolchain. Confirm before attempting modernization.
func FastBuffer(n int, source <-chan [][]float32) int {
	if n > afp.CHAN_BUF_LEN {
		n = afp.CHAN_BUF_LEN
	}
	// Yield the processor until the channel holds n frames or the
	// sender closes it.
	for len(source) < n && !closed(source) {
		runtime.Gosched()
	}
	//This is racey, but can only cause us to buffer more
	//than requested. Shouldn't be a problem.
	return len(source)
}
Fix: increment the `buffered` loop counter so Buffer stops after n frames instead of draining the whole stream.
// Copyright (c) 2010 AFP Authors
// This source code is released under the terms of the
// MIT license. Please see the file LICENSE for license details.
package util
import (
"afp"
"runtime"
)
//Buffer will buffer n frames except in the case that the stream
//is closed before n frames have been read.
//For performance reasons, This should be used only where n >> CHAN_BUF_LEN
//due to the necessity of buffering samples in a second channel.
//For n <= CHAN_BUF_LEN Buffer will delegate to FastBuffer.
func Buffer(n int, source <-chan [][]float32) <-chan [][]float32 {
	if n <= afp.CHAN_BUF_LEN {
		FastBuffer(n, source)
		return source
	}
	buff := make(chan [][]float32, n)
	buffered := 0
	for s := range source {
		buff <- s
		buffered++
		// BUG FIX: removed a leftover debug `println(buffered)` that
		// spammed stderr once per buffered frame.
		if buffered >= n {
			break
		}
	}
	//We need to copy all subsequent frames
	//sent into the chan the caller will
	//now be reading from.
	go func() {
		for s := range source {
			buff <- s
		}
	}()
	return buff
}
//Will buffer at least n and at most CHAN_BUF_LEN
//frames before returning.
//
// NOTE(review): this uses the pre-Go 1 closed() builtin, which was
// removed from the language — the file only builds on an ancient
// toolchain. Confirm before attempting modernization.
func FastBuffer(n int, source <-chan [][]float32) int {
	if n > afp.CHAN_BUF_LEN {
		n = afp.CHAN_BUF_LEN
	}
	// Yield the processor until the channel holds n frames or the
	// sender closes it.
	for len(source) < n && !closed(source) {
		runtime.Gosched()
	}
	//This is racey, but can only cause us to buffer more
	//than requested. Shouldn't be a problem.
	return len(source)
}
//
// Package ep is a collection of structures and functions for working with the E-Prints REST API
//
// @author R. S. Doiel, <rsdoiel@caltech.edu>
//
// Copyright (c) 2017, Caltech
// All rights not granted herein are expressly reserved by Caltech.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package ep
import (
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
// Caltech Library packages
"github.com/caltechlibrary/bibtex"
"github.com/caltechlibrary/cli"
"github.com/caltechlibrary/dataset"
)
const (
	// Version is the revision number for this implementation of epgo
	Version = "v0.0.10-beta1"
	// LicenseText holds the string for rendering License info on the command line.
	// The two %s verbs expect an application name and version — confirm at call sites.
	LicenseText = `
%s %s
Copyright (c) 2017, Caltech
All rights not granted herein are expressly reserved by Caltech.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.`
	// EPrintsExportBatchSize sets the summary output frequency when exporting content from E-Prints
	EPrintsExportBatchSize = 1000
	// DefaultFeedSize sets the default size of rss, JSON, HTML include and index lists
	DefaultFeedSize = 25
)
// These are our main bucket and index buckets
var (
	// Primary collection
	// NOTE(review): ePrintBucket is not referenced anywhere in this file;
	// it looks like a leftover from an earlier key/value-store backend —
	// confirm before removing.
	ePrintBucket = []byte("eprints")
	// Select lists delimiter: separates the parts of compound select-list
	// keys (see customLessFn and the Get*By* helpers).
	indexDelimiter = "|"
)
// failCheck logs msg and terminates the process (log.Fatalf) when err
// is non-nil; it is a no-op otherwise. Note the error detail itself is
// not printed — callers embed it in msg.
func failCheck(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s\n", msg)
}
// EPrintsAPI holds the basic connection information to read the REST API for EPrints
// plus the names of the local dataset collection and htdocs directory.
type EPrintsAPI struct {
	XMLName xml.Name `json:"-"`
	// EPGO_EPRINT_URL — base URL of the EPrints server (defaults to http://localhost:8080)
	URL *url.URL `xml:"epgo>eprint_url" json:"eprint_url"`
	// EPGO_DATASET — dataset collection name (defaults to "eprints")
	Dataset string `xml:"epgo>dataset" json:"dataset"`
	// EPGO_HTDOCS — htdocs location (defaults to "htdocs"); not otherwise used in this file
	Htdocs string `xml:"epgo>htdocs" json:"htdocs"`
}
// Person holds the contents of eprint>creators>item>name as a struct.
type Person struct {
	XMLName xml.Name `json:"-"`
	Given   string   `xml:"name>given" json:"given"`
	Family  string   `xml:"name>family" json:"family"`
	ID      string   `xml:"id,omitempty" json:"id"`
	ORCID   string   `xml:"orcid,omitempty" json:"orcid"`
}
// PersonList is an array of pointers to Person structs
type PersonList []*Person
// RelatedURL is a structure containing information about a relationship
// (eprint>related_url>item in the REST XML).
type RelatedURL struct {
	XMLName     xml.Name `json:"-"`
	URL         string   `xml:"url" json:"url"`
	Type        string   `xml:"type" json:"type"`
	Description string   `xml:"description" json:"description"`
}
// NumberingSystem is a structure describing other numbering systems for record
// (eprint>other_numbering_system>item in the REST XML).
type NumberingSystem struct {
	XMLName xml.Name `json:"-"`
	Name    string   `xml:"name" json:"name"`
	ID      string   `xml:"id" json:"id"`
}
// Funder is a structure describing a funding source for record
// (eprint>funders>item in the REST XML).
type Funder struct {
	XMLName     xml.Name `json:"-"`
	Agency      string   `xml:"agency" json:"agency"`
	GrantNumber string   `xml:"grant_number,omitempty" json:"grant_number"`
}
// FunderList is an array of pointers to Funder structs
type FunderList []*Funder
// File describes one stored file inside a Document (files>file in the
// REST XML).
type File struct {
	XMLName   xml.Name `json:"-"`
	ID        string   `xml:"id,attr" json:"id"`
	FileID    int      `xml:"fileid" json:"fileid"`
	DatasetID string   `xml:"datasetid" json:"datasetid"`
	ObjectID  int      `xml:"objectid" json:"objectid"`
	Filename  string   `xml:"filename" json:"filename"`
	MimeType  string   `xml:"mime_type" json:"mime_type"`
	Hash      string   `xml:"hash" json:"hash"`
	HashType  string   `xml:"hash_type" json:"hash_type"`
	FileSize  int      `xml:"filesize" json:"filesize"`
	MTime     string   `xml:"mtime" json:"mtime"`
	URL       string   `xml:"url" json:"url"`
}
// Document describes one document attached to a Record
// (eprint>documents>document in the REST XML), including its Files.
type Document struct {
	XMLName   xml.Name `json:"-"`
	ID        string   `xml:"id,attr" json:"id"`
	DocID     int      `xml:"docid" json:"docid"`
	RevNumber int      `xml:"rev_number" json:"rev_number"`
	Files     []*File  `xml:"files>file" json:"files"`
	EPrintID  int      `xml:"eprintid" json:"eprintid"`
	Pos       int      `xml:"pos" json:"pos"`
	Placement int      `xml:"placement" json:"placement"`
	MimeType  string   `xml:"mime_type" json:"mime_type"`
	Format    string   `xml:"format" json:"format"`
	Language  string   `xml:"language" json:"language"`
	Security  string   `xml:"security" json:"security"`
	License   string   `xml:"license" json:"license"`
	Main      string   `xml:"main" json:"main"`
	Content   string   `xml:"content" json:"content"`
}
// DocumentList is an array of pointers to Document structs
type DocumentList []*Document
// Record maps an EPrints REST <eprint> document to a structure that can
// be converted to JSON easily.
type Record struct {
	XMLName              xml.Name           `json:"-"`
	Title                string             `xml:"eprint>title" json:"title"`
	URI                  string             `json:"uri"`
	Abstract             string             `xml:"eprint>abstract" json:"abstract"`
	Documents            DocumentList       `xml:"eprint>documents>document" json:"documents"`
	Note                 string             `xml:"eprint>note" json:"note"`
	ID                   int                `xml:"eprint>eprintid" json:"id"`
	RevNumber            int                `xml:"eprint>rev_number" json:"rev_number"`
	UserID               int                `xml:"eprint>userid" json:"userid"`
	Dir                  string             `xml:"eprint>dir" json:"eprint_dir"`
	Datestamp            string             `xml:"eprint>datestamp" json:"datestamp"`
	LastModified         string             `xml:"eprint>lastmod" json:"lastmod"`
	StatusChange         string             `xml:"eprint>status_changed" json:"status_changed"`
	Type                 string             `xml:"eprint>type" json:"type"`
	MetadataVisibility   string             `xml:"eprint>metadata_visibility" json:"metadata_visibility"`
	Creators             PersonList         `xml:"eprint>creators>item" json:"creators"`
	IsPublished          string             `xml:"eprint>ispublished" json:"ispublished"`
	Subjects             []string           `xml:"eprint>subjects>item" json:"subjects"`
	FullTextStatus       string             `xml:"eprint>full_text_status" json:"full_text_status"`
	Keywords             string             `xml:"eprint>keywords" json:"keywords"`
	Date                 string             `xml:"eprint>date" json:"date"`
	DateType             string             `xml:"eprint>date_type" json:"date_type"`
	Publication          string             `xml:"eprint>publication" json:"publication"`
	Volume               string             `xml:"eprint>volume" json:"volume"`
	Number               string             `xml:"eprint>number" json:"number"`
	PageRange            string             `xml:"eprint>pagerange" json:"pagerange"`
	IDNumber             string             `xml:"eprint>id_number" json:"id_number"`
	// NOTE(review): "Referred" is a misspelling of "refereed" (see the xml
	// tag). It is exported, so renaming would break callers — flagging only.
	Referred             bool               `xml:"eprint>refereed" json:"refereed"`
	ISSN                 string             `xml:"eprint>issn" json:"issn"`
	OfficialURL          string             `xml:"eprint>official_url" json:"official_url"`
	RelatedURL           []*RelatedURL      `xml:"eprint>related_url>item" json:"related_url"`
	ReferenceText        []string           `xml:"eprint>referencetext>item" json:"referencetext"`
	Rights               string             `xml:"eprint>rights" json:"rights"`
	// NOTE(review): xml tag "official_cit" looks truncated (elsewhere the
	// field names are spelled out) — confirm against the EPrints schema.
	OfficialCitation     string             `xml:"eprint>official_cit" json:"official_citation"`
	OtherNumberingSystem []*NumberingSystem `xml:"eprint>other_numbering_system>item,omitempty" json:"other_numbering_system"`
	Funders              FunderList         `xml:"eprint>funders>item" json:"funders"`
	Collection           string             `xml:"eprint>collection" json:"collection"`
	Reviewer             string             `xml:"eprint>reviewer" json:"reviewer"`
	LocalGroup           []string           `xml:"eprint>local_group>item" json:"local_group"`
}
// ePrintIDs captures the HTML listing returned by the REST API root:
// the anchor texts under body>ul>li become the IDs slice.
type ePrintIDs struct {
	XMLName xml.Name `xml:"html" json:"-"`
	IDs     []string `xml:"body>ul>li>a" json:"ids"`
}
// normalizeDate expands a partial "YYYY[-MM[-DD]]" date into a fully
// zero-padded "YYYY-MM-DD" string. Missing month/day parts default to
// "01" and unparsable numeric fields default to 1.
func normalizeDate(in string) string {
	parts := strings.Split(in, "-")
	// Pad out to year, month and day.
	for len(parts) < 3 {
		parts = append(parts, "01")
	}
	for i, p := range parts {
		x, err := strconv.Atoi(p)
		if err != nil {
			x = 1
		}
		verb := "%0.2d"
		if i == 0 {
			verb = "%0.4d"
		}
		parts[i] = fmt.Sprintf(verb, x)
	}
	return strings.Join(parts, "-")
}
// first returns the first element of s, or "" when s is empty.
func first(s []string) string {
	if len(s) == 0 {
		return ""
	}
	return s[0]
}
// second returns the second element of s, or "" when s has fewer than
// two elements.
func second(s []string) string {
	if len(s) < 2 {
		return ""
	}
	return s[1]
}
// last returns the last element of s, or "" when s is empty.
func last(s []string) string {
	if len(s) == 0 {
		return ""
	}
	return s[len(s)-1]
}
// ToBibTeXElement takes an epgo.Record and turns it into a bibtex.Element record
func (rec *Record) ToBibTeXElement() *bibtex.Element {
	bib := &bibtex.Element{}
	bib.Set("type", rec.Type)
	bib.Set("id", fmt.Sprintf("eprint-%d", rec.ID))
	bib.Set("title", rec.Title)
	if len(rec.Abstract) > 0 {
		bib.Set("abstract", rec.Abstract)
	}
	// NOTE(review): this compares DateType to "pub" while PubDate()
	// compares it to "published" — confirm which value EPrints emits.
	if rec.DateType == "pub" {
		dt, err := time.Parse("2006-01-02", rec.Date)
		// BUG FIX: the original set year/month only when time.Parse
		// FAILED, which formatted the zero time ("0001"/"January").
		// Set them when parsing succeeds.
		if err == nil {
			bib.Set("year", dt.Format("2006"))
			bib.Set("month", dt.Format("January"))
		}
	}
	if len(rec.PageRange) > 0 {
		bib.Set("pages", rec.PageRange)
	}
	/*
		if len(rec.Note) > 0 {
			bib.Set("note", rec.Note)
		}
	*/
	if len(rec.Creators) > 0 {
		people := []string{}
		for _, person := range rec.Creators {
			people = append(people, fmt.Sprintf("%s, %s", person.Family, person.Given))
		}
		bib.Set("author", strings.Join(people, " and "))
	}
	switch rec.Type {
	case "article":
		bib.Set("journal", rec.Publication)
	case "book":
		bib.Set("publisher", rec.Publication)
	}
	if len(rec.Volume) > 0 {
		bib.Set("volume", rec.Volume)
	}
	if len(rec.Number) > 0 {
		bib.Set("number", rec.Number)
	}
	return bib
}
// New creates a new API instance from the configuration values
// eprint_url, htdocs and dataset, applying defaults for any that are
// empty ("http://localhost:8080", "htdocs" and "eprints" respectively).
func New(cfg *cli.Config) (*EPrintsAPI, error) {
	eprintURL := cfg.Get("eprint_url")
	if eprintURL == "" {
		eprintURL = "http://localhost:8080"
	}
	u, err := url.Parse(eprintURL)
	if err != nil {
		return nil, fmt.Errorf("eprint url is malformed %s, %s", eprintURL, err)
	}
	api := new(EPrintsAPI)
	api.URL = u
	if api.Htdocs = cfg.Get("htdocs"); api.Htdocs == "" {
		api.Htdocs = "htdocs"
	}
	if api.Dataset = cfg.Get("dataset"); api.Dataset == "" {
		api.Dataset = "eprints"
	}
	return api, nil
}
// byURI sorts URI strings by the numeric value of their final path
// component (extension stripped), in descending order. Non-numeric
// names compare as "not less".
type byURI []string

func (s byURI) Len() int      { return len(s) }
func (s byURI) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byURI) Less(i, j int) bool {
	// numeric name of the entry: basename without its extension.
	stem := func(p string) string {
		return strings.TrimSuffix(path.Base(p), path.Ext(p))
	}
	a1, err := strconv.Atoi(stem(s[i]))
	if err != nil {
		return false
	}
	a2, err := strconv.Atoi(stem(s[j]))
	if err != nil {
		return false
	}
	return a1 > a2
}
// ListEPrintsURI returns a list of eprint record ids from the EPrints REST API.
// Each result is a URI of the form "/rest/eprint/<id>.xml", de-duplicated.
//
// NOTE(review): this mutates api.URL.Path in place, so the method is not
// safe for concurrent use of the same EPrintsAPI — confirm callers.
func (api *EPrintsAPI) ListEPrintsURI() ([]string, error) {
	var (
		results []string
	)
	api.URL.Path = path.Join("rest", "eprint") + "/"
	resp, err := http.Get(api.URL.String())
	if err != nil {
		return nil, fmt.Errorf("requested %s, %s", api.URL.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("http error %s, %s", api.URL.String(), resp.Status)
	}
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("content can't be read %s, %s", api.URL.String(), err)
	}
	// The listing is an HTML page; ePrintIDs pulls the anchor texts out.
	eIDs := new(ePrintIDs)
	err = xml.Unmarshal(content, &eIDs)
	if err != nil {
		return nil, err
	}
	// Build a list of Unique IDs in a map, then convert unique querys to results array
	m := make(map[string]bool)
	for _, val := range eIDs.IDs {
		if strings.HasSuffix(val, ".xml") == true {
			uri := "/" + path.Join("rest", "eprint", val)
			if _, hasID := m[uri]; hasID == false {
				// Save the new ID found
				m[uri] = true
				// Only store Unique IDs in result
				results = append(results, uri)
			}
		}
	}
	return results, nil
}
// GetEPrint retrieves an EPrint record via REST API
// Returns a Record structure, the raw XML and an error.
// On an XML decode failure the raw content is still returned alongside
// the error so callers can inspect it.
//
// NOTE(review): like ListEPrintsURI, this mutates api.URL.Path in place
// and is therefore not safe for concurrent use — confirm callers.
func (api *EPrintsAPI) GetEPrint(uri string) (*Record, []byte, error) {
	api.URL.Path = uri
	resp, err := http.Get(api.URL.String())
	if err != nil {
		return nil, nil, fmt.Errorf("requested %s, %s", api.URL.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, nil, fmt.Errorf("http error %s, %s", api.URL.String(), resp.Status)
	}
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, fmt.Errorf("content can't be read %s, %s", api.URL.String(), err)
	}
	rec := new(Record)
	err = xml.Unmarshal(content, &rec)
	if err != nil {
		return nil, content, err
	}
	return rec, content, nil
}
// ToNames takes an array of pointers to Person and returns a list of
// names formatted "Family, Given".
func (persons PersonList) ToNames() []string {
	var names []string
	for _, p := range persons {
		names = append(names, p.Family+", "+p.Given)
	}
	return names
}
// ToORCIDs takes an array of pointers to Person and returns a list of
// their ORCID ids (including empty strings for persons without one).
func (persons PersonList) ToORCIDs() []string {
	var orcids []string
	for _, p := range persons {
		orcids = append(orcids, p.ORCID)
	}
	return orcids
}
// ToAgencies takes an array of pointers to Funder and returns a list of
// agency names.
func (funders FunderList) ToAgencies() []string {
	var agencies []string
	for _, f := range funders {
		agencies = append(agencies, f.Agency)
	}
	return agencies
}
// ToGrantNumbers takes an array of pointers to Funder and returns a list
// of grant numbers. (The previous comment said "Agency names" — a
// copy-paste error; the code collects GrantNumber.)
func (funders FunderList) ToGrantNumbers() []string {
	var result []string
	for _, funder := range funders {
		result = append(result, funder.GrantNumber)
	}
	return result
}
// PubDate returns the record's date when its date_type is "published",
// otherwise "".
//
// NOTE(review): ToBibTeXElement compares DateType against "pub" while
// this uses "published" — confirm which value EPrints emits.
func (record *Record) PubDate() string {
	if record.DateType != "published" {
		return ""
	}
	return record.Date
}
// ListURI returns up to count eprint record ids from the dataset,
// starting at offset start. count <= 0 means "all remaining ids".
func (api *EPrintsAPI) ListURI(start, count int) ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("ListURI() %s, %s", api.Dataset, err))
	defer c.Close()
	ids := c.Keys()
	results := []string{}
	if count <= 0 {
		count = len(ids) + 1
	}
	// BUG FIX: the index was never advanced and never bounds-checked, so
	// the original either appended ids[start] repeatedly or paniced when
	// start was past the end of the key list.
	for i := start; i < len(ids) && count > 0; i++ {
		results = append(results, ids[i])
		count--
	}
	return results, nil
}
// Get retrieves an EPrint record from the dataset
// keyed by uri; returns the decoded *Record or the read error.
func (api *EPrintsAPI) Get(uri string) (*Record, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck terminates the process (log.Fatalf) on open failure.
	failCheck(err, fmt.Sprintf("Get() %s, %s", api.Dataset, err))
	defer c.Close()
	record := new(Record)
	if err := c.Read(uri, record); err != nil {
		return nil, err
	}
	return record, nil
}
// customLessFn provides a Less() for ascending sorts non pubDate keys and descending sort for pubDate keys.
// Keys are 1-4 parts joined by indexDelimiter; comparison is component
// by component.
//
// NOTE(review): the direction of the per-component comparisons is not
// uniform (e.g. two-part keys compare the first component descending
// while three/four-part keys compare it ascending) — confirm intent.
func customLessFn(s []string, i, j int) bool {
	a, b := strings.Split(s[i], indexDelimiter), strings.Split(s[j], indexDelimiter)
	switch {
	// Four part keys
	// BUG FIX: the last comparison read `a[3] < a[3]` (always false),
	// making the fourth component a no-op; compare against b[3].
	case len(a) == 4 && a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] < b[3]:
		return true
	case len(a) == 4 && a[0] == b[0] && a[1] == b[1] && a[2] > b[2]:
		return true
	case len(a) == 4 && a[0] == b[0] && a[1] < b[1]:
		return true
	case len(a) == 4 && a[0] < b[0]:
		return true
	// Three part keys
	case len(a) == 3 && a[0] == b[0] && a[1] == b[1] && a[2] < b[2]:
		return true
	case len(a) == 3 && a[0] == b[0] && a[1] > b[1]:
		return true
	case len(a) == 3 && a[0] < b[0]:
		return true
	// Two part keys
	case len(a) == 2 && a[0] == b[0] && a[1] < b[1]:
		return true
	case len(a) == 2 && a[0] > b[0]:
		return true
	// Single Keys
	case len(a) == 1 && a[0] < b[0]:
		return true
	}
	return false
}
// GetIDsBySelectList returns a list of ePrint IDs from a select list,
// filtered by filterFn. For each key that passes the filter, only the
// final indexDelimiter-separated part (the ePrint ID) is kept.
// SelectLists are sorted by creation...
func (api *EPrintsAPI) GetIDsBySelectList(slName string, filterFn func(s string) bool) ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetIDs() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select(slName)
	if err != nil {
		return nil, err
	}
	ids := []string{}
	for _, key := range sl.List() {
		if !filterFn(key) {
			continue
		}
		ids = append(ids, last(strings.Split(key, indexDelimiter)))
	}
	return ids, err
}
// getRecordList takes a list of ePrint IDs, reads each record and keeps
// those accepted by filterFn, skipping the first start matches and
// returning at most count records. count <= 0 means "no limit".
func getRecordList(c *dataset.Collection, ePrintIDs []string, start int, count int, filterFn func(*Record) bool) ([]*Record, error) {
	results := []*Record{}
	i := 0
	if count <= 0 {
		// Large enough that the limit can never trigger.
		count = len(ePrintIDs) + 1
	}
	for _, id := range ePrintIDs {
		rec := new(Record)
		// Consistency fix: pass the *Record itself, as Get() does,
		// rather than a **Record.
		if err := c.Read(id, rec); err != nil {
			return results, err
		}
		if filterFn(rec) {
			if i >= start {
				results = append(results, rec)
				// BUG FIX: count was previously decremented for matches
				// skipped before start too, so paging with start > 0
				// returned fewer than count records (possibly none).
				count--
			}
			i++
			if count <= 0 {
				return results, nil
			}
		}
	}
	return results, nil
}
// GetAllRecords reads and returns every record in the "pubDate" select
// list order, with no paging and no filtering.
func (api *EPrintsAPI) GetAllRecords() ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetAllRecords() %s, %s", api.Dataset, err))
	defer c.Close()
	// Accept every key and every record.
	ids, err := api.GetIDsBySelectList("pubDate", func(string) bool { return true })
	if err != nil {
		return nil, err
	}
	return getRecordList(c, ids, 0, -1, func(*Record) bool { return true })
}
// GetPublications reads the "pubDate" select list and returns the
// records whose ispublished field is "pub", honoring start/count paging.
func (api *EPrintsAPI) GetPublications(start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	ids, err := api.GetIDsBySelectList("pubDate", func(string) bool { return true })
	if err != nil {
		return nil, err
	}
	isPublished := func(rec *Record) bool {
		return rec.IsPublished == "pub"
	}
	return getRecordList(c, ids, start, count, isPublished)
}
// GetArticles reads the "pubDate" select list and returns the records
// that are published journal articles, honoring start/count paging.
func (api *EPrintsAPI) GetArticles(start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	ids, err := api.GetIDsBySelectList("pubDate", func(string) bool { return true })
	if err != nil {
		return nil, err
	}
	isPublishedArticle := func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	}
	return getRecordList(c, ids, start, count, isPublishedArticle)
}
// GetLocalGroups returns the unique local group names found in the
// "localGroup" select list (collapsing consecutive keys that share the
// same leading group name).
func (api *EPrintsAPI) GetLocalGroups() ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetLocalGroups() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select("localGroup")
	if err != nil {
		return nil, err
	}
	groupNames := []string{}
	prev := ""
	for _, key := range sl.List() {
		name := first(strings.Split(key, indexDelimiter))
		if name != prev {
			groupNames = append(groupNames, name)
			prev = name
		}
	}
	return groupNames, nil
}
// GetLocalGroupPublications returns the published EPrint records whose
// local group matches groupName, honoring start/count paging.
func (api *EPrintsAPI) GetLocalGroupPublications(groupName string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetLocalGroupPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep only select-list keys whose leading part matches groupName.
	byGroup := func(s string) bool {
		return first(strings.Split(s, indexDelimiter)) == groupName
	}
	ids, err := api.GetIDsBySelectList("localGroup", byGroup)
	if err != nil {
		return nil, err
	}
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub"
	})
}
// GetLocalGroupArticles returns the published journal articles whose
// local group matches groupName, honoring start/count paging.
func (api *EPrintsAPI) GetLocalGroupArticles(groupName string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetLocalGroupArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep only select-list keys whose leading part matches groupName.
	byGroup := func(s string) bool {
		return first(strings.Split(s, indexDelimiter)) == groupName
	}
	ids, err := api.GetIDsBySelectList("localGroup", byGroup)
	if err != nil {
		return nil, err
	}
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	})
}
// GetORCIDs returns a list unique of ORCID IDs in index
func (api *EPrintsAPI) GetORCIDs() ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck exits the process on error, so err is nil past this point.
	failCheck(err, fmt.Sprintf("GetORCIDs() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select("orcid")
	if err != nil {
		return nil, err
	}
	// Each key is "ORCID|date|eprintID" (see BuildSelectLists); take the
	// first field and drop consecutive repeats.
	// NOTE(review): only *adjacent* duplicates are removed — this assumes the
	// select list is sorted by ORCID; verify against the list's sort order.
	orcids := []string{}
	lastORCID := ""
	for _, id := range sl.List() {
		orcid := first(strings.Split(id, indexDelimiter))
		if orcid != lastORCID {
			lastORCID = orcid
			orcids = append(orcids, orcid)
		}
	}
	return orcids, nil
}
// GetORCIDPublications returns a list of EPrint records with a given ORCID
func (api *EPrintsAPI) GetORCIDPublications(orcid string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetORCIDPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the select-list keys whose leading field is this ORCID.
	ids, err := api.GetIDsBySelectList("orcid", func(s string) bool {
		return orcid == first(strings.Split(s, indexDelimiter))
	})
	if err != nil {
		return nil, err
	}
	// Keep only records flagged as published.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub"
	})
}
// GetORCIDArticles returns a list of EPrint records with a given ORCID
func (api *EPrintsAPI) GetORCIDArticles(orcid string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetORCIDArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the select-list keys whose leading field is this ORCID.
	ids, err := api.GetIDsBySelectList("orcid", func(s string) bool {
		return orcid == first(strings.Split(s, indexDelimiter))
	})
	if err != nil {
		return nil, err
	}
	// Keep only published journal articles.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	})
}
// RenderEPrint writes a single EPrint record to disc.
// The record is serialized as JSON to <basepath>/<eprint id>.json.
func (api *EPrintsAPI) RenderEPrint(basepath string, record *Record) error {
	src, err := json.Marshal(record)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(path.Join(basepath, fmt.Sprintf("%d.json", record.ID)), src, 0664)
}
// RenderDocuments writes JSON, BibTeX documents to the directory indicated by docpath
// docTitle and docDescription are accepted for interface stability but are not
// used in the visible rendering steps.
func (api *EPrintsAPI) RenderDocuments(docTitle, docDescription, docpath string, records []*Record) error {
	// Create the directory part of docpath if neccessary.
	// BUG FIX: the original used os.Open purely as an existence probe and
	// leaked the returned *os.File (never closed); os.Stat performs the same
	// check without opening a handle. The MkdirAll error is now propagated
	// instead of silently ignored.
	if _, err := os.Stat(path.Join(api.Htdocs, docpath)); err != nil && os.IsNotExist(err) == true {
		if err := os.MkdirAll(path.Join(api.Htdocs, path.Dir(docpath)), 0775); err != nil {
			return fmt.Errorf("Can't create %s, %s", path.Dir(docpath), err)
		}
	}
	// Writing JSON file
	fname := path.Join(api.Htdocs, docpath+".json")
	src, err := json.Marshal(records)
	if err != nil {
		return fmt.Errorf("Can't convert records to JSON %s, %s", fname, err)
	}
	err = ioutil.WriteFile(fname, src, 0664)
	if err != nil {
		return fmt.Errorf("Can't write %s, %s", fname, err)
	}
	// Write out BibTeX file.
	bibDoc := []string{}
	for _, rec := range records {
		bibDoc = append(bibDoc, rec.ToBibTeXElement().String())
	}
	fname = path.Join(api.Htdocs, docpath+".bib")
	err = ioutil.WriteFile(fname, []byte(strings.Join(bibDoc, "\n\n")), 0664)
	if err != nil {
		return fmt.Errorf("Can't write %s, %s", fname, err)
	}
	return nil
}
// BuildPages generates JSON and BibTeX versions of collected records
// by calling RenderDocuments with the appropriate data.
// filter selects which records to include (e.g. a GetPublications-style func).
func (api *EPrintsAPI) BuildPages(feedSize int, title, target string, filter func(*EPrintsAPI, int, int) ([]*Record, error)) error {
	if feedSize < 1 {
		feedSize = DefaultFeedSize
	}
	// docPath is used only in log/error messages below; RenderDocuments
	// joins api.Htdocs with target itself.
	docPath := path.Join(api.Htdocs, target)
	// Collect the published records
	records, err := filter(api, 0, feedSize)
	if err != nil {
		return err
	}
	if len(records) == 0 {
		return fmt.Errorf("Zero records for %q, %s", title, docPath)
	}
	log.Printf("%d records found for %q %s", len(records), title, docPath)
	if err := api.RenderDocuments(title, fmt.Sprintf("Building pages 0 to %d descending", feedSize), target, records); err != nil {
		return fmt.Errorf("%q %s error, %s", title, docPath, err)
	}
	return nil
}
// BuildSelectLists iterates over the exported data and creates fresh selectLists
// ("pubDate", "localGroup", "orcid", "funder", "grantNumber"). Each list key is
// a set of indexDelimiter-joined fields ending in the eprint ID, which the
// Get*-style accessors later split apart with first()/last().
func (api *EPrintsAPI) BuildSelectLists() error {
	//FIXME: This should probably be Open not Create on dataset...
	//c, err := dataset.Create(api.Dataset, dataset.GenerateBucketNames(dataset.DefaultAlphabet, 2))
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("BuildSelectLists() %s, %s", api.Dataset, err))
	defer c.Close()
	sLists := map[string]*dataset.SelectList{}
	// Clear the select lists
	log.Println("Clearing select lists")
	// expected select lists used by epgo
	slNames := []string{
		"pubDate",
		"localGroup",
		"orcid",
		"funder",
		"grantNumber",
	}
	// Clear the select lists if they already exist
	for _, name := range slNames {
		c.Clear(name)
		sLists[name], err = c.Select(name)
		if err != nil {
			return err
		}
		sLists[name].CustomLessFn = customLessFn
	}
	// Now iterate over the records and populate select lists
	log.Println("Building select lists")
	for i, ky := range c.Keys() {
		rec := new(Record)
		// NOTE(review): &rec is a **Record here while Get() passes a *Record
		// to c.Read(); confirm the dataset API accepts both forms.
		err := c.Read(ky, &rec)
		if err != nil {
			return err
		}
		// Update pubDate select list
		dt := normalizeDate(rec.Date)
		if rec.DateType == "published" && rec.Date != "" {
			sLists["pubDate"].Push(fmt.Sprintf("%s%s%d", dt, indexDelimiter, rec.ID))
		}
		// Update localGroup select list; strip any embedded delimiter so the
		// composite key stays parseable.
		if len(rec.LocalGroup) > 0 {
			for _, grp := range rec.LocalGroup {
				grp = strings.TrimSpace(strings.Replace(grp, indexDelimiter, " ", -1))
				if len(grp) > 0 {
					sLists["localGroup"].Push(fmt.Sprintf("%s%s%s%s%d", grp, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Update orcid select list
		if len(rec.Creators) > 0 {
			for _, person := range rec.Creators {
				orcid := strings.TrimSpace(person.ORCID)
				// Update orcid select list
				if len(orcid) > 0 {
					sLists["orcid"].Push(fmt.Sprintf("%s%s%s%s%d", orcid, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Add funders and grantNumbers to select lists
		if len(rec.Funders) > 0 {
			for _, funder := range rec.Funders {
				funderName := strings.TrimSpace(strings.Replace(funder.Agency, indexDelimiter, " ", -1))
				grantNumber := strings.TrimSpace(strings.Replace(funder.GrantNumber, indexDelimiter, " ", -1))
				// Update funder select list
				if len(funderName) > 0 {
					sLists["funder"].Push(fmt.Sprintf("%s%s%s%s%d", funderName, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
				if len(funderName) > 0 && len(grantNumber) > 0 {
					sLists["grantNumber"].Push(fmt.Sprintf("%s%s%s%s%s%s%d", funderName, indexDelimiter, grantNumber, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Periodic progress logging.
		if (i % 1000) == 0 {
			log.Printf("%d recs processed", i)
		}
	}
	log.Printf("Sorting and save %d lists", len(sLists))
	// IDIOM FIX: `for name, _ := range` replaced with `for name := range`.
	for name := range sLists {
		log.Printf("Sorting and saving %s", name)
		sLists[name].Sort(dataset.ASC)
		// Finally we want to save our sorted results...
		sLists[name].SaveList()
	}
	return nil
}
// Added CaltechTHESIS custom fields
//
// Package ep is a collection of structures and functions for working with the E-Prints REST API
//
// @author R. S. Doiel, <rsdoiel@caltech.edu>
//
// Copyright (c) 2017, Caltech
// All rights not granted herein are expressly reserved by Caltech.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
package ep
import (
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"path"
"strconv"
"strings"
"time"
// Caltech Library packages
"github.com/caltechlibrary/bibtex"
"github.com/caltechlibrary/cli"
"github.com/caltechlibrary/dataset"
)
const (
	// Version is the revision number for this implementation of epgo
	Version = "v0.0.10-beta1"
	// LicenseText holds the string for rendering License info on the command line
	// The leading "%s %s" verbs expect the application name and version.
	LicenseText = `
%s %s
Copyright (c) 2017, Caltech
All rights not granted herein are expressly reserved by Caltech.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.`
	// EPrintsExportBatchSize sets the summary output frequency when exporting content from E-Prints
	EPrintsExportBatchSize = 1000
	// DefaultFeedSize sets the default size of rss, JSON, HTML include and index lists
	DefaultFeedSize = 25
)
// These are our main bucket and index buckets
var (
	// Primary collection
	// NOTE(review): ePrintBucket is not referenced anywhere in this file's
	// visible code; possibly a leftover from an earlier storage backend.
	ePrintBucket = []byte("eprints")
	// Select lists delimiter
	// indexDelimiter separates the fields of composite select-list keys
	// (e.g. "group|date|id"); see BuildSelectLists().
	indexDelimiter = "|"
)
// failCheck logs msg and terminates the process (log.Fatalf) when err is
// non-nil; it is a no-op otherwise.
func failCheck(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s\n", msg)
}
// EPrintsAPI holds the basic connectin information to read the REST API for EPrints
// Each field is populated from the environment-style config keys noted below
// (see New()).
type EPrintsAPI struct {
	XMLName xml.Name `json:"-"`
	// EPGO_EPRINT_URL — base URL of the EPrints REST API
	URL *url.URL `xml:"epgo>eprint_url" json:"eprint_url"`
	// EPGO_DATASET — name of the local dataset collection
	Dataset string `xml:"epgo>dataset" json:"dataset"`
	// EPGO_HTDOCS — directory where rendered documents are written
	Htdocs string `xml:"epgo>htdocs" json:"htdocs"`
}
// Person returns the contents of eprint>creators>item>name as a struct.
// Also used for thesis advisors and committee members (see Record).
type Person struct {
	XMLName xml.Name `json:"-"`
	Given   string   `xml:"name>given" json:"given"`
	Family  string   `xml:"name>family" json:"family"`
	ID      string   `xml:"id,omitempty" json:"id"`
	// Customizations for Caltech Library
	ORCID string `xml:"orcid,omitempty" json:"orcid,omitempty"`
	EMail string `xml:"email,omitempty" json:"email,omitempty"`
	Role  string `xml:"role,omitempty" json:"role,omitempty"`
}
// PersonList is an array of pointers to Person structs.
// It carries the ToNames()/ToORCIDs() helper methods.
type PersonList []*Person
// RelatedURL is a structure containing information about a relationship
// (mapped from eprint>related_url>item in the EPrints XML).
type RelatedURL struct {
	XMLName     xml.Name `json:"-"`
	URL         string   `xml:"url" json:"url"`
	Type        string   `xml:"type" json:"type"`
	Description string   `xml:"description" json:"description"`
}
// NumberingSystem is a structure describing other numbering systems for record
// (mapped from eprint>other_numbering_system>item).
type NumberingSystem struct {
	XMLName xml.Name `json:"-"`
	Name    string   `xml:"name" json:"name"`
	ID      string   `xml:"id" json:"id"`
}
// Funder is a structure describing a funding source for record
// (mapped from eprint>funders>item).
type Funder struct {
	XMLName     xml.Name `json:"-"`
	Agency      string   `xml:"agency" json:"agency"`
	GrantNumber string   `xml:"grant_number,omitempty" json:"grant_number"`
}
// FunderList is an array of pointers to Funder structs.
// It carries the ToAgencies()/ToGrantNumbers() helper methods.
type FunderList []*Funder
// File structures in Document describe a single stored file attached to a
// document (filename, mime type, hash and size as reported by EPrints).
type File struct {
	XMLName   xml.Name `json:"-"`
	ID        string   `xml:"id,attr" json:"id"`
	FileID    int      `xml:"fileid" json:"fileid"`
	DatasetID string   `xml:"datasetid" json:"datasetid"`
	ObjectID  int      `xml:"objectid" json:"objectid"`
	Filename  string   `xml:"filename" json:"filename"`
	MimeType  string   `xml:"mime_type" json:"mime_type"`
	Hash      string   `xml:"hash" json:"hash"`
	HashType  string   `xml:"hash_type" json:"hash_type"`
	FileSize  int      `xml:"filesize" json:"filesize"`
	MTime     string   `xml:"mtime" json:"mtime"`
	URL       string   `xml:"url" json:"url"`
}
// Document structures in Record describe one EPrints document (a deposited
// item revision with its associated files).
type Document struct {
	XMLName   xml.Name `json:"-"`
	ID        string   `xml:"id,attr" json:"id"`
	DocID     int      `xml:"docid" json:"docid"`
	RevNumber int      `xml:"rev_number" json:"rev_number"`
	Files     []*File  `xml:"files>file" json:"files"`
	EPrintID  int      `xml:"eprintid" json:"eprintid"`
	Pos       int      `xml:"pos" json:"pos"`
	Placement int      `xml:"placement" json:"placement"`
	MimeType  string   `xml:"mime_type" json:"mime_type"`
	Format    string   `xml:"format" json:"format"`
	Language  string   `xml:"language" json:"language"`
	Security  string   `xml:"security" json:"security"`
	License   string   `xml:"license" json:"license"`
	Main      string   `xml:"main" json:"main"`
	Content   string   `xml:"content" json:"content"`
}
// DocumentList is an array of pointers to Document structs.
type DocumentList []*Document
// Record returns a structure that can be converted to JSON easily.
// It is unmarshaled directly from the EPrints REST API XML (see GetEPrint)
// and stored/read via the dataset collection.
type Record struct {
	XMLName              xml.Name           `json:"-"`
	Title                string             `xml:"eprint>title" json:"title"`
	URI                  string             `json:"uri"`
	Abstract             string             `xml:"eprint>abstract" json:"abstract"`
	Documents            DocumentList       `xml:"eprint>documents>document" json:"documents"`
	Note                 string             `xml:"eprint>note" json:"note"`
	ID                   int                `xml:"eprint>eprintid" json:"id"`
	RevNumber            int                `xml:"eprint>rev_number" json:"rev_number"`
	UserID               int                `xml:"eprint>userid" json:"userid"`
	Dir                  string             `xml:"eprint>dir" json:"eprint_dir"`
	Datestamp            string             `xml:"eprint>datestamp" json:"datestamp"`
	LastModified         string             `xml:"eprint>lastmod" json:"lastmod"`
	StatusChange         string             `xml:"eprint>status_changed" json:"status_changed"`
	Type                 string             `xml:"eprint>type" json:"type"`
	MetadataVisibility   string             `xml:"eprint>metadata_visibility" json:"metadata_visibility"`
	Creators             PersonList         `xml:"eprint>creators>item" json:"creators"`
	// IsPublished is compared against "pub" throughout this package.
	IsPublished          string             `xml:"eprint>ispublished" json:"ispublished"`
	Subjects             []string           `xml:"eprint>subjects>item" json:"subjects"`
	FullTextStatus       string             `xml:"eprint>full_text_status" json:"full_text_status"`
	Keywords             string             `xml:"eprint>keywords" json:"keywords"`
	// Date plus DateType ("published") drive the pubDate select list.
	Date                 string             `xml:"eprint>date" json:"date"`
	DateType             string             `xml:"eprint>date_type" json:"date_type"`
	Publication          string             `xml:"eprint>publication" json:"publication"`
	Volume               string             `xml:"eprint>volume" json:"volume"`
	Number               string             `xml:"eprint>number" json:"number"`
	PageRange            string             `xml:"eprint>pagerange" json:"pagerange"`
	IDNumber             string             `xml:"eprint>id_number" json:"id_number"`
	Refereed             bool               `xml:"eprint>refereed" json:"refereed"`
	ISSN                 string             `xml:"eprint>issn" json:"issn"`
	OfficialURL          string             `xml:"eprint>official_url" json:"official_url"`
	RelatedURL           []*RelatedURL      `xml:"eprint>related_url>item" json:"related_url"`
	ReferenceText        []string           `xml:"eprint>referencetext>item" json:"referencetext"`
	Rights               string             `xml:"eprint>rights" json:"rights"`
	OfficialCitation     string             `xml:"eprint>official_cit" json:"official_citation"`
	OtherNumberingSystem []*NumberingSystem `xml:"eprint>other_numbering_system>item,omitempty" json:"other_numbering_system"`
	Funders              FunderList         `xml:"eprint>funders>item" json:"funders"`
	Collection           string             `xml:"eprint>collection" json:"collection"`
	// Thesis repository Customizations (CaltechTHESIS custom fields)
	ThesisType          string     `xml:"eprint>thesis_type,omitempty" json:"thesis_type"`
	ThesisAdvisors      PersonList `xml:"eprint>thesis_advisor>item,omitempty" json:"thesis_advisor,omitempty"`
	ThesisCommittee     PersonList `xml:"eprint>thesis_committee>item,omitempty" json:"thesis_committee,omitempty"`
	ThesisDegree        string     `xml:"eprint>thesis_degree,omitempty" json:"thesis_degree,omitempty"`
	ThesisDegreeGrantor string     `xml:"eprint>thesis_degree_grantor,omitempty" json:"thesis_degree_grantor,omitempty"`
	ThesisDefenseDate   string     `xml:"eprint>thesis_defense_date,omitempty" json:"thesis_defense_date,omitempty"`
	OptionMajor         string     `xml:"eprint>option_major>item,omitempty" json:"option_major,omitempty"`
	OptionMinor         string     `xml:"eprint>option_minor>item,omitempty" json:"option_minor,omitempty"`
	GradOfcApprovalDate string     `xml:"eprint>gradofc_approval_date,omitempty" json:"gradofc_approval_date,omitempty"`
	Reviewer            string     `xml:"eprint>reviewer" json:"reviewer"`
	LocalGroup          []string   `xml:"eprint>local_group>item" json:"local_group"`
}
// ePrintIDs models the HTML listing returned by the EPrints REST API's
// /rest/eprint/ endpoint: each <a> inside the body's <ul> is a record ID.
type ePrintIDs struct {
	XMLName xml.Name `xml:"html" json:"-"`
	IDs     []string `xml:"body>ul>li>a" json:"ids"`
}
// normalizeDate expands a "YYYY[-MM[-DD]]" date into a fully zero-padded
// "YYYY-MM-DD". Missing month/day default to 01, as do unparsable fields.
func normalizeDate(in string) string {
	parts := strings.Split(in, "-")
	// Fill in any missing month and day components.
	for len(parts) < 3 {
		parts = append(parts, "01")
	}
	for i, p := range parts {
		x, err := strconv.Atoi(p)
		if err != nil {
			// Non-numeric field: fall back to 1.
			x = 1
		}
		if i == 0 {
			parts[i] = fmt.Sprintf("%0.4d", x)
		} else {
			parts[i] = fmt.Sprintf("%0.2d", x)
		}
	}
	return strings.Join(parts, "-")
}
// first returns the initial element of s, or "" when s is empty.
func first(s []string) string {
	if len(s) == 0 {
		return ""
	}
	return s[0]
}
// second returns the element at index 1 of s, or "" when s has fewer than
// two elements.
func second(s []string) string {
	if len(s) < 2 {
		return ""
	}
	return s[1]
}
// last returns the final element of s, or "" when s is empty.
// (Original comment had a typo: "list element" for "last element".)
func last(s []string) string {
	if n := len(s); n > 0 {
		return s[n-1]
	}
	return ""
}
// ToBibTeXElement takes an epgo.Record and turns it into a bibtex.Element record.
// Only fields actually present on the record are set on the element.
func (rec *Record) ToBibTeXElement() *bibtex.Element {
	bib := &bibtex.Element{}
	bib.Set("type", rec.Type)
	bib.Set("id", fmt.Sprintf("eprint-%d", rec.ID))
	bib.Set("title", rec.Title)
	if len(rec.Abstract) > 0 {
		bib.Set("abstract", rec.Abstract)
	}
	// NOTE(review): PubDate() treats "published" as the published date type;
	// confirm whether "pub" here should also be "published".
	if rec.DateType == "pub" {
		dt, err := time.Parse("2006-01-02", rec.Date)
		// BUG FIX: the original set year/month only when time.Parse FAILED
		// (err != nil), which emitted the zero time; set them on success.
		if err == nil {
			bib.Set("year", dt.Format("2006"))
			bib.Set("month", dt.Format("January"))
		}
	}
	if len(rec.PageRange) > 0 {
		bib.Set("pages", rec.PageRange)
	}
	/*
		if len(rec.Note) > 0 {
			bib.Set("note", rec.Note)
		}
	*/
	if len(rec.Creators) > 0 {
		people := []string{}
		for _, person := range rec.Creators {
			people = append(people, fmt.Sprintf("%s, %s", person.Family, person.Given))
		}
		bib.Set("author", strings.Join(people, " and "))
	}
	// Journal vs publisher field depends on the record type.
	switch rec.Type {
	case "article":
		bib.Set("journal", rec.Publication)
	case "book":
		bib.Set("publisher", rec.Publication)
	}
	if len(rec.Volume) > 0 {
		bib.Set("volume", rec.Volume)
	}
	if len(rec.Number) > 0 {
		bib.Set("number", rec.Number)
	}
	return bib
}
// New creates a new API instance from cfg.
// Defaults: eprint_url "http://localhost:8080", htdocs "htdocs",
// dataset "eprints".
func New(cfg *cli.Config) (*EPrintsAPI, error) {
	EPrintURL := cfg.Get("eprint_url")
	if EPrintURL == "" {
		EPrintURL = "http://localhost:8080"
	}
	u, err := url.Parse(EPrintURL)
	if err != nil {
		return nil, fmt.Errorf("eprint url is malformed %s, %s", EPrintURL, err)
	}
	htdocs := cfg.Get("htdocs")
	if htdocs == "" {
		htdocs = "htdocs"
	}
	datasetName := cfg.Get("dataset")
	if datasetName == "" {
		datasetName = "eprints"
	}
	return &EPrintsAPI{
		URL:     u,
		Htdocs:  htdocs,
		Dataset: datasetName,
	}, nil
}
type byURI []string
func (s byURI) Len() int {
return len(s)
}
func (s byURI) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s byURI) Less(i, j int) bool {
s1 := strings.TrimSuffix(path.Base(s[i]), path.Ext(s[i]))
s2 := strings.TrimSuffix(path.Base(s[j]), path.Ext(s[j]))
a1, err := strconv.Atoi(s1)
if err != nil {
return false
}
a2, err := strconv.Atoi(s2)
if err != nil {
return false
}
return a1 > a2
}
// ListEPrintsURI returns a list of eprint record ids from the EPrints REST API.
// Each result is a unique "/rest/eprint/<id>.xml" URI.
// Note: this mutates api.URL.Path as a side effect.
func (api *EPrintsAPI) ListEPrintsURI() ([]string, error) {
	var (
		results []string
	)
	api.URL.Path = path.Join("rest", "eprint") + "/"
	// NOTE(review): http.Get uses the default client with no timeout;
	// consider a client with a deadline for production use.
	resp, err := http.Get(api.URL.String())
	if err != nil {
		return nil, fmt.Errorf("requested %s, %s", api.URL.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, fmt.Errorf("http error %s, %s", api.URL.String(), resp.Status)
	}
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("content can't be read %s, %s", api.URL.String(), err)
	}
	// The endpoint returns an HTML listing; parse the <a> entries (ePrintIDs).
	eIDs := new(ePrintIDs)
	err = xml.Unmarshal(content, &eIDs)
	if err != nil {
		return nil, err
	}
	// Build a list of Unique IDs in a map, then convert unique querys to results array
	m := make(map[string]bool)
	for _, val := range eIDs.IDs {
		if strings.HasSuffix(val, ".xml") == true {
			uri := "/" + path.Join("rest", "eprint", val)
			if _, hasID := m[uri]; hasID == false {
				// Save the new ID found
				m[uri] = true
				// Only store Unique IDs in result
				results = append(results, uri)
			}
		}
	}
	return results, nil
}
// GetEPrint retrieves an EPrint record via REST API
// Returns a Record structure, the raw XML and an error.
// uri is a path like "/rest/eprint/<id>.xml" (see ListEPrintsURI).
// Note: this mutates api.URL.Path as a side effect.
func (api *EPrintsAPI) GetEPrint(uri string) (*Record, []byte, error) {
	api.URL.Path = uri
	resp, err := http.Get(api.URL.String())
	if err != nil {
		return nil, nil, fmt.Errorf("requested %s, %s", api.URL.String(), err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, nil, fmt.Errorf("http error %s, %s", api.URL.String(), resp.Status)
	}
	content, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, nil, fmt.Errorf("content can't be read %s, %s", api.URL.String(), err)
	}
	rec := new(Record)
	err = xml.Unmarshal(content, &rec)
	if err != nil {
		// On parse failure the raw XML is still returned for debugging.
		return nil, content, err
	}
	return rec, content, nil
}
// ToNames takes an array of pointers to Person and returns a list of names (family, given)
func (persons PersonList) ToNames() []string {
	var names []string
	for _, p := range persons {
		names = append(names, fmt.Sprintf("%s, %s", p.Family, p.Given))
	}
	return names
}
// ToORCIDs takes an array of pointers to Person and returns a list of ORCID ids
func (persons PersonList) ToORCIDs() []string {
	var orcids []string
	for _, p := range persons {
		orcids = append(orcids, p.ORCID)
	}
	return orcids
}
// ToAgencies takes an array of pointers to Funders and returns a list of Agency names
func (funders FunderList) ToAgencies() []string {
	var agencies []string
	for _, f := range funders {
		agencies = append(agencies, f.Agency)
	}
	return agencies
}
// ToGrantNumbers takes an array of pointers to Funders and returns a list of
// grant numbers. (Original comment incorrectly said "Agency names".)
func (funders FunderList) ToGrantNumbers() []string {
	var result []string
	for _, funder := range funders {
		result = append(result, funder.GrantNumber)
	}
	return result
}
// PubDate returns the record's date when its date type is "published",
// otherwise the empty string.
func (record *Record) PubDate() string {
	if record.DateType != "published" {
		return ""
	}
	return record.Date
}
// ListURI returns a list of eprint record ids from the dataset, starting
// at offset start and returning at most count ids. count <= 0 means "all
// remaining keys".
func (api *EPrintsAPI) ListURI(start, count int) ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("ListURI() %s, %s", api.Dataset, err))
	defer c.Close()
	ids := c.Keys()
	results := []string{}
	if count <= 0 {
		count = len(ids)
	}
	// BUG FIX: the original loop never advanced i and never bounded it by
	// len(ids) — it appended ids[start] repeatedly and indexed out of range
	// (count was even initialized to len(ids)+1).
	for i := start; i < len(ids) && count > 0; i++ {
		results = append(results, ids[i])
		count--
	}
	return results, nil
}
// Get retrieves an EPrint record from the dataset
// uri is the record key used when the record was stored.
func (api *EPrintsAPI) Get(uri string) (*Record, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck exits the process on error, so err is nil past this point.
	failCheck(err, fmt.Sprintf("Get() %s, %s", api.Dataset, err))
	defer c.Close()
	record := new(Record)
	if err := c.Read(uri, record); err != nil {
		return nil, err
	}
	return record, nil
}
// customLessFn provides a Less() for ascending sorts non pubDate keys and descending sort for pubDate keys.
// Keys are indexDelimiter-joined fields; the date field uses > comparisons so
// newest entries sort first, while the other fields sort ascending.
func customLessFn(s []string, i, j int) bool {
	a, b := strings.Split(s[i], indexDelimiter), strings.Split(s[j], indexDelimiter)
	switch {
	// Four part keys
	// BUG FIX: the final comparison was `a[3] < a[3]` (always false),
	// so ties on the first three fields never ordered by the last field;
	// compare against b[3].
	case len(a) == 4 && a[0] == b[0] && a[1] == b[1] && a[2] == b[2] && a[3] < b[3]:
		return true
	case len(a) == 4 && a[0] == b[0] && a[1] == b[1] && a[2] > b[2]:
		return true
	case len(a) == 4 && a[0] == b[0] && a[1] < b[1]:
		return true
	case len(a) == 4 && a[0] < b[0]:
		return true
	// Three part keys
	case len(a) == 3 && a[0] == b[0] && a[1] == b[1] && a[2] < b[2]:
		return true
	case len(a) == 3 && a[0] == b[0] && a[1] > b[1]:
		return true
	case len(a) == 3 && a[0] < b[0]:
		return true
	// Two part keys
	case len(a) == 2 && a[0] == b[0] && a[1] < b[1]:
		return true
	case len(a) == 2 && a[0] > b[0]:
		return true
	// Single Keys
	case len(a) == 1 && a[0] < b[0]:
		return true
	}
	return false
}
// GetIDsBySelectList returns a list of ePrint IDs from a select list filterd by filterFn
// SelectLists are sorted by creation...
// Keys are indexDelimiter-joined fields whose final field is the eprint ID
// (see BuildSelectLists), hence the last() call below.
func (api *EPrintsAPI) GetIDsBySelectList(slName string, filterFn func(s string) bool) ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck exits the process on error, so err is nil past this point.
	failCheck(err, fmt.Sprintf("GetIDs() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select(slName)
	if err != nil {
		return nil, err
	}
	ids := []string{}
	for _, id := range sl.List() {
		if filterFn(id) == true {
			// Extract the trailing eprint ID from the composite key.
			ids = append(ids, last(strings.Split(id, indexDelimiter)))
		}
	}
	// err is necessarily nil here.
	return ids, err
}
// getRecordList takes a list of ePrint IDs and filters for start and end count return an array of records.
// count <= 0 means "no limit" (all matching records from start onward).
func getRecordList(c *dataset.Collection, ePrintIDs []string, start int, count int, filterFn func(*Record) bool) ([]*Record, error) {
	results := []*Record{}
	// i counts records that pass filterFn, so start/count paginate the
	// filtered sequence, not the raw ID list.
	i := 0
	if count <= 0 {
		count = len(ePrintIDs) + 1
	}
	for _, id := range ePrintIDs {
		rec := new(Record)
		// NOTE(review): &rec is a **Record while Get() passes a *Record to
		// c.Read(); confirm the dataset API accepts both forms.
		if err := c.Read(id, &rec); err != nil {
			return results, err
		}
		// IDIOM FIX: dropped redundant `== true` comparison.
		if filterFn(rec) {
			if i >= start {
				results = append(results, rec)
			}
			i++
			// NOTE(review): count is decremented even for records skipped by
			// the start offset, so with start > 0 fewer than count records
			// are returned — confirm this is the intended pagination.
			count--
			if count <= 0 {
				return results, nil
			}
		}
	}
	return results, nil
}
// GetAllRecords reads and returns all records sorted by Publication Date
// returning an array of keys in ascending or decending order
func (api *EPrintsAPI) GetAllRecords() ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetAllRecords() %s, %s", api.Dataset, err))
	defer c.Close()
	// Accept every key in the pubDate select list.
	acceptAll := func(string) bool { return true }
	ids, err := api.GetIDsBySelectList("pubDate", acceptAll)
	if err != nil {
		return nil, err
	}
	// No pagination (start 0, count -1) and no record-level filtering.
	return getRecordList(c, ids, 0, -1, func(*Record) bool { return true })
}
// GetPublications reads the index for published content and returns a populated
// array of records found in index in ascending or decending order
func (api *EPrintsAPI) GetPublications(start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	// Take every key from the pubDate select list...
	ids, err := api.GetIDsBySelectList("pubDate", func(string) bool { return true })
	if err != nil {
		return nil, err
	}
	// ...then keep only records flagged as published.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub"
	})
}
// GetArticles reads the index for published content and returns a populated
// array of records found in index in decending order
func (api *EPrintsAPI) GetArticles(start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	// Take every key from the pubDate select list...
	ids, err := api.GetIDsBySelectList("pubDate", func(string) bool { return true })
	if err != nil {
		return nil, err
	}
	// ...then keep only published journal articles.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	})
}
// GetLocalGroups returns a JSON list of unique Group names in index
func (api *EPrintsAPI) GetLocalGroups() ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck exits the process on error, so err is nil past this point.
	failCheck(err, fmt.Sprintf("GetLocalGroups() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select("localGroup")
	if err != nil {
		return nil, err
	}
	// Note: Aggregate the local group names
	// Each key is "group|date|eprintID" (see BuildSelectLists); take the
	// first field and drop consecutive repeats.
	// NOTE(review): only *adjacent* duplicates are removed — this assumes the
	// select list is sorted by group name; verify against the list's order.
	groupNames := []string{}
	lastGroup := ""
	groupName := []string{}
	for _, id := range sl.List() {
		groupName = strings.Split(id, indexDelimiter)
		if groupName[0] != lastGroup {
			groupNames = append(groupNames, groupName[0])
			lastGroup = groupName[0]
		}
	}
	return groupNames, nil
}
// GetLocalGroupPublications returns a list of EPrint records with groupName
func (api *EPrintsAPI) GetLocalGroupPublications(groupName string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetLocalGroupPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the localGroup select-list keys whose leading field is groupName.
	ids, err := api.GetIDsBySelectList("localGroup", func(s string) bool {
		return first(strings.Split(s, indexDelimiter)) == groupName
	})
	if err != nil {
		return nil, err
	}
	// Keep only records flagged as published.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub"
	})
}
// GetLocalGroupArticles returns a list of EPrint records with groupName
func (api *EPrintsAPI) GetLocalGroupArticles(groupName string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetLocalGroupArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the localGroup select-list keys whose leading field is groupName.
	ids, err := api.GetIDsBySelectList("localGroup", func(s string) bool {
		return groupName == first(strings.Split(s, indexDelimiter))
	})
	if err != nil {
		return nil, err
	}
	// Keep only published journal articles.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	})
}
// GetORCIDs returns a list unique of ORCID IDs in index
func (api *EPrintsAPI) GetORCIDs() ([]string, error) {
	c, err := dataset.Open(api.Dataset)
	// failCheck exits the process on error, so err is nil past this point.
	failCheck(err, fmt.Sprintf("GetORCIDs() %s, %s", api.Dataset, err))
	defer c.Close()
	sl, err := c.Select("orcid")
	if err != nil {
		return nil, err
	}
	// Each key is "ORCID|date|eprintID" (see BuildSelectLists); take the
	// first field and drop consecutive repeats.
	// NOTE(review): only *adjacent* duplicates are removed — this assumes the
	// select list is sorted by ORCID; verify against the list's sort order.
	orcids := []string{}
	lastORCID := ""
	for _, id := range sl.List() {
		orcid := first(strings.Split(id, indexDelimiter))
		if orcid != lastORCID {
			lastORCID = orcid
			orcids = append(orcids, orcid)
		}
	}
	return orcids, nil
}
// GetORCIDPublications returns a list of EPrint records with a given ORCID
func (api *EPrintsAPI) GetORCIDPublications(orcid string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetORCIDPublications() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the orcid select-list keys whose leading field is this ORCID.
	matchORCID := func(s string) bool {
		return first(strings.Split(s, indexDelimiter)) == orcid
	}
	ids, err := api.GetIDsBySelectList("orcid", matchORCID)
	if err != nil {
		return nil, err
	}
	// Keep only records flagged as published.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub"
	})
}
// GetORCIDArticles returns a list of EPrint records with a given ORCID
func (api *EPrintsAPI) GetORCIDArticles(orcid string, start, count int) ([]*Record, error) {
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("GetORCIDArticles() %s, %s", api.Dataset, err))
	defer c.Close()
	// Keep the orcid select-list keys whose leading field is this ORCID.
	matchORCID := func(s string) bool {
		return first(strings.Split(s, indexDelimiter)) == orcid
	}
	ids, err := api.GetIDsBySelectList("orcid", matchORCID)
	if err != nil {
		return nil, err
	}
	// Keep only published journal articles.
	return getRecordList(c, ids, start, count, func(rec *Record) bool {
		return rec.IsPublished == "pub" && rec.Type == "article"
	})
}
// RenderEPrint writes a single EPrint record to disc as <ID>.json under
// basepath.
func (api *EPrintsAPI) RenderEPrint(basepath string, record *Record) error {
	// Serialize the record before touching the filesystem.
	src, err := json.Marshal(record)
	if err != nil {
		return err
	}
	target := path.Join(basepath, fmt.Sprintf("%d.json", record.ID))
	return ioutil.WriteFile(target, src, 0664)
}
// RenderDocuments writes JSON, BibTeX documents to the directory indicated by docpath.
// docTitle and docDescription are accepted for interface stability; they are
// not used by the current renderer.
func (api *EPrintsAPI) RenderDocuments(docTitle, docDescription, docpath string, records []*Record) error {
	// Create the directory part of docpath if necessary. os.Stat replaces the
	// previous os.Open call, which leaked an open file descriptor (the opened
	// file was never closed); Stat performs the same existence check without
	// holding a handle. The MkdirAll error is now checked rather than dropped.
	if _, err := os.Stat(path.Join(api.Htdocs, docpath)); err != nil && os.IsNotExist(err) {
		if err := os.MkdirAll(path.Join(api.Htdocs, path.Dir(docpath)), 0775); err != nil {
			return fmt.Errorf("Can't create %s, %s", path.Dir(docpath), err)
		}
	}
	// Writing JSON file
	fname := path.Join(api.Htdocs, docpath+".json")
	src, err := json.Marshal(records)
	if err != nil {
		return fmt.Errorf("Can't convert records to JSON %s, %s", fname, err)
	}
	err = ioutil.WriteFile(fname, src, 0664)
	if err != nil {
		return fmt.Errorf("Can't write %s, %s", fname, err)
	}
	// Write out BibTeX file.
	bibDoc := []string{}
	for _, rec := range records {
		bibDoc = append(bibDoc, rec.ToBibTeXElement().String())
	}
	fname = path.Join(api.Htdocs, docpath+".bib")
	err = ioutil.WriteFile(fname, []byte(strings.Join(bibDoc, "\n\n")), 0664)
	if err != nil {
		return fmt.Errorf("Can't write %s, %s", fname, err)
	}
	return nil
}
// BuildPages generates JSON and BibTeX versions of collected records
// by calling RenderDocuments with the appropriate data.
func (api *EPrintsAPI) BuildPages(feedSize int, title, target string, filter func(*EPrintsAPI, int, int) ([]*Record, error)) error {
	// Guard against nonsensical page sizes.
	if feedSize < 1 {
		feedSize = DefaultFeedSize
	}
	docPath := path.Join(api.Htdocs, target)
	// Gather the records for this page via the supplied filter.
	records, err := filter(api, 0, feedSize)
	if err != nil {
		return err
	}
	if len(records) == 0 {
		return fmt.Errorf("Zero records for %q, %s", title, docPath)
	}
	log.Printf("%d records found for %q %s", len(records), title, docPath)
	err = api.RenderDocuments(title, fmt.Sprintf("Building pages 0 to %d descending", feedSize), target, records)
	if err != nil {
		return fmt.Errorf("%q %s error, %s", title, docPath, err)
	}
	return nil
}
// BuildSelectLists iterates over the exported data and creates fresh selectLists.
// For each record it pushes composite keys (value + indexDelimiter + date +
// indexDelimiter + eprint ID) onto the pubDate, localGroup, orcid, funder and
// grantNumber lists, then sorts and saves each list.
func (api *EPrintsAPI) BuildSelectLists() error {
	//FIXME: This should probably be Open not Create on dataset...
	//c, err := dataset.Create(api.Dataset, dataset.GenerateBucketNames(dataset.DefaultAlphabet, 2))
	c, err := dataset.Open(api.Dataset)
	failCheck(err, fmt.Sprintf("BuildSelectLists() %s, %s", api.Dataset, err))
	defer c.Close()
	sLists := map[string]*dataset.SelectList{}
	// Clear the select lists
	log.Println("Clearing select lists")
	// expected select lists used by epgo
	slNames := []string{
		"pubDate",
		"localGroup",
		"orcid",
		"funder",
		"grantNumber",
	}
	// Clear the select lists if they already exist, then re-open them with the
	// custom comparison function attached for later sorting.
	for _, name := range slNames {
		c.Clear(name)
		sLists[name], err = c.Select(name)
		if err != nil {
			return err
		}
		sLists[name].CustomLessFn = customLessFn
	}
	// Now iterate over the records and populate select lists
	log.Println("Building select lists")
	for i, ky := range c.Keys() {
		rec := new(Record)
		// NOTE(review): &rec is **Record here since rec is already a pointer;
		// presumably dataset.Read accepts either — confirm against its API.
		err := c.Read(ky, &rec)
		if err != nil {
			return err
		}
		// Update pubDate select list (only dated, published records qualify).
		dt := normalizeDate(rec.Date)
		if rec.DateType == "published" && rec.Date != "" {
			sLists["pubDate"].Push(fmt.Sprintf("%s%s%d", dt, indexDelimiter, rec.ID))
		}
		// Update localGroup select list; the delimiter is scrubbed from group
		// names so it cannot corrupt the composite key.
		if len(rec.LocalGroup) > 0 {
			for _, grp := range rec.LocalGroup {
				grp = strings.TrimSpace(strings.Replace(grp, indexDelimiter, " ", -1))
				if len(grp) > 0 {
					sLists["localGroup"].Push(fmt.Sprintf("%s%s%s%s%d", grp, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Update orcid select list from each creator's ORCID, when present.
		if len(rec.Creators) > 0 {
			for _, person := range rec.Creators {
				orcid := strings.TrimSpace(person.ORCID)
				if len(orcid) > 0 {
					sLists["orcid"].Push(fmt.Sprintf("%s%s%s%s%d", orcid, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Add funders and grantNumbers to select lists; grantNumber entries
		// require a non-empty funder name as well.
		if len(rec.Funders) > 0 {
			for _, funder := range rec.Funders {
				funderName := strings.TrimSpace(strings.Replace(funder.Agency, indexDelimiter, " ", -1))
				grantNumber := strings.TrimSpace(strings.Replace(funder.GrantNumber, indexDelimiter, " ", -1))
				if len(funderName) > 0 {
					sLists["funder"].Push(fmt.Sprintf("%s%s%s%s%d", funderName, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
				if len(funderName) > 0 && len(grantNumber) > 0 {
					sLists["grantNumber"].Push(fmt.Sprintf("%s%s%s%s%s%s%d", funderName, indexDelimiter, grantNumber, indexDelimiter, dt, indexDelimiter, rec.ID))
				}
			}
		}
		// Progress log every 1000 records (also fires at i == 0).
		if (i % 1000) == 0 {
			log.Printf("%d recs processed", i)
		}
	}
	log.Printf("Sorting and save %d lists", len(sLists))
	// Idiom fix: `for name, _ := range` flagged by gofmt/vet; the blank
	// identifier in a range clause is redundant.
	for name := range sLists {
		log.Printf("Sorting and saving %s", name)
		sLists[name].Sort(dataset.ASC)
		// Finally we want to save our sorted results...
		sLists[name].SaveList()
	}
	return nil
}
|
package raft
import (
"bytes"
"code.google.com/p/goprotobuf/proto"
"encoding/json"
"fmt"
"github.com/goraft/raft/protobuf"
"io"
)
// A log entry stores a single item in the log.
type LogEntry struct {
	log         *Log   // owning log; provides the shared protobuf scratch buffers used by encode
	Index       uint64 // log index of this entry
	Term        uint64 // raft term under which the entry was created
	CommandName string // registered name of the command, empty when no command
	Command     []byte // encoded command payload (CommandEncoder or JSON)
	Position    int64  // position in the log file
	event       *ev    // associated event, supplied by the caller; semantics not visible here — TODO confirm
}
// Creates a new log entry associated with a log. The command, when non-nil,
// is serialized via its CommandEncoder implementation if it has one, or as
// JSON otherwise; a nil command produces an entry with an empty payload.
func newLogEntry(log *Log, event *ev, index uint64, term uint64, command Command) (*LogEntry, error) {
	var buf bytes.Buffer
	var commandName string
	if command != nil {
		commandName = command.CommandName()
		if encoder, ok := command.(CommandEncoder); ok {
			if err := encoder.Encode(&buf); err != nil {
				return nil, err
			}
		} else if err := json.NewEncoder(&buf).Encode(command); err != nil {
			// Fix: this error was previously discarded, which could silently
			// persist an empty or truncated command payload.
			return nil, err
		}
	}
	e := &LogEntry{
		log:         log,
		Index:       index,
		Term:        term,
		CommandName: commandName,
		Command:     buf.Bytes(),
		event:       event,
	}
	return e, nil
}
// Encodes the log entry to a buffer. Returns the number of bytes
// written and any error that may have occurred.
func (e *LogEntry) encode(w io.Writer) (int, error) {
	// The log's protobuf buffer is shared scratch space reused across entries;
	// reset it on exit so the next encode starts clean. This implies encode is
	// not safe for concurrent use on the same Log.
	defer e.log.pBuffer.Reset()
	e.log.pLogEntry.Index = proto.Uint64(e.Index)
	e.log.pLogEntry.Term = proto.Uint64(e.Term)
	e.log.pLogEntry.CommandName = proto.String(e.CommandName)
	e.log.pLogEntry.Command = e.Command
	err := e.log.pBuffer.Marshal(e.log.pLogEntry)
	if err != nil {
		return -1, err
	}
	// Length header: payload size as 8 hex digits plus a newline (9 bytes).
	if _, err = fmt.Fprintf(w, "%8x\n", len(e.log.pBuffer.Bytes())); err != nil {
		return -1, err
	}
	// NOTE(review): the returned count covers only the payload write, not the
	// 9-byte header — confirm callers expect payload-only here.
	return w.Write(e.log.pBuffer.Bytes())
}
// Decodes the log entry from a buffer. Returns the number of bytes read
// (the payload plus its 9-byte length header) and any error that occurs.
func (e *LogEntry) decode(r io.Reader) (int, error) {
	var length int
	_, err := fmt.Fscanf(r, "%8x\n", &length)
	if err != nil {
		return -1, err
	}
	data := make([]byte, length)
	// Fix: io.ReadFull retries short reads; a bare r.Read may legally return
	// fewer than len(data) bytes without error, corrupting the unmarshal.
	if _, err = io.ReadFull(r, data); err != nil {
		return -1, err
	}
	pb := &protobuf.ProtoLogEntry{}
	if err = proto.Unmarshal(data, pb); err != nil {
		return -1, err
	}
	e.Term = pb.GetTerm()
	e.Index = pb.GetIndex()
	e.CommandName = pb.GetCommandName()
	e.Command = pb.Command
	// Fix: report the total bytes consumed — the payload plus the 8 hex
	// digits of the length field and its trailing newline — so callers can
	// track file positions correctly.
	return length + 8 + 1, nil
}
fix(log_entry.go): report the right read size (including the length field plus a newline char)
package raft
import (
"bytes"
"code.google.com/p/goprotobuf/proto"
"encoding/json"
"fmt"
"github.com/goraft/raft/protobuf"
"io"
)
// A log entry stores a single item in the log.
type LogEntry struct {
	log         *Log   // owning log; provides the shared protobuf scratch buffers used by encode
	Index       uint64 // log index of this entry
	Term        uint64 // raft term under which the entry was created
	CommandName string // registered name of the command, empty when no command
	Command     []byte // encoded command payload (CommandEncoder or JSON)
	Position    int64  // position in the log file
	event       *ev    // associated event, supplied by the caller; semantics not visible here — TODO confirm
}
// Creates a new log entry associated with a log. The command, when non-nil,
// is serialized via its CommandEncoder implementation if it has one, or as
// JSON otherwise; a nil command produces an entry with an empty payload.
func newLogEntry(log *Log, event *ev, index uint64, term uint64, command Command) (*LogEntry, error) {
	var buf bytes.Buffer
	var commandName string
	if command != nil {
		commandName = command.CommandName()
		if encoder, ok := command.(CommandEncoder); ok {
			if err := encoder.Encode(&buf); err != nil {
				return nil, err
			}
		} else if err := json.NewEncoder(&buf).Encode(command); err != nil {
			// Fix: this error was previously discarded, which could silently
			// persist an empty or truncated command payload.
			return nil, err
		}
	}
	e := &LogEntry{
		log:         log,
		Index:       index,
		Term:        term,
		CommandName: commandName,
		Command:     buf.Bytes(),
		event:       event,
	}
	return e, nil
}
// Encodes the log entry to a buffer. Returns the number of bytes
// written and any error that may have occurred.
func (e *LogEntry) encode(w io.Writer) (int, error) {
	// The log's protobuf buffer is shared scratch space reused across entries;
	// reset it on exit so the next encode starts clean. This implies encode is
	// not safe for concurrent use on the same Log.
	defer e.log.pBuffer.Reset()
	e.log.pLogEntry.Index = proto.Uint64(e.Index)
	e.log.pLogEntry.Term = proto.Uint64(e.Term)
	e.log.pLogEntry.CommandName = proto.String(e.CommandName)
	e.log.pLogEntry.Command = e.Command
	err := e.log.pBuffer.Marshal(e.log.pLogEntry)
	if err != nil {
		return -1, err
	}
	// Length header: payload size as 8 hex digits plus a newline (9 bytes).
	if _, err = fmt.Fprintf(w, "%8x\n", len(e.log.pBuffer.Bytes())); err != nil {
		return -1, err
	}
	// NOTE(review): the returned count covers only the payload write, not the
	// 9-byte header — confirm callers expect payload-only here.
	return w.Write(e.log.pBuffer.Bytes())
}
// Decodes the log entry from a buffer. Returns the number of bytes read
// (the payload plus its 9-byte length header) and any error that occurs.
func (e *LogEntry) decode(r io.Reader) (int, error) {
	var length int
	_, err := fmt.Fscanf(r, "%8x\n", &length)
	if err != nil {
		return -1, err
	}
	data := make([]byte, length)
	// Fix: io.ReadFull retries short reads; a bare r.Read may legally return
	// fewer than len(data) bytes without error, corrupting the unmarshal.
	if _, err = io.ReadFull(r, data); err != nil {
		return -1, err
	}
	pb := &protobuf.ProtoLogEntry{}
	if err = proto.Unmarshal(data, pb); err != nil {
		return -1, err
	}
	e.Term = pb.GetTerm()
	e.Index = pb.GetIndex()
	e.CommandName = pb.GetCommandName()
	e.Command = pb.Command
	// Total consumed: payload + 8 hex digits of length + newline.
	return length + 8 + 1, nil
}
|
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package dependency
import (
"time"
"github.com/juju/errors"
"github.com/juju/loggo"
"launchpad.net/tomb"
"github.com/juju/juju/worker"
)
// logger is the package-wide logger for the dependency engine.
var logger = loggo.GetLogger("juju.worker.dependency")
// NewEngine returns an Engine that will maintain any Installed Manifolds until
// either the engine is killed or one of the manifolds' workers returns an error
// that satisfies isFatal. The caller takes responsibility for the returned Engine.
func NewEngine(isFatal IsFatalFunc, errorDelay, bounceDelay time.Duration) Engine {
	engine := &engine{
		isFatal:     isFatal,
		errorDelay:  errorDelay,
		bounceDelay: bounceDelay,
		manifolds:   map[string]Manifold{},
		dependents:  map[string][]string{},
		current:     map[string]workerInfo{},
		// Unbuffered channels: every message is handled synchronously by the
		// loop goroutine, which serializes all access to the maps above.
		install: make(chan installTicket),
		started: make(chan startedTicket),
		stopped: make(chan stoppedTicket),
	}
	go func() {
		// loop's exit reason becomes the tomb's death reason.
		defer engine.tomb.Done()
		engine.tomb.Kill(engine.loop())
	}()
	return engine
}
// engine maintains workers corresponding to its installed manifolds, and
// restarts them whenever their inputs change.
type engine struct {
	// tomb tracks the engine's lifetime; loop() reports its exit reason here.
	tomb tomb.Tomb
	// isFatal allows errors generated by workers to stop the engine.
	isFatal IsFatalFunc
	// errorDelay controls how long the engine waits before restarting a worker
	// that encountered an unknown error.
	errorDelay time.Duration
	// bounceDelay controls how long the engine waits before restarting a worker
	// that was deliberately shut down because its dependencies changed.
	bounceDelay time.Duration
	// manifolds holds the installed manifolds by name.
	manifolds map[string]Manifold
	// dependents holds, for each named manifold, those that depend on it.
	dependents map[string][]string
	// current holds the active worker information for each installed manifold.
	current map[string]workerInfo
	// install, started, and stopped each communicate requests and changes into
	// the loop goroutine; only that goroutine touches the maps above.
	install chan installTicket
	started chan startedTicket
	stopped chan stoppedTicket
}
// loop serializes manifold install operations and worker start/stop notifications.
// It's notable for its oneShotDying var, which is necessary because any number of
// start/stop notification could be in flight at the point the engine needs to stop;
// we need to handle all those, and any subsequent messages, until the main loop is
// confident that every worker has stopped. (The usual pattern -- to defer a cleanup
// method to run before tomb.Done in NewEngine -- is not cleanly applicable, because
// it needs to duplicate that start/stop message handling; better to localise that
// in this method.)
func (engine *engine) loop() error {
	// Dying fires at most once; the channel variable is nilled afterwards so
	// the select never takes that case again (receive on nil blocks forever).
	oneShotDying := engine.tomb.Dying()
	for {
		select {
		case <-oneShotDying:
			oneShotDying = nil
			// Request a stop for every current worker; their stop
			// notifications keep arriving on engine.stopped below.
			for name := range engine.current {
				engine.stop(name)
			}
		case ticket := <-engine.install:
			// This is safe so long as the Install method reads the result.
			ticket.result <- engine.gotInstall(ticket.name, ticket.manifold)
		case ticket := <-engine.started:
			engine.gotStarted(ticket.name, ticket.worker)
		case ticket := <-engine.stopped:
			engine.gotStopped(ticket.name, ticket.error)
		}
		if engine.isDying() {
			if engine.allStopped() {
				// Every worker has stopped; the tomb may now die cleanly.
				return tomb.ErrDying
			}
		}
	}
}
// Kill is part of the worker.Worker interface.
func (engine *engine) Kill() {
	// A nil reason signals a clean shutdown to the loop goroutine.
	engine.tomb.Kill(nil)
}
// Wait is part of the worker.Worker interface. It blocks until the loop
// goroutine has exited and returns its final error.
func (engine *engine) Wait() error {
	return engine.tomb.Wait()
}
// Install is part of the Engine interface.
func (engine *engine) Install(name string, manifold Manifold) error {
	// result is unbuffered; the loop goroutine sends exactly one value per
	// installTicket, which this method must receive.
	result := make(chan error)
	select {
	case <-engine.tomb.Dying():
		return errors.New("engine is shutting down")
	case engine.install <- installTicket{name, manifold, result}:
		// This is safe so long as the loop sends a result.
		return <-result
	}
}
// gotInstall handles the params originally supplied to Install. It must only be
// called from the loop goroutine.
func (engine *engine) gotInstall(name string, manifold Manifold) error {
	logger.Infof("installing %s manifold...", name)
	if _, found := engine.manifolds[name]; found {
		return errors.Errorf("%s manifold already installed", name)
	}
	// Unknown inputs are only logged, not rejected: a dependency may be
	// installed later (or never), and workers cope via restarts.
	for _, input := range manifold.Inputs {
		if _, found := engine.manifolds[input]; !found {
			logger.Infof("%s manifold depends on unknown %s manifold", name, input)
		}
	}
	engine.manifolds[name] = manifold
	// Record the reverse dependency edges consumed by bounceDependents.
	for _, input := range manifold.Inputs {
		engine.dependents[input] = append(engine.dependents[input], name)
	}
	engine.current[name] = workerInfo{}
	engine.start(name, 0)
	return nil
}
// start invokes a runWorker goroutine for the manifold with the supplied name. It
// must only be called from the loop goroutine.
func (engine *engine) start(name string, delay time.Duration) {
	// Check preconditions.
	manifold, found := engine.manifolds[name]
	if !found {
		// Killing the tomb puts the engine into the dying state; the
		// isDying() check below then returns before any worker is started.
		engine.tomb.Kill(errors.Errorf("fatal: unknown manifold %s", name))
	}
	// Copy current info and check more preconditions.
	info := engine.current[name]
	if !info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: trying to start a second %s manifold worker", name))
	}
	// Final check that we're not shutting down yet...
	if engine.isDying() {
		logger.Infof("not starting %s manifold worker (shutting down)", name)
		return
	}
	// ...then update the info, copy it back to the engine, and start a worker
	// goroutine based on current known state.
	info.starting = true
	engine.current[name] = info
	getResource := engine.getResourceFunc(manifold.Inputs)
	go engine.runWorker(name, delay, manifold.Start, getResource)
}
// getResourceFunc returns a GetResourceFunc backed by a snapshot of current
// worker state, restricted to those workers declared in inputs. It must only
// be called from the loop goroutine; see inside for a detailed discussion of
// why we took this approach.
func (engine *engine) getResourceFunc(inputs []string) GetResourceFunc {
	// We snapshot the resources available at invocation time, rather than adding an
	// additional communicate-resource-request channel. The latter approach is not
	// unreasonable... but is prone to inelegant scrambles when starting several
	// dependent workers at once. For example:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B asks for A, gets A, doesn't react to bounce (*)
	//  * B communicates its worker back to loop; loop kills it immediately in
	//    response to earlier bounce
	//  * loop starts worker B again, now everything's fine; but, still, yuck.
	//    This is not a happy path to take by default.
	//
	// The problem, of course, is in the (*); the main thread *does* know that B
	// needs to bounce soon anyway, and it *could* communicate that fact back via
	// an error over a channel back into getResource; the StartFunc could then
	// just return (say) that ErrResourceChanged and avoid the hassle of creating
	// a worker. But that adds a whole layer of complexity (and unpredictability
	// in tests, which is not much fun) for very little benefit.
	//
	// In the analogous scenario with snapshotted dependencies, we see a happier
	// picture at startup time:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B with empty resource snapshot
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B's StartFunc asks for A, gets nothing, returns ErrUnmetDependencies
	//  * loop restarts worker B with an up-to-date snapshot, B works fine
	//
	// We assume that, in the common case, most workers run without error most
	// of the time; and, thus, that the vast majority of worker startups will
	// happen as an agent starts. Furthermore, most of them will have simple
	// hard dependencies, and their Start funcs will be easy to write; the only
	// components that may be impacted by such a strategy will be those workers
	// which still want to run (with reduced functionality) with some dependency
	// unmet.
	//
	// Those may indeed suffer the occasional extra bounce as the system comes
	// to stability as it starts, or after a change; but workers *must* be
	// written for resilience in the face of arbitrary bounces *anyway*, so it
	// shouldn't be harmful.
	outputs := map[string]OutputFunc{}
	workers := map[string]worker.Worker{}
	for _, resourceName := range inputs {
		outputs[resourceName] = engine.manifolds[resourceName].Output
		workers[resourceName] = engine.current[resourceName].worker
	}
	// The returned closure reads only the snapshot maps built above, so it is
	// safe to call from the worker goroutine without extra synchronization.
	return func(resourceName string, out interface{}) bool {
		switch {
		case workers[resourceName] == nil:
			// Input not running (or not declared): resource unavailable.
			return false
		case outputs[resourceName] == nil:
			// Worker exists but exposes no output; only a nil request succeeds.
			return out == nil
		}
		return outputs[resourceName](workers[resourceName], out)
	}
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {
	// We may or may not send on started, but we *must* send on stopped.
	// The whole lifecycle runs inside a function literal whose error result
	// becomes the stop notification: exactly one stoppedTicket per call.
	engine.stopped <- stoppedTicket{name, func() error {
		logger.Infof("starting %s manifold worker in %s...", name, delay)
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			logger.Infof("not starting %s manifold worker (shutting down)", name)
			return tomb.ErrDying
		}
		logger.Infof("starting %s manifold worker", name)
		worker, err := start(getResource)
		if err != nil {
			logger.Infof("failed to start %s manifold worker: %v", name, err)
			return err
		}
		logger.Infof("running %s manifold worker: %v", name, worker)
		// Either hand the worker to the loop, or kill it ourselves if the
		// engine is already dying and nobody will track it.
		select {
		case <-engine.tomb.Dying():
			logger.Infof("stopping %s manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker}:
			logger.Infof("registered %s manifold worker", name)
		}
		return worker.Wait()
	}()}
}
// gotStarted updates the engine to reflect the creation of a worker. It must
// only be called from the loop goroutine.
func (engine *engine) gotStarted(name string, worker worker.Worker) {
	// Copy current info; check preconditions and abort the workers if we've
	// already been asked to stop it.
	info := engine.current[name]
	switch {
	case info.worker != nil:
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker start", name))
		// Deliberate fallthrough: the duplicate worker is killed below.
		fallthrough
	case info.stopping, engine.isDying():
		logger.Infof("%s manifold worker no longer required", name)
		worker.Kill()
	default:
		// It's fine to use this worker; update info and copy back.
		logger.Infof("%s manifold worker started: %v", name, worker)
		info.starting = false
		info.worker = worker
		engine.current[name] = info
		// Any manifold that declares this one as an input needs to be restarted.
		engine.bounceDependents(name)
	}
}
// gotStopped updates the engine to reflect the demise of (or failure to create)
// a worker. It must only be called from the loop goroutine.
func (engine *engine) gotStopped(name string, err error) {
	logger.Infof("%s manifold worker stopped: %v", name, err)
	// Copy current info and check for reasons to stop the engine.
	info := engine.current[name]
	if info.stopped() {
		// Fix: the original passed a %s format string to errors.New, which
		// performs no interpolation; Errorf substitutes the manifold name.
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker stop", name))
	} else if engine.isFatal(err) {
		engine.tomb.Kill(err)
	}
	// Reset engine info; and bail out if we can be sure there's no need to bounce.
	engine.current[name] = workerInfo{}
	if engine.isDying() {
		logger.Infof("permanently stopped %s manifold worker (shutting down)", name)
		return
	}
	// If we told the worker to stop, we should start it again immediately,
	// whatever else happened.
	if info.stopping {
		engine.start(name, engine.bounceDelay)
	} else {
		// If we didn't stop it ourselves, we need to interpret the error.
		switch err {
		case nil:
			// Nothing went wrong; the task completed successfully. Nothing
			// needs to be done (unless the inputs change, in which case it
			// gets to check again).
		case ErrUnmetDependencies:
			// The task can't even start with the current state. Nothing more
			// can be done (until the inputs change, in which case we retry
			// anyway).
		default:
			// Something went wrong but we don't know what. Try again soon.
			engine.start(name, engine.errorDelay)
		}
	}
	// Manifolds that declared a dependency on this one only need to be notified
	// if the worker has changed; if it was already nil, nobody needs to know.
	if info.worker != nil {
		engine.bounceDependents(name)
	}
}
// stop ensures that any running or starting worker will be stopped in the
// near future. It must only be called from the loop goroutine.
func (engine *engine) stop(name string) {
	info := engine.current[name]
	// Nothing to do for a worker that is already stopping or fully stopped.
	if info.stopping || info.stopped() {
		return
	}
	// Mark it stopping, ask any live worker to die, and publish the new state.
	info.stopping = true
	if w := info.worker; w != nil {
		w.Kill()
	}
	engine.current[name] = info
}
// isDying reports whether the engine has begun shutting down. It is safe to
// call from any goroutine.
func (engine *engine) isDying() bool {
	dying := false
	// A non-blocking receive on the tomb's Dying channel: it is closed once
	// the engine starts dying, so the receive succeeds from then on.
	select {
	case <-engine.tomb.Dying():
		dying = true
	default:
	}
	return dying
}
// allStopped returns true if no workers are running or starting. It must only
// be called from the loop goroutine.
func (engine *engine) allStopped() bool {
	// Scan every installed manifold's state; one live worker is enough to
	// answer false.
	for name := range engine.current {
		if !engine.current[name].stopped() {
			return false
		}
	}
	return true
}
// bounceDependents starts every stopped dependent of the named manifold, and
// stops every started one (and trusts the rest of the engine to restart them).
// It must only be called from the loop goroutine.
func (engine *engine) bounceDependents(name string) {
	logger.Infof("restarting dependents of %s manifold", name)
	for _, dependent := range engine.dependents[name] {
		switch {
		case engine.current[dependent].stopped():
			// Stopped dependents are (re)started after the bounce delay.
			engine.start(dependent, engine.bounceDelay)
		default:
			// Running dependents are stopped; gotStopped restarts them later.
			engine.stop(dependent)
		}
	}
}
// workerInfo stores what an engine needs to know about the worker for a given
// Manifold.
type workerInfo struct {
	starting bool // a runWorker goroutine has been launched but has not yet reported a worker
	stopping bool // the worker has been asked to stop and we're awaiting its stop notification
	worker   worker.Worker // the live worker, nil until gotStarted records it
}
// stopped returns true unless the worker is either assigned or starting.
func (info workerInfo) stopped() bool {
	return info.worker == nil && !info.starting
}
// installTicket is used by engine to induce installation of a named manifold
// and pass on any errors encountered in the process.
type installTicket struct {
	name     string
	manifold Manifold
	result   chan<- error // receives exactly one value from the loop goroutine
}
// startedTicket is used by engine to notify the loop of the creation of the
// worker for a particular manifold.
type startedTicket struct {
	name   string
	worker worker.Worker
}
// stoppedTicket is used by engine to notify the loop of the demise of (or
// failure to create) the worker for a particular manifold.
type stoppedTicket struct {
	name  string
	error error // nil on clean completion
}
trivial fixes
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package dependency
import (
"time"
"github.com/juju/errors"
"github.com/juju/loggo"
"launchpad.net/tomb"
"github.com/juju/juju/worker"
)
// logger is the package-wide logger for the dependency engine.
var logger = loggo.GetLogger("juju.worker.dependency")
// NewEngine returns an Engine that will maintain any Installed Manifolds until
// either the engine is killed or one of the manifolds' workers returns an error
// that satisfies isFatal. The caller takes responsibility for the returned Engine.
func NewEngine(isFatal IsFatalFunc, errorDelay, bounceDelay time.Duration) Engine {
	engine := &engine{
		isFatal:     isFatal,
		errorDelay:  errorDelay,
		bounceDelay: bounceDelay,
		manifolds:   map[string]Manifold{},
		dependents:  map[string][]string{},
		current:     map[string]workerInfo{},
		// Unbuffered channels: every message is handled synchronously by the
		// loop goroutine, which serializes all access to the maps above.
		install: make(chan installTicket),
		started: make(chan startedTicket),
		stopped: make(chan stoppedTicket),
	}
	go func() {
		// loop's exit reason becomes the tomb's death reason.
		defer engine.tomb.Done()
		engine.tomb.Kill(engine.loop())
	}()
	return engine
}
// engine maintains workers corresponding to its installed manifolds, and
// restarts them whenever their inputs change.
type engine struct {
	// tomb tracks the engine's lifetime; loop() reports its exit reason here.
	tomb tomb.Tomb
	// isFatal allows errors generated by workers to stop the engine.
	isFatal IsFatalFunc
	// errorDelay controls how long the engine waits before restarting a worker
	// that encountered an unknown error.
	errorDelay time.Duration
	// bounceDelay controls how long the engine waits before restarting a worker
	// that was deliberately shut down because its dependencies changed.
	bounceDelay time.Duration
	// manifolds holds the installed manifolds by name.
	manifolds map[string]Manifold
	// dependents holds, for each named manifold, those that depend on it.
	dependents map[string][]string
	// current holds the active worker information for each installed manifold.
	current map[string]workerInfo
	// install, started, and stopped each communicate requests and changes into
	// the loop goroutine; only that goroutine touches the maps above.
	install chan installTicket
	started chan startedTicket
	stopped chan stoppedTicket
}
// loop serializes manifold install operations and worker start/stop notifications.
// It's notable for its oneShotDying var, which is necessary because any number of
// start/stop notification could be in flight at the point the engine needs to stop;
// we need to handle all those, and any subsequent messages, until the main loop is
// confident that every worker has stopped. (The usual pattern -- to defer a cleanup
// method to run before tomb.Done in NewEngine -- is not cleanly applicable, because
// it needs to duplicate that start/stop message handling; better to localise that
// in this method.)
func (engine *engine) loop() error {
	// Dying fires at most once; the channel variable is nilled afterwards so
	// the select never takes that case again (receive on nil blocks forever).
	oneShotDying := engine.tomb.Dying()
	for {
		select {
		case <-oneShotDying:
			oneShotDying = nil
			// Request a stop for every current worker; their stop
			// notifications keep arriving on engine.stopped below.
			for name := range engine.current {
				engine.stop(name)
			}
		case ticket := <-engine.install:
			// This is safe so long as the Install method reads the result.
			ticket.result <- engine.gotInstall(ticket.name, ticket.manifold)
		case ticket := <-engine.started:
			engine.gotStarted(ticket.name, ticket.worker)
		case ticket := <-engine.stopped:
			engine.gotStopped(ticket.name, ticket.error)
		}
		if engine.isDying() {
			if engine.allStopped() {
				// Every worker has stopped; the tomb may now die cleanly.
				return tomb.ErrDying
			}
		}
	}
}
// Kill is part of the worker.Worker interface.
func (engine *engine) Kill() {
	// A nil reason signals a clean shutdown to the loop goroutine.
	engine.tomb.Kill(nil)
}
// Wait is part of the worker.Worker interface. It blocks until the loop
// goroutine has exited and returns its final error.
func (engine *engine) Wait() error {
	return engine.tomb.Wait()
}
// Install is part of the Engine interface.
func (engine *engine) Install(name string, manifold Manifold) error {
	// result is unbuffered; the loop goroutine sends exactly one value per
	// installTicket, which this method must receive.
	result := make(chan error)
	select {
	case <-engine.tomb.Dying():
		return errors.New("engine is shutting down")
	case engine.install <- installTicket{name, manifold, result}:
		// This is safe so long as the loop sends a result.
		return <-result
	}
}
// gotInstall handles the params originally supplied to Install. It must only be
// called from the loop goroutine.
func (engine *engine) gotInstall(name string, manifold Manifold) error {
	logger.Infof("installing %s manifold...", name)
	if _, found := engine.manifolds[name]; found {
		return errors.Errorf("%s manifold already installed", name)
	}
	// Unknown inputs are only logged, not rejected: a dependency may be
	// installed later (or never), and workers cope via restarts.
	for _, input := range manifold.Inputs {
		if _, found := engine.manifolds[input]; !found {
			logger.Infof("%s manifold depends on unknown %s manifold", name, input)
		}
	}
	engine.manifolds[name] = manifold
	// Record the reverse dependency edges consumed by bounceDependents.
	for _, input := range manifold.Inputs {
		engine.dependents[input] = append(engine.dependents[input], name)
	}
	engine.current[name] = workerInfo{}
	engine.start(name, 0)
	return nil
}
// start invokes a runWorker goroutine for the manifold with the supplied name. It
// must only be called from the loop goroutine.
func (engine *engine) start(name string, delay time.Duration) {
	// Check preconditions.
	manifold, found := engine.manifolds[name]
	if !found {
		// Killing the tomb puts the engine into the dying state; the
		// isDying() check below then returns before any worker is started.
		engine.tomb.Kill(errors.Errorf("fatal: unknown manifold %s", name))
	}
	// Copy current info and check more preconditions.
	info := engine.current[name]
	if !info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: trying to start a second %s manifold worker", name))
	}
	// Final check that we're not shutting down yet...
	if engine.isDying() {
		logger.Infof("not starting %s manifold worker (shutting down)", name)
		return
	}
	// ...then update the info, copy it back to the engine, and start a worker
	// goroutine based on current known state.
	info.starting = true
	engine.current[name] = info
	getResource := engine.getResourceFunc(manifold.Inputs)
	go engine.runWorker(name, delay, manifold.Start, getResource)
}
// getResourceFunc returns a GetResourceFunc backed by a snapshot of current
// worker state, restricted to those workers declared in inputs. It must only
// be called from the loop goroutine; see inside for a detailed discussion of
// why we took this approach.
func (engine *engine) getResourceFunc(inputs []string) GetResourceFunc {
	// We snapshot the resources available at invocation time, rather than adding an
	// additional communicate-resource-request channel. The latter approach is not
	// unreasonable... but is prone to inelegant scrambles when starting several
	// dependent workers at once. For example:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B asks for A, gets A, doesn't react to bounce (*)
	//  * B communicates its worker back to loop; loop kills it immediately in
	//    response to earlier bounce
	//  * loop starts worker B again, now everything's fine; but, still, yuck.
	//    This is not a happy path to take by default.
	//
	// The problem, of course, is in the (*); the main thread *does* know that B
	// needs to bounce soon anyway, and it *could* communicate that fact back via
	// an error over a channel back into getResource; the StartFunc could then
	// just return (say) that ErrResourceChanged and avoid the hassle of creating
	// a worker. But that adds a whole layer of complexity (and unpredictability
	// in tests, which is not much fun) for very little benefit.
	//
	// In the analogous scenario with snapshotted dependencies, we see a happier
	// picture at startup time:
	//
	//  * Install manifold A; loop starts worker A
	//  * Install manifold B; loop starts worker B with empty resource snapshot
	//  * A communicates its worker back to loop; main thread bounces B
	//  * B's StartFunc asks for A, gets nothing, returns ErrUnmetDependencies
	//  * loop restarts worker B with an up-to-date snapshot, B works fine
	//
	// We assume that, in the common case, most workers run without error most
	// of the time; and, thus, that the vast majority of worker startups will
	// happen as an agent starts. Furthermore, most of them will have simple
	// hard dependencies, and their Start funcs will be easy to write; the only
	// components that may be impacted by such a strategy will be those workers
	// which still want to run (with reduced functionality) with some dependency
	// unmet.
	//
	// Those may indeed suffer the occasional extra bounce as the system comes
	// to stability as it starts, or after a change; but workers *must* be
	// written for resilience in the face of arbitrary bounces *anyway*, so it
	// shouldn't be harmful.
	outputs := map[string]OutputFunc{}
	workers := map[string]worker.Worker{}
	for _, resourceName := range inputs {
		outputs[resourceName] = engine.manifolds[resourceName].Output
		workers[resourceName] = engine.current[resourceName].worker
	}
	// The returned closure reads only the snapshot maps built above, so it is
	// safe to call from the worker goroutine without extra synchronization.
	return func(resourceName string, out interface{}) bool {
		switch {
		case workers[resourceName] == nil:
			// Input not running (or not declared): resource unavailable.
			return false
		case outputs[resourceName] == nil:
			// Worker exists but exposes no output; only a nil request succeeds.
			return out == nil
		}
		return outputs[resourceName](workers[resourceName], out)
	}
}
// runWorker starts the supplied manifold's worker and communicates it back to the
// loop goroutine; waits for worker completion; and communicates any error encountered
// back to the loop goroutine. It must not be run on the loop goroutine.
func (engine *engine) runWorker(name string, delay time.Duration, start StartFunc, getResource GetResourceFunc) {
	// We may or may not send on started, but we *must* send on stopped.
	// Note that the func literal below is invoked immediately: the
	// stoppedTicket is only sent once the worker has finished (or failed
	// to start), carrying the final error.
	engine.stopped <- stoppedTicket{name, func() error {
		logger.Infof("starting %s manifold worker in %s...", name, delay)
		// Wait out the requested start delay, unless the engine begins
		// shutting down first.
		select {
		case <-time.After(delay):
		case <-engine.tomb.Dying():
			logger.Infof("not starting %s manifold worker (shutting down)", name)
			return tomb.ErrDying
		}
		logger.Infof("starting %s manifold worker", name)
		worker, err := start(getResource)
		if err != nil {
			logger.Infof("failed to start %s manifold worker: %v", name, err)
			return err
		}
		logger.Infof("running %s manifold worker", name)
		// Hand the worker to the loop goroutine; if the engine starts dying
		// before the loop accepts it, kill the worker ourselves instead.
		select {
		case <-engine.tomb.Dying():
			logger.Infof("stopping %s manifold worker (shutting down)", name)
			worker.Kill()
		case engine.started <- startedTicket{name, worker}:
			logger.Infof("registered %s manifold worker", name)
		}
		// Block until the worker completes, and report how it ended.
		return worker.Wait()
	}()}
}
// gotStarted updates the engine to reflect the creation of a worker. It must
// only be called from the loop goroutine.
func (engine *engine) gotStarted(name string, worker worker.Worker) {
	// Copy current info; check preconditions and abort the workers if we've
	// already been asked to stop it.
	info := engine.current[name]
	switch {
	case info.worker != nil:
		// A worker is already registered under this name; that's an engine
		// bug, so kill the whole engine...
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker start", name))
		// ...and fall through so the redundant worker is stopped as well.
		fallthrough
	case info.stopping, engine.isDying():
		logger.Infof("%s manifold worker no longer required", name)
		worker.Kill()
	default:
		// It's fine to use this worker; update info and copy back.
		logger.Infof("%s manifold worker started", name)
		info.starting = false
		info.worker = worker
		engine.current[name] = info

		// Any manifold that declares this one as an input needs to be restarted.
		engine.bounceDependents(name)
	}
}
// gotStopped updates the engine to reflect the demise of (or failure to create)
// a worker. It must only be called from the loop goroutine.
func (engine *engine) gotStopped(name string, err error) {
	logger.Infof("%s manifold worker stopped: %v", name, err)

	// Copy current info and check for reasons to stop the engine: a stop we
	// never expected, or an error the engine considers fatal.
	info := engine.current[name]
	if info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: unexpected %s manifold worker stop", name))
	} else if engine.isFatal(err) {
		engine.tomb.Kill(err)
	}

	// Reset engine info; and bail out if we can be sure there's no need to bounce.
	engine.current[name] = workerInfo{}
	if engine.isDying() {
		logger.Infof("permanently stopped %s manifold worker (shutting down)", name)
		return
	}

	// If we told the worker to stop, we should start it again immediately,
	// whatever else happened.
	if info.stopping {
		engine.start(name, engine.bounceDelay)
	} else {
		// If we didn't stop it ourselves, we need to interpret the error.
		switch err {
		case nil:
			// Nothing went wrong; the task completed successfully. Nothing
			// needs to be done (unless the inputs change, in which case it
			// gets to check again).
		case ErrUnmetDependencies:
			// The task can't even start with the current state. Nothing more
			// can be done (until the inputs change, in which case we retry
			// anyway).
		default:
			// Something went wrong but we don't know what. Try again soon.
			engine.start(name, engine.errorDelay)
		}
	}

	// Manifolds that declared a dependency on this one only need to be notified
	// if the worker has changed; if it was already nil, nobody needs to know.
	if info.worker != nil {
		engine.bounceDependents(name)
	}
}
// stop ensures that any running or starting worker will be stopped in the
// near future. It must only be called from the loop goroutine.
func (engine *engine) stop(name string) {
	// A worker that is already stopping, or not running at all, needs no
	// further action.
	info := engine.current[name]
	if info.stopping {
		return
	}
	if info.stopped() {
		return
	}
	// Mark it stopping, ask any live worker to die, and record the new state.
	info.stopping = true
	if w := info.worker; w != nil {
		w.Kill()
	}
	engine.current[name] = info
}
// isDying reports whether the engine is shutting down. It's safe to call it
// from any goroutine.
func (engine *engine) isDying() bool {
	// A non-blocking receive on the tomb's Dying channel tells us whether
	// shutdown has begun.
	select {
	case <-engine.tomb.Dying():
	default:
		return false
	}
	return true
}
// allStopped reports whether no workers are running or starting. It must only
// be called from the loop goroutine.
func (engine *engine) allStopped() bool {
	// Any single live or starting worker is enough to answer no.
	for _, info := range engine.current {
		if info.stopped() {
			continue
		}
		return false
	}
	return true
}
// bounceDependents starts every stopped dependent of the named manifold, and
// stops every started one (and trusts the rest of the engine to restart them).
// It must only be called from the loop goroutine.
func (engine *engine) bounceDependents(name string) {
	logger.Infof("restarting dependents of %s manifold", name)
	for _, dependent := range engine.dependents[name] {
		// Stopped dependents are restarted directly; live ones are stopped,
		// and the engine will restart them when the stop is reported.
		if engine.current[dependent].stopped() {
			engine.start(dependent, engine.bounceDelay)
			continue
		}
		engine.stop(dependent)
	}
}
// workerInfo stores what an engine needs to know about the worker for a given
// Manifold.
type workerInfo struct {
	starting bool          // true while a worker is being started asynchronously
	stopping bool          // true once the worker has been asked to stop
	worker   worker.Worker // the registered worker, or nil if none is running
}
// stopped returns true unless the worker is either assigned or starting.
func (info workerInfo) stopped() bool {
	// Equivalent to: no worker registered, and no start in flight.
	return info.worker == nil && !info.starting
}
// installTicket is used by engine to induce installation of a named manifold
// and pass on any errors encountered in the process.
type installTicket struct {
	name     string
	manifold Manifold
	result   chan<- error // receives the outcome of the installation attempt
}
// startedTicket is used by engine to notify the loop of the creation of the
// worker for a particular manifold.
type startedTicket struct {
	name   string        // manifold whose worker started
	worker worker.Worker // the freshly created worker
}
// stoppedTicket is used by engine to notify the loop of the demise of (or
// failure to create) the worker for a particular manifold.
type stoppedTicket struct {
	name  string // manifold whose worker stopped
	error error  // nil if the worker completed without error
}
|
package machiner
import (
"launchpad.net/juju-core/container"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/watcher"
"launchpad.net/tomb"
)
// NewMachiner starts a machine agent running that
// deploys agents in the given directory.
// The Machiner dies when it encounters an error.
func NewMachiner(machine *state.Machine, varDir string) *Machiner {
	// Deploy unit agents via a simple container rooted at varDir.
	cont := &container.Simple{VarDir: varDir}
	return newMachiner(machine, cont)
}
// newMachiner constructs a Machiner that deploys through the supplied
// container and starts its watch loop for the given machine.
func newMachiner(machine *state.Machine, cont container.Container) *Machiner {
	machiner := &Machiner{container: cont}
	go machiner.loop(machine)
	return machiner
}
// Machiner represents a running machine agent.
type Machiner struct {
	tomb      tomb.Tomb           // tracks the lifetime of the loop goroutine
	container container.Container // used to deploy and destroy unit agents
}
// loop watches the machine's units and deploys or destroys unit agents as
// principal units are added to or removed from the machine. It runs until
// the tomb starts dying or the watcher fails.
func (m *Machiner) loop(machine *state.Machine) {
	defer m.tomb.Done()
	w := machine.WatchUnits()
	// Stop the watcher when the loop exits, recording any error in the tomb.
	defer watcher.Stop(w, &m.tomb)

	// TODO read initial units, check if they're running
	// and restart them if not. Also track units so
	// that we don't deploy units that are already running.
	for {
		select {
		case <-m.tomb.Dying():
			return
		case change, ok := <-w.Changes():
			if !ok {
				// The watcher's channel closed: propagate its error and quit.
				m.tomb.Kill(watcher.MustErr(w))
				return
			}
			// Destroy agents for principal units removed from the machine;
			// failures are logged but do not stop the loop.
			for _, u := range change.Removed {
				if u.IsPrincipal() {
					if err := m.container.Destroy(u); err != nil {
						log.Printf("cannot destroy unit %s: %v", u.Name(), err)
					}
				}
			}
			// Deploy agents for principal units newly assigned to the machine.
			for _, u := range change.Added {
				if u.IsPrincipal() {
					if err := m.container.Deploy(u); err != nil {
						// TODO put unit into a queue to retry the deploy.
						log.Printf("cannot deploy unit %s: %v", u.Name(), err)
					}
				}
			}
		}
	}
}
// Wait blocks until the Machiner has died, then reports the error it
// encountered, if any.
func (m *Machiner) Wait() error {
	err := m.tomb.Wait()
	return err
}
// Stop terminates the Machiner and returns any error that it encountered.
func (m *Machiner) Stop() error {
	// Killing the tomb with nil requests a clean shutdown of the loop;
	// Wait reports how the shutdown actually went.
	m.tomb.Kill(nil)
	err := m.tomb.Wait()
	return err
}
worker/machiner: fix comment
package machiner
import (
"launchpad.net/juju-core/container"
"launchpad.net/juju-core/log"
"launchpad.net/juju-core/state"
"launchpad.net/juju-core/state/watcher"
"launchpad.net/tomb"
)
// NewMachiner starts a machine agent running that
// deploys agents in the given directory.
// The Machiner dies when it encounters an error.
func NewMachiner(machine *state.Machine, varDir string) *Machiner {
	// Deploy unit agents via a simple container rooted at varDir.
	cont := &container.Simple{VarDir: varDir}
	return newMachiner(machine, cont)
}
// newMachiner constructs a Machiner that deploys through the supplied
// container and starts its watch loop for the given machine.
func newMachiner(machine *state.Machine, cont container.Container) *Machiner {
	m := &Machiner{container: cont}
	go m.loop(machine)
	return m
}
// Machiner represents a running machine agent.
type Machiner struct {
	tomb      tomb.Tomb           // tracks the lifetime of the loop goroutine
	container container.Container // used to deploy and destroy unit agents
}
// loop watches the machine's units and deploys or destroys unit agents as
// principal units are added to or removed from the machine. It runs until
// the tomb starts dying or the watcher fails.
func (m *Machiner) loop(machine *state.Machine) {
	defer m.tomb.Done()
	w := machine.WatchUnits()
	// Stop the watcher when the loop exits, recording any error in the tomb.
	defer watcher.Stop(w, &m.tomb)

	// TODO read initial units, check if they're running
	// and restart them if not. Also track units so
	// that we don't deploy units that are already running.
	for {
		select {
		case <-m.tomb.Dying():
			return
		case change, ok := <-w.Changes():
			if !ok {
				// The watcher's channel closed: propagate its error and quit.
				m.tomb.Kill(watcher.MustErr(w))
				return
			}
			// Destroy agents for principal units removed from the machine;
			// failures are logged but do not stop the loop.
			for _, u := range change.Removed {
				if u.IsPrincipal() {
					if err := m.container.Destroy(u); err != nil {
						log.Printf("cannot destroy unit %s: %v", u.Name(), err)
					}
				}
			}
			// Deploy agents for principal units newly assigned to the machine.
			for _, u := range change.Added {
				if u.IsPrincipal() {
					if err := m.container.Deploy(u); err != nil {
						// TODO put unit into a queue to retry the deploy.
						log.Printf("cannot deploy unit %s: %v", u.Name(), err)
					}
				}
			}
		}
	}
}
// Wait waits until the Machiner has died, and returns the error encountered.
// It blocks the caller until the loop goroutine has exited.
func (m *Machiner) Wait() error {
	return m.tomb.Wait()
}
// Stop terminates the Machiner and returns any error that it encountered.
// Killing the tomb with nil requests a clean shutdown of the loop.
func (m *Machiner) Stop() error {
	m.tomb.Kill(nil)
	return m.tomb.Wait()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.